1
2
3 """
4 Performing statistics on the peak and integrated intensity of resolved
5 molecular lines.
6
7 Author: R. Lombaert
8
9 """
10
11 import os
12 import scipy
13 from scipy import argmin,ones
14 from scipy import array
15 from scipy import sum
16 import operator
17 import types
18 import numpy as np
19
20 import cc.path
21 from cc.tools.io import DataIO
22 from cc.statistics.Statistics import Statistics
23 import matplotlib.pyplot as plt
24 from cc.modeling.objects import Transition
25 from cc.statistics import BasicStats
26 from time import gmtime
27
29
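class ResoStats(Statistics):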
30 """
31 Environment with several tools to perform statistics on the peak and
32 integrated intensities of resolved molecular lines.
33
34 """
35
36 def __init__(self,star_name,code='GASTRoNOoM',path_code='codeJun2013',\
37 lll_p=None,use_bestvlsr=1,vmin=0.0,vmax=0.0):
38
39 """
40 Initializing an instance of IntIntStats.
41
42 Then run setInstrument, setModels and setIntensities. The rest of the
43 class works interactively.
44
45 @param star_name: Star name from Star.dat
46 @type star_name: string
47
48 @keyword code: the code used for producing your output
49
50 (default: 'GASTRoNOoM')
51 @type code: string
52 @keyword path_code: Output folder in the code's home folder
53
54 (default: 'codeJun2013')
55 @type path_code: string
56 @keyword lll_p: The number of variable parameters in the model grid. If
57 None, the automatic loglikelihood determination is not
58 done. You can still set the threshold values with a
59 class bound method.
60
61 (default: None)
62 @type lll_p: int
63 @keyword use_bestvlsr: Use the fitted best-guess for the v_lsr when
64 determining the velocity grid for the model. If
65 not, the vlsr from the Star.dat file or the fits
66 file is used.
67
68 (default: 1)
69 @type use_bestvlsr: bool
70 @keyword vmin: The minimum value in km/s of the spectral line window.
71 Ideally this is the same for all lines under
72 consideration. If an invalid spectral window is given
73 (vmax==vmin, or vmin>vmax), the spectral window is taken
74 from the line fit results. This leads to a different
75 window for each line under consideration, and is not
76 recommended for calculating the loglikelihood.
77
78 (default: 0.0)
79 @type vmin: float
80 @keyword vmax: The maximum value in km/s of the spectral line window.
81 Ideally this is the same for all lines under
82 consideration. If an invalid spectral window is given
83 (vmax==vmin, or vmin>vmax), the spectral window is taken
84 from the line fit results. This leads to a different
85 window for each line under consideration, and is not
86 recommended for calculating the loglikelihood.
87
88 (default: 0.0)
89 @type vmax: float
90
91 """
92
93 super(ResoStats,self).__init__(star_name=star_name,\
94 code=code,path_code=path_code)
95
96
97 self.use_bestvlsr = use_bestvlsr
98 self.vmin = vmin
99 self.vmax = vmax
100 print 'Calculating loglikelihood statistics using spectral window:'
101 print 'v_min = {} km/s, v_max = {} km/s.'.format(str(vmin),str(vmax))
102
103
104 self.translist = []
105
106 self.trans_models = dict()
107
108
109
110 self.star_selection = dict()
111
112
113
114
115
116
117
118
119 self.dinttmb = dict()
120 self.dpeaktmb = dict()
121
122
123
124
125
126
127
128
129 self.minttmb = dict()
130 self.mpeaktmb = dict()
131 self.loglikelihood = dict()
132 self.ratiopeak = dict()
133 self.ratioint = dict()
134 self.ratiocombo = dict()
135
136
137
138 self.no_stats = False
139 self.stats = dict([('peak',self.ratiopeak),('int',self.ratioint),\
140 ('combo',self.ratiocombo)])
141
142
143
144
145 telescopes = DataIO.getInputData(keyword='TELESCOPE',start_index=5,\
146 filename='Telescope.dat')
147 abs_errs = DataIO.getInputData(keyword='ABS_ERR',start_index=5,\
148 filename='Telescope.dat')
149 self.tele_uncertainties = dict(zip(telescopes,abs_errs))
150
151
152
153 self.trans_uncertainties = dict()
154 self.lll_threshold = dict()
155
156
157
158 self.noisy = dict()
159
160
161
162 if lll_p is None or lll_p == '' or int(lll_p) < 1 or int(lll_p) > 20:
163 self.lll_p = None
164 print 'No STAT_LLL_P given. Not calculating the loglikelihood '+\
165 'threshold automatically.'
166 else:
167 self.lll_p = int(lll_p)
168
169
171
172 '''
173 Set and read the data objects for this statistics module.
174
175 In this case, a list of sample transitions for which the data will be
176 read is the only input required.
177
178 Make sure sample_transitions are copies of the originals, such as
179 returned by Transition.extractTransFromStars().
180
181 @param sample_transitions: Sample transitions used as reference for
182 the data files.
183 @type sample_transitions: list[Transition()]
184
185 '''
186
187 super(ResoStats,self).setInstrument(instrument_name='FREQ_RESO')
188
189
190
191
192 if not sample_transitions:
193 print 'WARNING! No sample transitions given for Statistics ' +\
194 'module with instrument == FREQ_RESO. No stats will ' + \
195 'be calculated.'
196 [t.readData() for t in sample_transitions]
197 self.sample_trans = sample_transitions
198
199
201
202 """
203 The data intensities are stored in the dictionaries
204 self.dinttmb/self.dpeaktmb, the model intensities in
205 self.minttmb/self.mpeaktmb.
206 
207 The keys in the dictionaries are the Transitions to which the sphinx
208 models or datafiles belong.
209
210 The model values are lists in accordance with self.star_grid, the data
211 values are single floats for the first dataset for the transition.
212
213 In addition, the loglikelihoods are calculated.
214
215 """
216
217 if not self.star_grid:
218 return
219
220 self.translist = [t
221 for t in self.sample_trans
222 if t.lpdata]
223 self.includedtrans = [i for i in range(len(self.translist))]
224
225
226
227
228 for ist,st in enumerate(self.translist):
229
230 noise = st.getNoise()
231
232
233 for k,v in self.tele_uncertainties.items():
234 if k in st.telescope:
235 self.trans_uncertainties[ist] = v
236 continue
237 self.lll_threshold[ist] = None
238
239
240
241
242 self.trans_models[st] = [star.getTransition(st)
243 for star in self.star_grid
244 if star['LAST_GASTRONOOM_MODEL']]
245
246
247
248 self.star_selection[st] = [star
249 for star in self.star_grid
250 if star['LAST_GASTRONOOM_MODEL']]
251
252
253
254
255 all_ids = [bool(t.getModelId()) for t in self.trans_models[st]]
256 if False in all_ids:
257 self.no_stats = True
258 print 'One of the sphinx models was not calculated properly,'+\
259 ' even though cooling succeeded. Cannot do statistics.'
260 return
261
262
263
264
265
266
267 for mt in self.trans_models[st]:
268 mt.setData(st)
269
270
271 self.minttmb[st] = array([mt.getIntTmbSphinx()
272 for mt in self.trans_models[st]])
273 self.mpeaktmb[st] = array([mt.getPeakTmbSphinx()
274 for mt in self.trans_models[st]])
275
276
277 self.dpeaktmb[st] = st.getPeakTmbData()
278
279 if self.dpeaktmb[st] <= 3*noise:
280 self.noisy[ist] = True
281 else:
282 self.noisy[ist] = False
283 self.dinttmb[st]= st.getIntTmbData(use_fit=self.noisy[ist])[0]
284
285
286 llls = array([mt.getLoglikelihood(use_bestvlsr=self.use_bestvlsr,\
287 vmin=self.vmin,vmax=self.vmax,\
288 use_fit=self.noisy[ist])\
289 for mt in self.trans_models[st]])
290 self.loglikelihood[st] = llls
291
292
293 self.ratioint[st] = self.minttmb[st]/self.dinttmb[st]
294 self.ratiopeak[st] = self.mpeaktmb[st]/self.dpeaktmb[st]
295 self.ratiocombo[st] = zip(self.ratiopeak[st],self.ratioint[st])
296
297 self.calcLoglikelihoodThreshold()
298
299
301
302 '''
303 Determine the loglikelihood thresholds from the model grid, assuming
304 the amount of free parameters in the model grid is known.
305
306 The latter is given by STAT_LLL_P in the CC input.
307
308 If a selection of models is given, as well as the index of a sample
309 transition, the max lll of that selection of models and the threshold
310 value based on it --- for this sample transition --- are returned. No
311 values are remembered in this case.
312
313 @keyword bfms: Subselection of models for which the LLL threshold is
314 calculated. If None, ist is ignored, and the LLL
315 threshold is calculated for the whole collection of
316 models, the value for which is saved in
317 self.lll_threshold
318
319 (default: [])
320 @type bfms: list[Star]
321 @keyword ist: If bfms is not an empty list, this gives the index of the
322 sample trans for which the lll threshold is calculated
323 based on subgrid of Star() models, given by bfms. If
324 bfms == [], ist is ignored.
325
326 (default: None)
327 @type ist: int
328
329 @return: The max lll and lll threshold for this sample transition and
330 subset of models. None if no subset of models was given.
331 @rtype: (float,float)
332
333 '''
334
335
336
337
338 quantiles = dict([(1,3.8414588),(2,5.9914645),(3,7.8147279),\
339 (4,9.4877290),(5,11.070498),(6,12.591587),\
340 (7,14.067140),(8,15.507313),(9,16.918978),\
341 (10,18.307038),(11,19.675138),(12,21.026070),\
342 (13,22.362032),(14,23.684791),(15,24.995790),\
343 (16,26.296228),(17,27.587112),(18,28.869299),\
344 (19,30.143527),(20,31.410433)])
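# The hard-coded values above are the 95% quantiles of a chi-squared
# distribution with p = 1..20 degrees of freedom (scipy.stats.chi2.ppf(0.95,p)
# reproduces them). The threshold is then lll_threshold = max(lll) - quant/2:
# e.g. for p = 4 free parameters quant = 9.4877, so models within ~4.74 of
# the best loglikelihood are accepted.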
345 if self.lll_p is None:
346 return
347 quant = quantiles[self.lll_p]
348 self.lll_quant = quant
349 if not bfms:
350 for ist,st in enumerate(self.translist):
351
352 maxlll = max(self.loglikelihood[st])
353 self.lll_threshold[ist] = -quant/2.+maxlll
354 return
355 else:
356 st = self.translist[ist]
357 maxlll = max([lll
358 for lll,smodel in zip(self.loglikelihood[st],\
359 self.star_selection[st])
360 if smodel in bfms])
361 lll_threshold = -quant/2.+maxlll
362 return (maxlll,lll_threshold)
363
364
366
367 '''
368 Set a transition given by its index to be included or excluded in the
369 proper best fit determination, through either integrated or peak Tmbs.
370 The other option is to merely look at the 3 sigma level compared to the
371 model peak value.
372
373 By default, the script checks if dtmb is above 3 sigma in the dataset.
374
375 Sometimes if dtmb is between 2 sigma and 3 sigma, one may want to
376 include it properly anyway. Also if the peak Tmb determination is not
377 very accurate just because of the noisy data, it may be necessary to
378 include it regardless. This is possible here.
379
380 Note that the state of the 'noisiness' has to be included when calling
381 this method. False or True works, of course.
382
383 @param ist: The index of the transition for which the noise flag is set.
384 @type ist: int
385 @param isnoisy: Is the dataset noisy or not?
386 @type isnoisy: bool
387
388 '''
389
390 isnoisy = int(isnoisy)
391 self.noisy[ist] = isnoisy
392
393
394
396
397 '''
398 Change criterion for noisy lines. By default, the script checks if
399 the peak of the line profile is above 3 sigma in the dataset.
400 With this method, the factor can be changed.
401
402 The integrated line strengths are also recalculated based on the new
403 criterion.
404
405 @keyword factor: Sigma level. If the peak intensity of a line is below
406 this level, it is marked as noisy.
407 @type factor: int
408
409 '''
410
411 for ist,st in enumerate(self.translist):
412 noise = st.getNoise()
413 if self.dpeaktmb[st] <= factor*noise:
414 self.noisy[ist] = True
415 else:
416 self.noisy[ist] = False
417
418 self.resetLineStrengths()
419
420
421
423
424 '''
425 Reset the line strengths of the data and calculate the ratio with model
426 line strengths. Also recalculate the loglikelihood in this case.
427
428 The fitted line profile is used in case the line is flagged as noisy by
429 setting use_fit. The fit is either a gaussian or a parabolic fit as
430 determined by LPTools.fitLP
431
432 '''
433
434 for ist,st in enumerate(self.translist):
435
436
437 self.dinttmb[st] = st.getIntTmbData(use_fit = self.noisy[ist])[0]
438 self.ratioint[st] = self.minttmb[st]/self.dinttmb[st]
439 self.ratiocombo[st] = zip(self.ratiopeak[st],self.ratioint[st])
440
441
442 llls = array([mt.getLoglikelihood(use_bestvlsr=self.use_bestvlsr,\
443 vmin=self.vmin,vmax=self.vmax,\
444 use_fit=self.noisy[ist])\
445 for mt in self.trans_models[st]])
446 self.loglikelihood[st] = llls
447
448
449
451
452 '''
453 Include a transition in selecting the best fit models.
454
455 By default, all transitions that have data and sphinx models associated
456 with them are selected.
457
458 Use the index of the transition to include them.
459
460 @param ist: The index/indices of the transitions to be included.
461 @type ist: int/list
462
463 '''
464
465 if type(ist) is types.IntType:
466 ist = [ist]
467 self.includedtrans.extend(ist)
468 self.includedtrans = sorted(list(set(self.includedtrans)))
469
470
471
473
474 '''
475 Exclude a transition in selecting the best fit models.
476
477 By default, all transitions that have data and sphinx models associated
478 with them are selected.
479
480 Use the index of the transition to exclude them.
481
482 @param ist: The index/indices of the transitions to be excluded.
483 @type ist: int/list
484
485 '''
486
487 if type(ist) is types.IntType:
488 ist = [ist]
489 self.includedtrans = [i for i in self.includedtrans if i not in ist]
490
491
492
494
495 '''
496 List all the transitions that have both data and sphinx models
497 associated with them.
498
499 Can be used to figure out the index of each transition so you can
500 include or exclude them.
501
502 Also indicates whether it is a noisy line or not.
503 '''
504
505 strings = ['%i [%scluded]: %s \t at %.2f uncertainty. The line is %snoisy!%s'\
506 %(ist,ist in self.includedtrans and 'IN' or 'EX',\
507 DataIO.fillOutSpaces(str(st),60),\
508 self.trans_uncertainties[ist],\
509 (not self.noisy[ist] and 'NOT ' or ''),\
510 not self.lll_threshold[ist] is None and \
511 ' The lll threshold is %.2e'%self.lll_threshold[ist] or\
512 '')
513 for ist,st in enumerate(self.translist)]
514 print '\n'.join(strings)
515
516
517
519
520 '''
521 Change the default uncertainty for a given transition.
522
523 Makes use of the template transition index! (see listTrans method)
524
525 The value is given as a decimal number, i.e. x% / 100.
526
527 @param ist: The index of transition
528 @type ist: int
529 @param value: The new uncertainty requested
530 @type value: float
531
532 '''
533
534 self.trans_uncertainties[ist] = float(value)
535
536
537
539
540 '''
541 Set the loglikelihood threshold above which models are considered best fit models.
542
543 Makes use of the template transition index!
544
545 An automatically calculated value is determined as well from the lll
546 values of all models. This assumes the amount of variable parameters
547 in the model grid is known, and set through the CC input! (STAT_LLL_P)
548
549 @param ist: The index of transition
550 @type ist: int
551 @param value: The new loglikelihood threshold requested
552 @type value: float
553
554 '''
555
556 self.lll_threshold[ist] = float(value)
557
558
559
561
562 '''
563 Convenience method that runs setLoglikelihoodThreshold().
564
565 @param ist: The index of transition
566 @type ist: int
567 @param value: The new loglikelihood threshold requested
568 @type value: float
569
570 '''
571
572 self.setLoglikelihoodThreshold(ist,value)
573
574
576
577
578 '''
579 Print the statistics for all transitions and models.
580
581 @keyword bfms: A list of a subselection of Star() models. If specified
582 only the models in this list will be printed.
583
584 (default: [])
585 @type bfms: list[Star]
586
587 '''
588
589 bfms = list(bfms)
590 if self.no_stats: return
591 for ist,st in enumerate(self.translist):
592 if ist in self.includedtrans:
593
594
595
596 noise = st.getNoise()
597 print '*******************************************************'
598 print 'Statistics for %i: %s:'%(ist,str(st))
599 print '-------------------------------------'
600 print 'Data intensities [integrated --- peak --- STD (noise)]:'
601 print '%f K km/s \t---\t %f K \t---\t %f K'\
602 %(self.dinttmb[st],self.dpeaktmb[st],noise)
603 print '-------------------------------------'
604 print 'Model/Data intensities [integrated --- peak --- lll]:'
605 lines = ['- %s: \t %.3f \t---\t %.3f \t---\t %.2e\
606 '%(str(tr.getModelId()),ri,rp,lll)
607 for tr,ri,rp,lll,s in zip(self.trans_models[st],\
608 self.ratioint[st],\
609 self.ratiopeak[st],\
610 self.loglikelihood[st],\
611 self.star_selection[st])
612 if (not bfms or s in bfms)]
613 print '\n'.join(lines)
614 if not bfms:
615 print 'The max LLL for this model is : %.2e.'\
616 %max(self.loglikelihood[st])
617 if not self.lll_p is None:
618 print 'This leads to a threshold of %.2e with %i free parameters.'\
619 %(self.lll_threshold[ist],self.lll_p)
620 else:
621 if not self.lll_p is None:
622 maxlll, lll_thr = self.calcLoglikelihoodThreshold(bfms,ist)
623 print 'The max LLL for this subselection of models is: ' +\
624 '%.2e.' %maxlll
625 print 'This leads to a threshold of %.2e with %i free ' \
626 %(lll_thr,self.lll_p) + 'parameters.'
627 else:
628 maxlll = max([lll
629 for lll,smodel in zip(self.loglikelihood[st],\
630 self.star_selection[st])
631 if smodel in bfms])
632 print 'The max LLL for this subselection of models is: ' +\
633 '%.2e.' %maxlll
634
635
636
638
639 '''
640 Returns a list of Star() models for which the uncertainty criteria
641 are satisfied for the selected transitions.
642
643 Transitions can be selected or deselected by the includeTrans and
644 excludeTrans methods.
645
646 The uncertainty criteria are preset but can be changed by the
647 setUncertainty method.
648
649 Note that for lines in which the peak Tmb is less than 3*sigma, the
650 goodness of fit is determined based on the comparison between peak of
651 sphinx model and the 3*sigma level. Less: included, more: excluded.
652 Note that the sphinx peak values are scaled down by the uncertainty on
653 the data, to take into account data uncertainties as well.
654
655 @keyword mode: The mode for model selection. Include: int, peak, combo
656 int: based on the integrated intensity ratios,
657 peak: based on the peak intensity ratios
658 combo: Based on both
659
660 (default: 'int')
661 @type mode: string
662 @keyword use_lll: Also use the loglikelihood statistics in selecting
663 best fit models. For this it is required to set a
664 threshold loglikelihood value: anything at or above
665 this threshold is kept in the best fit model list. Use
666 setLoglikelihoodThreshold() or setLLL() to set it.
667
668 (default: 1)
669 @type use_lll: bool
670
671 @return: The model ids of the 'best fitting' models according to the
672 uncertainty criteria.
673 @rtype: list[string]
674
675 '''
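# Example call (a sketch; 'stats' stands for a ResoStats instance on which
# setIntensities() has already been run):
#   stats.includeTrans([0,1,2])
#   bfm = stats.selectBestFitModels(mode='combo',use_lll=1)
# bfm then holds the model ids for which both the peak and integrated
# ratios lie within the allowed uncertainty and, if requested, the
# loglikelihood exceeds the per-line threshold.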
676
677 if self.no_stats: return
678 if output:
679 print 'Selecting best fit models in <%s> mode.'%mode
680
681 self.modellist = [self.star_grid[ii]['GAS_LINES'][0].getModelId() \
682 for ii in range(len(self.star_grid))]
683 stars = array(self.modellist)
684
685 bfbools = ones(len(stars),dtype='bool')
686 for ist,st in enumerate(self.translist):
687 if ist in self.includedtrans:
688 dpeak = self.dpeaktmb[st]
689 noise = st.getNoise()
690 err = self.trans_uncertainties[ist]
691 lll_thresh = self.lll_threshold[ist]
692 for i,(rat,lll,mt) in enumerate(zip(self.stats[mode][st],\
693 self.loglikelihood[st],\
694 self.trans_models[st])):
695 if self.noisy[ist]:
696 this_bool = mt.getPeakTmbSphinx()*(1.-err) <= 3.*noise
697 if not this_bool:
698 bfbools[i] = False
699 elif mode == 'combo':
700 this_bool = (rat[0] < 1.+err and rat[0] > 1.-err) and\
701 (rat[1] < 1.+err and rat[1] > 1.-err)
702 if not this_bool: bfbools[i] = False
703 else:
704 this_bool = rat < 1.+err and rat > 1.-err
705 if not this_bool:
706 bfbools[i] = False
707
708
709 if not lll_thresh is None and not self.noisy[ist] \
710 and use_lll and lll < lll_thresh:
711 bfbools[i] = False
712
713 self.bfm = stars[bfbools]
714 self.bfm = list(self.bfm)
715 return self.bfm
716
717
718
720
721 '''
722 Same function as selectBestFitModels, but uses only the
723 loglikelihood statistic.
724
725 @return: The model ids of the 'best fitting' models according to the
726 loglikelihood statistic.
727 @rtype: list[string]
728
729 '''
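# In short: a model id survives only if, for every included line, its
# loglikelihood is at or above that line's lll_threshold.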
730
731 if self.no_stats: return
732 if output:
733 print 'Selecting best fit models, \
734 only based on loglikelihood statistic.'
735
736 self.modellist = [self.star_grid[ii]['GAS_LINES'][0].getModelId() \
737 for ii in range(len(self.star_grid))]
738 stars = array(self.modellist)
739 bfbools = ones(len(stars),dtype='bool')
740 for ist,st in enumerate(self.translist):
741 if ist in self.includedtrans:
742 lll_thresh = self.lll_threshold[ist]
743 for i,lll in enumerate(self.loglikelihood[st]):
744
745
746
747
748 if not lll_thresh is None \
749 and lll < lll_thresh:
750 bfbools[i] = False
751 self.bfmlll = stars[bfbools]
752 self.bfmlll = list(self.bfmlll)
753
754 return self.bfmlll
755
756
757
759
760 '''
761 Determines whether a model lies within a 95% confidence interval around
762 the best fitting model (per line). As the best fitting model is included
763 in the arrays, at least one model within the range is to be expected.
764
765 Based on Decin et al. 2007:
766 - l_m - quant/2 \leq l \leq l_m
767 - i.e. lll_threshold \leq l \leq l_m
768
769 The 'output' is divided into three components.
770 * Dictionary confidenceLLL (transitions as keywords)
771 Gives the difference between the calculated loglikelihood
772 and the threshold value
773 * Dictionary confidenceLLL_verdict (transitions as keywords)
774 Contains the result (1 or 0) for every model per transition
775 * Array confidenceLLL_models
776 Indices of the models that fit the most included transitions
777
778 '''
779
780 self.confidenceLLL = dict()
781 self.confidenceLLL_verdict = dict()
782
783 for i,st in enumerate(self.translist):
784 self.confidenceLLL[st] = []
785 self.confidenceLLL_verdict[st] = []
786
787 maxlll = max(self.loglikelihood[st])
788
789 for kk in range(len(self.loglikelihood[st])):
790 if maxlll >= self.loglikelihood[st][kk] \
791 and self.loglikelihood[st][kk] >= self.lll_threshold[i]:
792 self.confidenceLLL[st].append(\
793 self.loglikelihood[st][kk]-self.lll_threshold[i])
794 self.confidenceLLL_verdict[st].append(1)
795 else:
796 self.confidenceLLL[st].append(\
797 self.loglikelihood[st][kk]-self.lll_threshold[i])
798 self.confidenceLLL_verdict[st].append(0)
799
800 total = []
801 if includedTrans == 1:
802 includedTrans = self.includedtrans
803
804 for itr in includedTrans:
805 for i in range(len(self.star_grid)):
806 if self.confidenceLLL_verdict[self.translist[itr]][i] == 1:
807 total.append(i)
808
809 counted = [total.count(k) for k in range(len(self.star_grid))]
810 self.confidenceLLL_models = np.where(array(counted) == max(counted))[0]
811
812
813
814
816
817 '''
818 Checks which lines are fitted by which models, using all four selection
819 criteria. For every criterion, a list 'occurences_bfmxxx' is made. This
820 list contains a sublist for each model, len(list) = # models. Every sublist
821 contains a one or zero, indicating whether a line is fitted by the model
822 according to the criterion, len(sublist) = # lines.
823
824 The lists fittedLines_bfmxxx contain how often a line was modelled according
825 to the criterion, len(list) = # lines.
826
827 The lists fittedModels_bfmxxx contain the number of lines each model fitted
828 according to the criterion, len(list) = # models.
829
830 '''
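# Bookkeeping produced below: occurences_bfmint[i][x] is 1 if model i fits
# included line x in 'int' mode (0 otherwise); summing over models (axis 0)
# gives fittedLines_bfmint, summing over lines (axis 1) gives
# fittedModels_bfmint. The peak, combo and lll variants are analogous.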
831
832 if self.no_stats: return
833 print "Checking which model occurs most as a best fit per line..."
834
835 trans = self.translist
836 T = len(trans)
837 orig_included = self.includedtrans
838
839
840 if excludevib:
841 self.excludeTrans([i for i in range(T) \
842 if str(trans[i]).split()[1] != '0'])
843
844
845 all_bfmint = []
846 all_bfmpeak = []
847 all_bfmcombo = []
848 all_bfmlll = []
849 for ii in orig_included:
850 self.excludeTrans([i for i in range(T)])
851 self.includeTrans(ii)
852 all_bfmint.append(self.selectBestFitModels('int', \
853 use_lll, output = 0))
854 all_bfmpeak.append(self.selectBestFitModels('peak', \
855 use_lll, output = 0))
856 all_bfmcombo.append(self.selectBestFitModels('combo', \
857 use_lll, output = 0))
858 all_bfmlll.append(self.selectBestFitModelsLLL(output = 0))
859
860
861 self.occurences_bfmint = [[all_bfmint[x].count(self.modellist[i]) \
862 for x in range(len(all_bfmint))] \
863 for i in range(len(self.modellist))]
864
865 self.occurences_bfmpeak = [[all_bfmpeak[x].count(self.modellist[i]) \
866 for x in range(len(all_bfmpeak))] \
867 for i in range(len(self.modellist))]
868
869 self.occurences_bfmcombo = [[all_bfmcombo[x].count(self.modellist[i]) \
870 for x in range(len(all_bfmcombo))] \
871 for i in range(len(self.modellist))]
872
873 self.occurences_bfmlll = [[all_bfmlll[x].count(self.modellist[i]) \
874 for x in range(len(all_bfmlll))] \
875 for i in range(len(self.modellist))]
876
877
878 self.includedtrans = orig_included
879
880
881 self.fittedLines_bfmint = sum(self.occurences_bfmint, axis = 0)
882 self.fittedLines_bfmpeak = sum(self.occurences_bfmpeak, axis = 0)
883 self.fittedLines_bfmcombo = sum(self.occurences_bfmcombo, axis = 0)
884 self.fittedLines_bfmlll = sum(self.occurences_bfmlll, axis = 0)
885
886
887 self.fittedModels_bfmint = sum(self.occurences_bfmint, axis = 1)
888 self.fittedModels_bfmpeak = sum(self.occurences_bfmpeak, axis = 1)
889 self.fittedModels_bfmcombo = sum(self.occurences_bfmcombo, axis = 1)
890 self.fittedModels_bfmlll = sum(self.occurences_bfmlll, axis = 1)
891
892
894
895 '''
896 Checks which lines are fitted by which models according to the
897 loglikelihood statistic. A list 'occurences_bfmlll' is made. This
898 list contains a sublist for each model, len(list) = # models.
899 Every sublist contains a one or zero, indicating whether a line is
900 fitted by the model according to the criterion, len(sublist) = # lines.
901
902 The list fittedLines_bfmlll contains how often a line was modelled
903 according to the criterion, len(list) = # lines.
904
905 The list fittedModels_bfmlll contains the number of lines each model
906 fitted according to the criterion, len(list) = # models.
907
908 '''
909
910 if self.no_stats: return
911 print "Checking which model occurs most as a best fit per line..."
912 trans = self.translist
913 T = len(trans)
914 orig_included = self.includedtrans
915
916
917 if excludevib:
918 self.excludeTrans([i for i in range(T) \
919 if str(trans[i]).split()[1] != '0'])
920
921
922 all_bfmlll = []
923 for ii in orig_included:
924 self.excludeTrans([i for i in range(T)])
925 self.includeTrans(ii)
926 all_bfmlll.append(self.selectBestFitModelsLLL(output = 0))
927
928
929 self.occurences_bfmlll = [[all_bfmlll[x].count(self.modellist[i]) \
930 for x in range(len(all_bfmlll))] \
931 for i in range(len(self.modellist))]
932
933
934 self.includedtrans = orig_included
935
936
937 self.fittedLines_bfmlll = sum(self.occurences_bfmlll, axis = 0)
938
939
940 self.model_bfmlll = sum(self.occurences_bfmlll, axis = 1)
941
942 if plot:
943 plot_id = 'plot_%.4i-%.2i-%.2ih%.2i-%.2i-%.2i' \
944 %(gmtime()[0],gmtime()[1],gmtime()[2],\
945 gmtime()[3],gmtime()[4],gmtime()[5])
946 self.modellist = [self.star_grid[ii]['GAS_LINES'][0].getModelId()\
947 .replace('_','-') for ii in range(len(self.star_grid))]
948
949 plt.clf()
950 fig = plt.figure(2, figsize = (15, 10))
951 ax = fig.add_subplot(111)
952 ax.set_xticks(np.arange(len(self.translist))-0.5)
953 ax.set_yticks(np.arange(len(self.modellist)))
954 ax.xaxis.set_ticklabels([str(st.jup)+'-'+str(st.jlow)+' '+\
955 st.telescope if self.noisy[ist]== False \
956 else '\\textbf{'+str(st.jup)+'-'+str(st.jlow)+' '+\
957 st.telescope+'}' for ist,st in enumerate(self.translist)], \
958 rotation = 60)
959 ax.yaxis.set_ticklabels([i for i in self.modellist])
960 ax.imshow(self.occurences_bfmlll, interpolation='nearest', \
961 origin='upper', cmap = 'Blues')
962 plt.tight_layout()
963 fig.suptitle('Loglikelihood criterion', size = 18)
964 path = os.path.join(getattr(cc.path,self.code.lower()), \
965 self.path_code,'stars', self.star_name)
966 DataIO.testFolderExistence(os.path.join(path,'resostats'))
967 filename_combo = os.path.join(path, 'resostats','lll-%s_len_%s-%s'\
968 %(self.modellist[0],(len(self.modellist)),plot_id))
969 fig.savefig(filename_combo+'.pdf')
970 print '*** Plot of stats can be found at:'
971 print filename_combo+'.pdf'
972
973
974
976
977 '''
978 Calculate the loglikelihood function per transition.
979 Output similar to that of calcRatioxxxTmb.
980
981 Output can be found in:
982 * Dictionary line_lll: dictionary containing whether
983 a line satisfies the condition or not. One list per transition,
984 each list containing the verdict per model.
985 * List model_lll: list containing the verdict per transition,
986 per model. Number of lists = number of models.
987 Length of each list = number of transitions.
988 * List verdict_model_lll: list containing, per model, the number
989 of included transitions that satisfy the criterion.
990 One number per model.
991
992 '''
993
994 self.line_lll = dict()
995 self.model_lll = []
996 for ist,st in enumerate(self.translist):
997 if ist in self.includedtrans:
998 self.line_lll[st] = []
999 lll_thresh = self.lll_threshold[ist]
1000 for i,lll in enumerate(self.loglikelihood[st]):
1001 if lll >= lll_thresh:
1002 self.line_lll[st].append(1)
1003 else:
1004 self.line_lll[st].append(0)
1005
1006 for ii in range(len(self.star_grid)):
1007 self.model_lll.append([self.line_lll[tr][ii] \
1008 for i,tr in enumerate(self.translist) \
1009 if i in self.includedtrans])
1010 self.verdict_model_lll = sum(self.model_lll, axis = 1)
1011
1012 if plot:
1013 plot_id = 'plot_%.4i-%.2i-%.2ih%.2i-%.2i-%.2i' \
1014 %(gmtime()[0],gmtime()[1],gmtime()[2],\
1015 gmtime()[3],gmtime()[4],gmtime()[5])
1016 self.modellist = [self.star_grid[ii]['GAS_LINES'][0]\
1017 .getModelId().replace('_','-') \
1018 for ii in range(len(self.star_grid))]
1019
1020 plt.clf()
1021 fig = plt.figure(1, figsize = (15, 10))
1022 ax1 = fig.add_subplot(111)
1023 ax1.set_xticks(np.arange(len(self.includedtrans))-0.5)
1024 ax1.set_yticks(np.arange(len(self.modellist)))
1025 ax1.xaxis.set_ticklabels([str(st.jup)+'-'+str(st.jlow)+' '+\
1026 st.telescope if self.noisy[ist]== False\
1027 else '\\textbf{'+str(st.jup)+'-'+str(st.jlow)+' '+\
1028 st.telescope+'}' for ist,st in enumerate(self.translist) \
1029 if ist in self.includedtrans], rotation = 60)
1030 ax1.yaxis.set_ticklabels([i for i in self.modellist])
1031 ax1.imshow(self.model_lll, interpolation='nearest', \
1032 origin='upper', cmap = 'Blues')
1033 ax1.set_title('LLL criterion')
1034
1035 plt.tight_layout()
1036 path = os.path.join(getattr(cc.path,self.code.lower()), \
1037 self.path_code,'stars', self.star_name)
1038 DataIO.testFolderExistence(os.path.join(path,'resostats'))
1039 filename = os.path.join(path,'resostats',\
1040 'LLL-%s_len_%s_vcut_%s-%s--%s'%(self.modellist[0],\
1041 (len(self.modellist)),self.vmin,self.vmax,plot_id))
1042 fig.savefig(filename+'.pdf')
1043 print '*** Plot of stats can be found at:'
1044 print filename+'.pdf'
1045
1046
1047
1049
1050 '''
1051 Calculate whether the loglikelihood function per transition lies within
1052 the confidence interval.
1053 Output similar to that of calcRatioxxxTmb.
1054
1055 Output can be found in:
1056 * Dictionary line_lll_range: dictionary containing whether
1057 a line satisfies the condition or not. One list per transition,
1058 each list containing the verdict per model.
1059 * List model_lll_range: list containing the verdict per transition,
1060 per model. Number of lists = number of models.
1061 Length of each list = number of transitions.
1062 * List verdict_model_lll_range: list containing, per model, the
1063 number of included transitions that satisfy the criterion.
1064 One number per model.
1065
1066 '''
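# A model passes for a given line when its loglikelihood lies within that
# line's 95% confidence interval, i.e. lll_threshold <= lll <= max(lll).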
1067
1068 self.line_lll_range = dict()
1069 self.model_lll_range = []
1070
1071 for ist,st in enumerate(self.translist):
1072 if ist in self.includedtrans:
1073 self.line_lll_range[st] = []
1074 maxlll = max(self.loglikelihood[st])
1075 lll_thresh = self.lll_threshold[ist]
1076 for i,lll in enumerate(self.loglikelihood[st]):
1077 if maxlll >= lll \
1078 and lll >= self.lll_threshold[ist]:
1079 self.line_lll_range[st].append(1)
1080 else:
1081 self.line_lll_range[st].append(0)
1082
1083 for ii in range(len(self.star_grid)):
1084 self.model_lll_range.append([self.line_lll_range[tr][ii] \
1085 for i,tr in enumerate(self.translist) \
1086 if i in self.includedtrans])
1087
1088 self.verdict_model_lll_range = sum(self.model_lll_range, axis = 1)
1089
1090 if plot:
1091 plot_id = 'plot_%.4i-%.2i-%.2ih%.2i-%.2i-%.2i' \
1092 %(gmtime()[0],gmtime()[1],gmtime()[2],\
1093 gmtime()[3],gmtime()[4],gmtime()[5])
1094 self.modellist = [self.star_grid[ii]['GAS_LINES'][0]\
1095 .getModelId().replace('_','-') \
1096 for ii in range(len(self.star_grid))]
1097
1098 plt.clf()
1099 fig = plt.figure(1, figsize = (15, 10))
1100 ax2 = fig.add_subplot(111)
1101 ax2.set_xticks(np.arange(len(self.includedtrans))-0.5)
1102 ax2.set_yticks(np.arange(len(self.modellist)))
1103 ax2.xaxis.set_ticklabels([str(st.jup)+'-'+str(st.jlow)+' '\
1104 +st.telescope if self.noisy[ist]== False\
1105 else '\\textbf{'+str(st.jup)+'-'+str(st.jlow)+' '+\
1106 st.telescope+'}' for ist,st in enumerate(self.translist) \
1107 if ist in self.includedtrans], rotation = 60)
1108 ax2.yaxis.set_ticklabels([i for i in self.modellist])
1109 ax2.imshow(self.model_lll_range, interpolation='nearest', \
1110 origin='upper', cmap = 'Blues')
1111 ax2.set_title('LLL 95\% confidence interval')
1112
1113 plt.tight_layout()
1114 path = os.path.join(getattr(cc.path,self.code.lower()),\
1115 self.path_code,'stars', self.star_name)
1116 DataIO.testFolderExistence(os.path.join(path,'resostats'))
1117 filename = os.path.join(path, 'resostats','LLL+range-%s_len_%s-%s'\
1118 %(self.modellist[0],(len(self.modellist)),plot_id))
1119 fig.savefig(filename+'.pdf')
1120 print '*** Plot of stats can be found at:'
1121 print filename+'.pdf'
1122
1123
1124
1125
1126 def calcRatioIntTmb(self, useNoisy = 1, useRms = 0, \
1127 err = 0.2, err_noisy = 0.3, plot = 0):
1128
1129 '''
1130 Calculate the ratio of the integrated main beam intensity per transition.
1131
1132 @keyword useNoisy: assign a larger error to noisy lines
1133 @type useNoisy: bool
1134
1135 @keyword useRms: use statistical noise in addition to instrumental error
1136 @type useRms: bool
1137
1138 @keyword err: error on data
1139 @type err: float
1140
1141 @keyword err_noisy: error on noisy data (only needed when useNoisy = 1)
1142 @type err_noisy: float
1143
1144 @keyword plot: Plot the output
1145 @type plot: bool
1146
1147 Output can be found in:
1148 * Dictionary line_ratioint: dictionary containing whether
1149 a line satisfies the condition or not. One list per transition,
1150 each list containing the verdict per model.
1151 * List model_ratioint: list containing the verdict per transition,
1152 per model. Number of lists = number of models.
1153 Length of each list = number of transitions.
1154 * List verdict_model_ratioint: list containing, per model, the
1155 number of included transitions that satisfy the criterion.
1156 One number per model.
1157
1158 '''
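# Acceptance window in practice (a sketch): with the defaults err=0.2,
# err_noisy=0.3 and useRms=0, a model passes for a non-noisy line when
# 0.8 <= model/data <= 1.2, and for a noisy line when 0.7 <= model/data <= 1.3.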
1159
1160 print 'Calculating ratios of integrated main beam intensities...'
1161 print 'Error on data = '+str(err)
1162 if useNoisy:
1163 print 'Error on noisy data = '+str(err_noisy)
1164
1165 self.line_ratioint = dict()
1166 translist = [self.translist[i] for i in self.includedtrans]
1167
1168 for ist, st in zip(self.includedtrans, translist):
1169 if useNoisy == 1 and useRms == 1:
1170 if self.noisy[ist] == True:
1171 self.line_ratioint[st] = [1 \
1172 if x <= (1. + err_noisy + st.getNoise()) \
1173 and x >= (1. - err_noisy - st.getNoise()) \
1174 else 0 for x in self.ratioint[st]]
1175 else:
1176 self.line_ratioint[st] = [1 \
1177 if x <= (1. + err + st.getNoise()) \
1178 and x >= (1. - err - st.getNoise()) \
1179 else 0 for x in self.ratioint[st]]
1180 elif useNoisy == 1 and useRms == 0:
1181 if self.noisy[ist] == True:
1182 self.line_ratioint[st] = [1 \
1183 if x <= (1. + err_noisy) and x >= (1. - err_noisy) \
1184 else 0 for x in self.ratioint[st]]
1185 else:
1186 self.line_ratioint[st] = [1 \
1187 if x <= 1. + err and x >= 1. - err \
1188 else 0 for x in self.ratioint[st]]
1189 elif useNoisy == 0 and useRms == 1:
1190 self.line_ratioint[st] = [1 \
1191 if x <= (1. + err + st.getNoise()) \
1192 and x >= (1. - err - st.getNoise()) \
1193 else 0 for x in self.ratioint[st]]
1194 else:
1195 self.line_ratioint[st] = [1 if x <= 1. + err \
1196 and x >= 1. - err else 0 for x in self.ratioint[st]]
1197
1198
1199 self.model_ratioint = []
1200 for ii in range(len(self.star_grid)):
1201 self.model_ratioint.append([self.line_ratioint[tr][ii] \
1202 for tr in translist])
1203 self.verdict_model_ratioint = \
1204 sum(array(self.model_ratioint), axis = 1)
1205
1206 if plot:
1207 plot_id = 'plot_%.4i-%.2i-%.2ih%.2i-%.2i-%.2i' \
1208 %(gmtime()[0],gmtime()[1],gmtime()[2],\
1209 gmtime()[3],gmtime()[4],gmtime()[5])
1210 self.modellist = [self.star_grid[ii]['GAS_LINES'][0].\
1211 getModelId().replace('_','-') \
1212 for ii in range(len(self.star_grid))]
1213
1214 plt.clf()
1215 fig = plt.figure(2, figsize = (15, 10))
1216 ax = fig.add_subplot(111)
1217 ax.set_xticks(np.arange(len(translist))-0.5)
1218 ax.set_yticks(np.arange(len(self.modellist)))
1219 ax.xaxis.set_ticklabels([str(st.jup)+'-'+str(st.jlow)+' '+\
1220 st.telescope if self.noisy[ist]== False\
1221 else '\\textbf{'+str(st.jup)+'-'+str(st.jlow)+' '+st.telescope+'}' \
1222 for ist,st in enumerate(translist)], rotation = 60)
1223 ax.yaxis.set_ticklabels([i for i in self.modellist])
1224 ax.imshow(self.model_ratioint, interpolation='nearest', \
1225 origin='upper', cmap = 'Blues')
1226 plt.tight_layout()
1227 fig.suptitle('Ratio of integrated intensities \\ Error = '+\
1228 str(err*100.)+'\%, error noisy lines = '+\
1229 str(err_noisy*100.)+'\%', size = 18)
1230 path = os.path.join(getattr(cc.path,self.code.lower()), \
1231 self.path_code,'stars', self.star_name)
1232 DataIO.testFolderExistence(os.path.join(path,'resostats'))
1233 filename_combo = os.path.join(path, 'resostats','int-%s_len_%s-%s'%\
1234 (self.modellist[0],(len(self.modellist)),plot_id))
1235 fig.savefig(filename_combo+'.pdf')
1236 print '*** Plot of stats can be found at:'
1237 print filename_combo+'.pdf'
1238
1239
1240 def combineRatioIntLLL(self, useNoisy = 1, useRms = 0,\
1241 err = 0.2, err_noisy = 0.3, plot = 0):
1242
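'''
Combine the integrated-intensity-ratio criterion with the loglikelihood
criterion: a model passes a given (included) transition only if both
calcRatioIntTmb and calcLLL mark it as passing. Results are stored in
combRatioIntLLL (per model, per transition) and verdict_combRatioIntLLL
(number of passing transitions per model). The useNoisy, useRms, err and
err_noisy keywords are passed on to calcRatioIntTmb; plot produces a
three-panel overview figure (combination, ratio, loglikelihood).

'''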
1243 self.calcRatioIntTmb(useNoisy=useNoisy,useRms=useRms,\
1244 err=err,err_noisy=err_noisy)
1245 self.combRatioIntLLL = []
1246 self.calcLLL()
1247
1248 translist = [self.translist[i] for i in self.includedtrans]
1249
1250 for ii in range(len(self.star_grid)):
1251 self.combRatioIntLLL.append([self.model_lll[ii][x] \
1252 if self.model_lll[ii][x] == self.model_ratioint[ii][x] else 0 \
1253 for x in range(len(translist))])
1254
1255 self.verdict_combRatioIntLLL = sum(array(self.combRatioIntLLL), axis = 1)
1256
1257 if plot:
1258 plt.clf()
1259 plot_id = 'plot_%.4i-%.2i-%.2ih%.2i-%.2i-%.2i' \
1260 %(gmtime()[0],gmtime()[1],gmtime()[2],\
1261 gmtime()[3],gmtime()[4],gmtime()[5])
1262 self.modellist = [self.star_grid[ii]['GAS_LINES'][0]\
1263 .getModelId().replace('_','-') \
1264 for ii in range(len(self.star_grid))]
1265
1266 fig = plt.figure(2, figsize = (15, 10))
1267 ax1 = fig.add_subplot(131)
1268 ax1.set_xticks(np.arange(len(translist))-0.5)
1269 ax1.set_yticks(np.arange(len(self.modellist)))
1270 ax1.xaxis.set_ticklabels([str(st.jup)+'-'+str(st.jlow)+' '\
1271 +st.telescope if self.noisy[ist]== False\
1272 else '\\textbf{'+str(st.jup)+'-'+str(st.jlow)+' '+st.telescope+'}' \
1273 for ist,st in enumerate(translist)], rotation = 60)
1274 ax1.yaxis.set_ticklabels([i for i in self.modellist])
1275 ax1.imshow(self.combRatioIntLLL, interpolation='nearest', \
1276 origin='upper', cmap = 'Blues')
1277 ax1.set_title('Combination')
1278
1279 ax2 = fig.add_subplot(132)
1280 ax2.set_xticks(np.arange(len(translist))-0.5)
1281 ax2.set_yticks(np.arange(len(self.modellist)))
1282 ax2.xaxis.set_ticklabels([str(st.jup)+'-'+str(st.jlow)+' '\
1283 +st.telescope if self.noisy[ist]== False\
1284 else '\\textbf{'+str(st.jup)+'-'+str(st.jlow)+' '+st.telescope+'}' \
1285 for ist,st in enumerate(translist)], rotation = 60)
1286 ax2.yaxis.set_ticklabels([i for i in self.modellist])
1287 ax2.imshow(self.model_ratioint, interpolation='nearest', \
1288 origin='upper', cmap = 'Blues')
1289 ax2.set_title('Ratio integrated intensities')
1290
1291 ax3 = fig.add_subplot(133)
1292 ax3.set_xticks(np.arange(len(translist))-0.5)
1293 ax3.set_yticks(np.arange(len(self.modellist)))
1294 ax3.xaxis.set_ticklabels([str(st.jup)+'-'+str(st.jlow)+' '\
1295 +st.telescope if self.noisy[ist]== False\
1296 else '\\textbf{'+str(st.jup)+'-'+str(st.jlow)+' '+st.telescope+'}'\
1297 for ist,st in enumerate(translist)], rotation = 60)
1298 ax3.yaxis.set_ticklabels([i for i in self.modellist])
1299 ax3.imshow(self.model_lll, interpolation='nearest',\
1300 origin='upper', cmap = 'Blues')
1301 ax3.set_title('Loglikelihood')
1302
1303 plt.tight_layout()
1304 fig.suptitle('Models complying with integrated intensity and \
1305 loglikelihood criteria \\ Error = '+str(err*100.)+'\%, \
1306 error noisy lines = '+str(err_noisy*100.)+'\%', size = 18)
1307 path = os.path.join(getattr(cc.path,self.code.lower()), \
1308 self.path_code,'stars', self.star_name)
1309 DataIO.testFolderExistence(os.path.join(path,'resostats'))
1310 filename = os.path.join(path, 'resostats',\
1311 'intLLL-%s_len_%s_vcut_%s-%s--%s'%(self.modellist[0],\
1312 (len(self.modellist)),self.vmin,self.vmax,plot_id))
1313 fig.savefig(filename+'.pdf')
1314 print '*** Plot of stats can be found at:'
1315 print filename+'.pdf'
1316
1317
1318
1323
1324
1325
1326
1327
1328 def calcRatioPeakTmb(self, useNoisy = 1, useRms = 0, \
1329 err = 0.2, err_noisy = 0.3, plot = 0):
1330
1331 '''
1332 Calculate the ratio of the peak main beam intensity per transition.
1333
1334 @keyword useNoisy: assign a larger error to noisy lines
1335 @type useNoisy: bool
1336
1337 @keyword useRms: use statistical noise in addition to instrumental error
1338 @type useRms: bool
1339
1340 @keyword err: error on data
1341 @type err: float
1342
1343 @keyword err_noisy: error on noisy data (only needed when useNoisy = 1)
1344 @type err_noisy: float
1345
1346 Output can be found in:
1347 * Dictionary verdict_ratiopeak: dictionary containing whether
1348 a line satisfies the condition or not. One list per transition,
1349 each list containing the verdict per model.
1350 * List model_ratiopeak: list containing the verdict per transition,
1351 per model. Number of lists = number of models.
1352 Length of each list = number of transitions.
1353 * List model_ratiopeak_verdict: list containing, per model, the
1354 number of transitions that satisfy the criterion.
1355 One number per model.
1356
1357 '''
1358
1359 print 'Calculating ratios of peak main beam intensities...'
1360 print 'Error on data = '+str(err)
1361 if useNoisy:
1362 print 'Error on noisy data = '+str(err_noisy)
1363
1364 self.verdict_ratiopeak = dict()
1365 for ist, st in enumerate(self.translist):
1366 if useNoisy == 1 and useRms == 1:
1367 if self.noisy[ist] == True:
1368 self.verdict_ratiopeak[st] = [1 \
1369 if x <= (1. + err_noisy + st.getNoise()) \
1370 and x >= (1. - err_noisy - st.getNoise()) \
1371 else 0 for x in self.ratiopeak[st]]
1372 else:
1373 self.verdict_ratiopeak[st] = [1 \
1374 if x <= (1. + err + st.getNoise()) \
1375 and x >= (1. - err - st.getNoise()) \
1376 else 0 for x in self.ratiopeak[st]]
1377 elif useNoisy == 1 and useRms == 0:
1378 if self.noisy[ist] == True:
1379 self.verdict_ratiopeak[st] = [1 \
1380 if x <= (1. + err_noisy) \
1381 and x >= (1. - err_noisy)\
1382 else 0 for x in self.ratiopeak[st]]
1383 else:
1384 self.verdict_ratiopeak[st] = [1 \
1385 if x <= 1. + err and x >= 1. - err \
1386 else 0 for x in self.ratiopeak[st]]
1387 elif useNoisy == 0 and useRms == 1:
1388 self.verdict_ratiopeak[st] = [1 \
1389 if x <= (1. + err + st.getNoise()) \
1390 and x >= (1. - err - st.getNoise()) \
1391 else 0 for x in self.ratiopeak[st]]
1392 else:
1393 self.verdict_ratiopeak[st] = [1 if x <= 1. + err \
1394 and x >= 1. - err else 0 for x in self.ratiopeak[st]]
1395
1396
1397 self.model_ratiopeak = []
1398 for ii in range(len(self.star_grid)):
1399 self.model_ratiopeak.append([self.verdict_ratiopeak[tr][ii] \
1400 for tr in self.translist])
1401 self.model_ratiopeak_verdict = sum(array(self.model_ratiopeak), axis = 1)
1402
1403 if plot:
1404 self.modellist = [self.star_grid[ii]['GAS_LINES'][0].getModelId() \
1405 for ii in range(len(self.star_grid))]
1406 tr = [self.verdict_ratiopeak[st] for st in self.translist]
1407
1408 plt.clf()
1409 fig = plt.figure(1, figsize = (15, 10))
1410 ax = fig.add_subplot(111)
1411 ax.set_xticks(np.arange(len(tr))-0.5)
1412 ax.set_yticks(np.arange(len(self.modellist)))
1413 ax.xaxis.set_ticklabels([str(st.jup)+'-'+str(st.jlow)+' '+\
1414 st.telescope for st in self.translist], rotation = 60)
1415 ax.yaxis.set_ticklabels([i for i in self.modellist])
1416 ax.imshow(self.model_ratiopeak, interpolation='nearest', \
1417 origin='upper', cmap = 'Blues')
1418 plt.tight_layout()
1419 fig.suptitle('Ratio of peak intensities', size = 18)
1420 path = os.path.join(getattr(cc.path,self.code.lower()), \
1421 self.path_code,'stars', self.star_name)
1422 DataIO.testFolderExistence(os.path.join(path,'resostats'))
1423 filename_combo = os.path.join(path, 'resostats','peak-%s_len_%s'\
1424 %(self.modellist[0],(len(self.modellist))))
1425 fig.savefig(filename_combo+'.pdf')
1426 print '*** Plot of stats can be found at:'
1427 print filename_combo+'.pdf'
1428
1429
1430
1431
1432
1433 def calcRatioComboTmb(self, useNoisy = 1, useRms = 0, \
1434 err = 0.2, err_noisy = 0.3, plot = 0):
1435
1436 '''
1437 Combine the ratio of the integrated and peak main beam intensity
1438 per transition.
1439
1440 @keyword useNoisy: assign a larger error to noisy lines
1441 @type useNoisy: bool
1442
1443 @keyword useRms: use statist. noise in addition to instrumental error
1444 @type useRms: bool
1445
1446 @keyword err: error on data
1447 @type err: float
1448
1449 @keyword err_noisy: error on noisy data (only needed when useNoisy = 1)
1450 @type err_noisy: float
1451
1452 Output can be found in:
1453 * Dictionary verdict_ratiocombo: dictionary containing whether
1454 a line satisfies the condition or not. One list per transition,
1455 each list containing the verdict per model.
1456 * List model_ratiocombo: list containing the verdict per transition,
1457 per model. Number of lists = number of models.
1458 Length of each list = number of transitions.
1459 * List model_ratiocombo_verdict: list containing, per model, the
1460 number of transitions that satisfy the criterion.
1461 One number per model.
1462
1463 '''
1464
1465 print 'Calculating ratios of main beam intensities, \
1466 combining int and peak... '
1467 print 'Error on data = '+str(err)
1468 if useNoisy:
1469 print 'Error on noisy data = '+str(err_noisy)
1470
1471 self.verdict_ratiocombo = dict()
1472 self.y = dict()
1473 for ist, st in enumerate(self.translist):
1474 if useNoisy == 1 and useRms == 1:
1475 if self.noisy[ist] == True:
1476 self.y[st] = [[1 if x <= (1. + err_noisy + st.getNoise()) \
1477 and x >= (1. - err_noisy - st.getNoise()) \
1478 else 0 for x in self.ratiocombo[st][i]] \
1479 for i in range(len(self.ratiocombo[st]))]
1480 else:
1481 self.y[st] = [[1 if x <= (1. + err + st.getNoise()) \
1482 and x >= (1. - err - st.getNoise()) \
1483 else 0 for x in self.ratiocombo[st][i]] \
1484 for i in range(len(self.ratiocombo[st]))]
1485 elif useNoisy == 1 and useRms == 0:
1486 if self.noisy[ist] == True:
1487 self.y[st] = [[1 if x <= (1. + err_noisy)
1488 and x >= (1. - err_noisy) \
1489 else 0 for x in self.ratiocombo[st][i]] \
1490 for i in range(len(self.ratiocombo[st]))]
1491 else:
1492 self.y[st] = [[1 if x <= 1. + err \
1493 and x >= 1. - err \
1494 else 0 for x in self.ratiocombo[st][i]] \
1495 for i in range(len(self.ratiocombo[st]))]
1496 elif useNoisy == 0 and useRms == 1:
1497 self.y[st] = [[1 if x <= (1. + err + st.getNoise()) \
1498 and x >= (1. - err - st.getNoise()) \
1499 else 0 for x in self.ratiocombo[st][i]] \
1500 for i in range(len(self.ratiocombo[st]))]
1501 else:
1502 self.y[st] = [[1 if x <= 1. + err and x >= 1. - err \
1503 else 0 for x in self.ratiocombo[st][i]] \
1504 for i in range(len(self.ratiocombo[st]))]
1505
1506 for st in self.translist:
1507 self.verdict_ratiocombo[st] = [0 if 0 in self.y[st][i] \
1508 else 1 for i in range(len(self.y[st]))]
1509
1510 self.model_ratiocombo = []
1511 for ii in range(len(self.star_grid)):
1512 self.model_ratiocombo.append([self.verdict_ratiocombo[tr][ii] \
1513 for tr in self.translist])
1514 self.model_ratiocombo_verdict = \
1515 sum(array(self.model_ratiocombo), axis = 1)
1516
1517 if plot:
1518 self.modellist = [self.star_grid[ii]['GAS_LINES'][0].getModelId() \
1519 for ii in range(len(self.star_grid))]
1520 tr = [self.verdict_ratiocombo[st] for st in self.translist]
1521
1522 plt.clf()
1523 fig = plt.figure(3, figsize = (15, 10))
1524 ax = fig.add_subplot(111)
1525 ax.set_xticks(np.arange(len(tr))-0.5)
1526 ax.set_yticks(np.arange(len(self.modellist)))
1527 ax.xaxis.set_ticklabels([str(st.jup)+'-'+str(st.jlow)+' '+\
1528 st.telescope for st in self.translist], rotation = 60)
1529 ax.yaxis.set_ticklabels([i for i in self.modellist])
1530 ax.imshow(self.model_ratiocombo, interpolation='nearest',\
1531 origin='upper', cmap = 'Blues')
1532 plt.tight_layout()
1533 fig.suptitle('Combo - ratio of integrated intensities \
1534 and peak intensities', size = 18)
1535 path = os.path.join(getattr(cc.path,self.code.lower()), \
1536 self.path_code,'stars', self.star_name)
1537 DataIO.testFolderExistence(os.path.join(path,'resostats'))
1538 filename_combo = os.path.join(path, 'resostats','combo-%s_len_%s'\
1539 %(self.modellist[0],(len(self.modellist))))
1540 fig.savefig(filename_combo+'.pdf')
1541 print '*** Plot of stats can be found at:'
1542 print filename_combo+'.pdf'
1543
1544
1545
1546 def calcChiSquared(self, P = 2, useTeleUncertainties = 1, \
1547 useNoisy = 1, err = 0.2, err_noisy = 0.3):
1548
1549 '''
1550 Calculate the (reduced) chi squared of the integrated
1551 main beam intensities.
1552
1553 Output can be found in:
1554 - list chiSquared: Chi squared values of each model
1555 - list redChiSquared: Reduced chi squared values of each model
1556 - float errRedChiSquared: Error on reduced chi squared
1557 - list redChiSquaredWithinThreeSigma: Models that have a reduced chi
1558 squared within three sigma of the
1559 best model (ie the model with the
1560 lowest reduced chi squared)
1561
1562 @keyword P: Degrees of freedom = len(data) - P
1563 @type P: int
1564
1565 @keyword useTeleUncertainties: Use telescope uncertainties
1566 instead of a fixed error.
1567 @type useTeleUncertainties: bool
1568
1569 @keyword useNoisy: Use a higher uncertainty for noisy lines
1570 @type useNoisy: bool
1571
1572 @keyword err: Fixed uncertainty on lines
1573 (if not telescope uncertainties)
1574 @type err: float
1575
1576 @keyword err_noisy: Uncertainty on noisy lines
1577 @type err_noisy: float
1578
1579 '''
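# Sketch of the quantities below, as implied by the calls to
# BasicStats.calcChiSquared: the reduced chi squared is
#   chi2_red = sum_i ((data_i - model_i)/noise_i)**2 / (N - P),
# with N the number of lines and P the number of free parameters
# (dof = P - 1 is what gets passed as ndf), and the unreduced value is
# recovered as chi2 = chi2_red * (N - P).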
1580
1581 self.modellist = [self.star_grid[ii]['GAS_LINES'][0].getModelId() \
1582 for ii in range(len(self.star_grid))]
1583
1584 self.chiSquared = []
1585 self.redChiSquared = []
1586 data = array([self.dinttmb[st] for st in self.translist])
1587 dof = P - 1
1588
1589 for mm in range(len(self.modellist)):
1590 model = array([self.minttmb[st][mm] for st in self.translist])
1591
1592 if useTeleUncertainties == 1 and useNoisy == 1:
1593 noise = array([self.tele_uncertainties[t.telescope]\
1594 *self.dinttmb[t] if not self.noisy[i] \
1595 else err_noisy*self.dinttmb[t] \
1596 for i,t in enumerate(self.translist)])
1597 cs = BasicStats.calcChiSquared(data,model,noise,ndf=dof)
1598 self.redChiSquared.append(cs)
1599 self.chiSquared.append(cs*(len(data) - dof - 1))
1600
1601 if useTeleUncertainties == 0 and useNoisy == 1:
1602 noise = array([err*self.dinttmb[t] if not self.noisy[i] \
1603 else err_noisy*self.dinttmb[t] \
1604 for i,t in enumerate(self.translist)])
1605 cs = BasicStats.calcChiSquared(data,model,noise,ndf=dof)
1606 self.redChiSquared.append(\
1607 BasicStats.calcChiSquared(data,model,noise,ndf=dof))
1608 self.chiSquared.append(cs*(len(data) - dof - 1))
1609
1610 if useTeleUncertainties == 1 and useNoisy == 0:
1611 noise = [self.tele_uncertainties[t.telescope] \
1612 for t in self.translist]*data
1613 cs = BasicStats.calcChiSquared(data,model,noise,ndf=dof)
1614 self.redChiSquared.append(\
1615 BasicStats.calcChiSquared(data,model,noise,ndf=dof))
1616 self.chiSquared.append(cs*(len(data) - dof - 1))
1617
1618 if useTeleUncertainties == 0 and useNoisy == 0:
1619 noise = err*data
1620 cs = BasicStats.calcChiSquared(data,model,noise,ndf=dof)
1621 self.redChiSquared.append(\
1622 BasicStats.calcChiSquared(data,model,noise,ndf=dof))
1623 self.chiSquared.append(cs*(len(data) - dof - 1))
1624
1625
1626 self.errRedChiSquared = (2.0/len(self.translist))**0.5
1627
1628 best = np.where(np.array(self.redChiSquared) \
1629 == min(self.redChiSquared))[0][0]
1630
1631 self.redChiSquaredWithinThreeSigma = \
1632 [ii for ii in range(len(self.modellist)) \
1633 if self.redChiSquared[ii]>\
1634 (min(self.redChiSquared)-3*self.errRedChiSquared)\
1635 and self.redChiSquared[ii] \
1636 < (min(self.redChiSquared) + 3*self.errRedChiSquared)]
1637
1638 print '*******************************************************'
1639 print '****************** Chi Squared ************************'
1640 print '******** Chi squared - Reduced chi squared ********'
1641 print '*******************************************************'
1642 for mm in range(len(self.modellist)):
1643 print str(self.modellist[mm]) + '\t' + \
1644 str('%.3f' %self.chiSquared[mm]) + '\t' + \
1645 str('%.3f' %self.redChiSquared[mm]) + '+-' + \
1646 str('%.3f' %self.errRedChiSquared)
1647 print '\n'
1648 print 'Best model = ' + str(best) +', '+ str(self.modellist[best])+\
1649 ', with ' + str('%.3f' %self.redChiSquared[best])
1650 print '*******************************************************'
1651
1652
1653
1654
1655
1656
1657 def plotBestFit(self, plot_int = 1, plot_peak = 0, plot_combo = 0):
1658
1659 '''
1660 Can only be performed after self.selectBestFitperLine().
1661 
1662 Visualization of self.occurences_bfm(mode), self.verdict_(mode)_soft,
1663 and self.occurences_bfmlll.
1664
1665 '''
1666
1667 jup = [t.jup for t in self.translist]
1668
1669 if plot_int:
1670 plt.close()
1671 fig1 = plt.figure(1, figsize = (20,11))
1672 ax1 = fig1.add_subplot(131)
1673 ax1.set_xticks(np.arange(len(jup)))
1674 ax1.xaxis.set_ticklabels(jup)
1675 ax1.set_yticks(np.arange(len(self.modellist)))
1676 ax1.yaxis.set_ticklabels([" -- ".join([self.modellist[i][6:],\
1677 str(self.fittedModels_bfmint[i])]) \
1678 for i in range(len(self.modellist))])
1679 ax1.imshow(self.occurences_bfmint, interpolation='nearest',\
1680 origin='upper', cmap = 'Blues')
1681 ax1.set_xlabel('$J_{up}$')
1682 ax1.set_title('Int')
1683
1684 ax2 = fig1.add_subplot(132)
1685 ax2.set_xticks(np.arange(len(jup)))
1686 ax2.set_yticks(np.arange(len(self.modellist)))
1687 ax2.xaxis.set_ticklabels(jup)
1688 ax2.yaxis.set_ticklabels([" -- ".join([self.modellist[i][6:],\
1689 str(self.verdictpermodel_int[i])])\
1690 for i in range(len(self.modellist))])
1691 ax2.imshow(self.verdict_int_soft, interpolation='nearest',\
1692 origin='upper', cmap = 'Blues')
1693 ax2.set_xlabel('$J_{up}$')
1694 ax2.set_title('$\int T_\mathrm{mb}$')
1695
1696 ax3 = fig1.add_subplot(133)
1697 ax3.set_xticks(np.arange(len(jup)))
1698 ax3.set_yticks(np.arange(len(self.modellist)))
1699 ax3.xaxis.set_ticklabels(jup)
1700 ax3.yaxis.set_ticklabels([" -- ".join([self.modellist[i][6:],\
1701 str(self.fittedModels_bfmlll[i])]) \
1702 for i in range(len(self.modellist))])
1703 ax3.imshow(self.occurences_bfmlll, interpolation='nearest',
1704 origin='upper', cmap = 'Blues')
1705 ax3.set_xlabel('$J_{up}$')
1706 ax3.set_title('LLL')
1707
1708 plt.tight_layout()
1709
1710 fig1.suptitle('ResoStats mode = int',size = 18)
1711
1712 path = os.path.join(getattr(cc.path,self.code.lower()), \
1713 self.path_code,'stars', self.star_name)
1714 DataIO.testFolderExistence(os.path.join(path,'resostats'))
1715 filename_int = os.path.join(path, 'resostats',\
1716 'int-%s_len_%i'%(self.modellist[0],(len(self.modellist))))
1717 fig1.savefig(filename_int+'.pdf')
1718
1719 print '** Plot of ResoStats Int can be found at:'
1720 print filename_int+'.pdf'
1721 print '***********************************'
1722
1723 if plot_peak:
1724 plt.close()
1725 fig2 = plt.figure(2, figsize = (20,11))
1726 ax1 = fig2.add_subplot(131)
1727 ax1.set_xticks(np.arange(len(jup)))
1728 ax1.xaxis.set_ticklabels(jup)
1729 ax1.set_yticks(np.arange(len(self.modellist)))
1730 ax1.yaxis.set_ticklabels([" -- ".join([self.modellist[i][6:],\
1731 str(self.fittedModels_bfmpeak[i])]) \
1732 for i in range(len(self.modellist))])
1733 ax1.imshow(self.occurences_bfmpeak, interpolation='nearest',\
1734 origin='upper', cmap = 'Blues')
1735 ax1.set_xlabel('$J_{up}$')
1736 ax1.set_title('Peak')
1737
1738 ax2 = fig2.add_subplot(132)
1739 ax2.set_xticks(np.arange(len(jup)))
1740 ax2.set_yticks(np.arange(len(self.modellist)))
1741 ax2.xaxis.set_ticklabels(jup)
1742 ax2.yaxis.set_ticklabels([" -- ".join([self.modellist[i][6:], \
1743 str(self.verdictpermodel_peak[i])]) \
1744 for i in range(len(self.modellist))])
1745 ax2.imshow(self.verdict_peak_hard, interpolation='nearest', \
1746 origin='upper', cmap = 'Blues')
1747 ax2.set_xlabel('$J_{up}$')
1748             ax2.set_title('$T_\mathrm{mb,peak}$')
1749
1750 ax3 = fig2.add_subplot(133)
1751 ax3.set_xticks(np.arange(len(jup)))
1752 ax3.set_yticks(np.arange(len(self.modellist)))
1753 ax3.xaxis.set_ticklabels(jup)
1754 ax3.yaxis.set_ticklabels([" -- ".join([self.modellist[i][6:], \
1755 str(self.fittedModels_bfmlll[i])]) \
1756 for i in range(len(self.modellist))])
1757 ax3.imshow(self.occurences_bfmlll, interpolation='nearest', \
1758 origin='upper', cmap = 'Blues')
1759 ax3.set_xlabel('$J_{up}$')
1760 ax3.set_title('LLL')
1761
1762 plt.tight_layout()
1763 fig2.suptitle('ResoStats mode = peak',size = 18)
1764 path = os.path.join(getattr(cc.path,self.code.lower()), \
1765 self.path_code,'stars', self.star_name)
1766 DataIO.testFolderExistence(os.path.join(path,'resostats'))
1767 filename_peak = os.path.join(path, 'resostats','peak-%s_len_%i'\
1768 %(self.modellist[0],(len(self.modellist))))
1769 fig2.savefig(filename_peak+'.pdf')
1770
1771 print '** Plot of ResoStats Peak can be found at:'
1772 print filename_peak+'.pdf'
1773 print '***********************************'
1774
1775
1776 if plot_combo:
1777 plt.close()
1778 fig3 = plt.figure(3, figsize = (20,11))
1779 ax1 = fig3.add_subplot(131)
1780 ax1.set_xticks(np.arange(len(jup)))
1781 ax1.xaxis.set_ticklabels(jup)
1782 ax1.set_yticks(np.arange(len(self.modellist)))
1783 ax1.yaxis.set_ticklabels([" -- ".join([self.modellist[i][6:], \
1784 str(self.fittedModels_bfmcombo[i])]) \
1785 for i in range(len(self.modellist))])
1786 ax1.imshow(self.occurences_bfmcombo, interpolation='nearest', \
1787 origin='upper', cmap = 'Blues')
1788 ax1.set_xlabel('$J_{up}$')
1789 ax1.set_title('Combo')
1790
1791 ax2 = fig3.add_subplot(132)
1792 ax2.set_xticks(np.arange(len(jup)))
1793 ax2.set_yticks(np.arange(len(self.modellist)))
1794 ax2.xaxis.set_ticklabels(jup)
1795 ax2.yaxis.set_ticklabels([" -- ".join([self.modellist[i][6:],\
1796 str(self.verdictpermodel_combo[i])]) \
1797 for i in range(len(self.modellist))])
1798 ax2.imshow(self.verdict_combo_hard, interpolation='nearest',\
1799 origin='upper', cmap = 'Blues')
1800 ax2.set_xlabel('$J_{up}$')
1801             ax2.set_title('$T_\mathrm{mb,peak}$ + $\int T_\mathrm{mb}$')
1802
1803 ax3 = fig3.add_subplot(133)
1804 ax3.set_xticks(np.arange(len(jup)))
1805 ax3.set_yticks(np.arange(len(self.modellist)))
1806 ax3.xaxis.set_ticklabels(jup)
1807 ax3.yaxis.set_ticklabels([" -- ".join([self.modellist[i][6:], \
1808 str(self.fittedModels_bfmlll[i])]) \
1809 for i in range(len(self.modellist))])
1810 ax3.imshow(self.occurences_bfmlll, interpolation='nearest', \
1811 origin='upper', cmap = 'Blues')
1812 ax3.set_xlabel('$J_{up}$')
1813 ax3.set_title('LLL')
1814
1815 plt.tight_layout()
1816
1817 fig3.suptitle('ResoStats mode = combo',size = 18)
1818
1819 path = os.path.join(getattr(cc.path,self.code.lower()), \
1820 self.path_code,'stars', self.star_name)
1821 DataIO.testFolderExistence(os.path.join(path,'resostats'))
1822 filename_combo = os.path.join(path, 'resostats','combo-%s_len_%i'\
1823 %(self.modellist[0],(len(self.modellist))))
1824 fig3.savefig(filename_combo+'.pdf')
1825
1826 print '** Plot of ResoStats Combo can be found at:'
1827 print filename_combo+'.pdf'
1828 print '***********************************'
1829
1830
1831  - def printBestFitperLine(self, bfm_int=1, bfm_peak=0, bfm_combo=0, bfm_lll=0): # def line missing from the listing; name and defaults assumed
1832
1833 '''
1834         Print the per-transition output of selectBestFitperLine(), using fittedLines_bfmxxx.
1835
1836 '''
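         # Typical interactive use (illustrative sketch only; 'stats' is a
         # hypothetical ResoStats instance on which selectBestFitperLine has
         # already been run):
         #     stats.printBestFitperLine(bfm_int=1, bfm_lll=1)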
1837 calcTrans = [str(self.translist[x]) for x in self.includedtrans]
1838
1839 if bfm_int == 1:
1840 print '-------------------------------------------------------------------'
1841 print 'How often was a line modeled according to selectBestFitModels?'
1842 print 'Mode = int'
1843 print '-------------------------------------------------------------------'
1844 print 'Number of models calculated = ' + str(len(self.modellist))
1845 print ''
1846 print 'Transition - Fitted by # models'
1847 for ii in range(len(calcTrans)):
1848 print calcTrans[ii] + "\t" + str(self.fittedLines_bfmint[ii])
1849
1850 if bfm_peak == 1:
1851 print '-------------------------------------------------------------------'
1852 print 'How often was a line modeled according to selectBestFitModels?'
1853 print 'Mode = peak'
1854 print '-------------------------------------------------------------------'
1855 print 'Number of models calculated = ' + str(len(self.modellist))
1856 print ''
1857 print 'Transition - Fitted by # models'
1858 for ii in range(len(calcTrans)):
1859 print calcTrans[ii] + "\t" + str(self.fittedLines_bfmpeak[ii])
1860
1861 if bfm_combo == 1:
1862 print '-------------------------------------------------------------------'
1863 print 'How often was a line modeled according to selectBestFitModels?'
1864 print 'Mode = combo'
1865 print '-------------------------------------------------------------------'
1866 print 'Number of models calculated = ' + str(len(self.modellist))
1867 print ''
1868 print 'Transition - Fitted by # models'
1869 for ii in range(len(calcTrans)):
1870 print calcTrans[ii] + "\t" + str(self.fittedLines_bfmcombo[ii])
1871
1872 if bfm_lll == 1:
1873 print '----------------------------------------------------------------------'
1874 print 'How often was a line modeled according to selectBestFitModelsLLL?'
1875 print '----------------------------------------------------------------------'
1876 print 'Number of models calculated = ' + str(len(self.modellist))
1877 print ''
1878 print 'Transition - Fitted by # models'
1879 for ii in range(len(calcTrans)):
1880 print calcTrans[ii] + "\t" + str(self.fittedLines_bfmlll[ii])
1881
1882
1883  - def printBestFitperModel(self, bfm_int=1, bfm_peak=0, bfm_combo=0, bfm_lll=0): # def line missing from the listing; name and defaults assumed
1884
1885 '''
1886         Print the per-model output of selectBestFitperLine(), using fittedModels_bfmxxx.
1887
1888 '''
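         # Typical interactive use (illustrative sketch only; 'stats' is a
         # hypothetical ResoStats instance on which selectBestFitperLine has
         # already been run):
         #     stats.printBestFitperModel(bfm_combo=1)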
1889
1890 if bfm_int == 1:
1891 print '-------------------------------------------'
1892 print 'Best model according to selectBestFitModels'
1893 print 'Mode = int'
1894 print '-------------------------------------------'
1895 print 'Number of lines included = ' + str(len(self.includedtrans))
1896 print ''
1897 print 'Model - # lines fitted '
1898 for ii in range(len(self.modellist)):
1899 print self.modellist[ii] + "\t" + str(self.fittedModels_bfmint[ii])
1900 print ''
1901
1902 if bfm_peak == 1:
1903 print '-------------------------------------------'
1904 print 'Best model according to selectBestFitModels'
1905 print 'Mode = peak'
1906 print '-------------------------------------------'
1907 print 'Number of lines included = ' + str(len(self.includedtrans))
1908 print ''
1909 print 'Model - # lines fitted '
1910 for ii in range(len(self.modellist)):
1911 print self.modellist[ii] + "\t" + str(self.fittedModels_bfmpeak[ii])
1912 print ''
1913
1914 if bfm_combo == 1:
1915 print '-------------------------------------------'
1916 print 'Best model according to selectBestFitModels'
1917 print 'Mode = combo'
1918             print '-------------------------------------------'
1919 print 'Number of lines included = ' + str(len(self.includedtrans))
1920 print ''
1921 print 'Model - # lines fitted '
1922 for ii in range(len(self.modellist)):
1923 print self.modellist[ii] + "\t" + str(self.fittedModels_bfmcombo[ii])
1924 print ''
1925
1926 if bfm_lll == 1:
1927 print '-------------------------------------------'
1928             print 'Best model according to selectBestFitModelsLLL'
1929 print '-------------------------------------------'
1930 print 'Number of lines included = ' + str(len(self.includedtrans))
1931 print ''
1932 print 'Model - # lines fitted '
1933 for ii in range(len(self.modellist)):
1934 print self.modellist[ii] + "\t" + str(self.fittedModels_bfmlll[ii])
1935 print ''
1936