
Source Code for Module ComboCode.cc.data.instruments.Instrument

  1  # -*- coding: utf-8 -*- 
  2   
  3  """ 
  4  Interface for Instrument-specific methods. 
  5   
  6  Author: R. Lombaert 
  7   
  8  """ 
  9   
 10  import os 
 11  from glob import glob 
 12  from scipy import argmin,argmax,array,sqrt 
 13  import scipy 
 14   
 15  import cc.path 
 16  from cc.tools.io import DataIO 
 17  from cc.modeling.objects import Star 
 18   
 19   
 20   
 21  class Instrument(object):
 22      
 23      """
 24      Instrument-specific calculations.
 25      
 26      """
 27      
 28      def __init__(self,star_name,instrument_name,oversampling,\
 29                   code='GASTRoNOoM',path=None,intrinsic=1,path_linefit='',\
 30                   blend_factor=1.2):
 31          
 32          """
 33          Initializing an instance of Instrument.
 34          
 35          @param star_name: The name of the star from Star.py
 36          @type star_name: string
 37          @param instrument_name: The name of the instrument (SPIRE or PACS)
 38          @type instrument_name: string
 39          @param oversampling: The instrumental oversampling, for correct
 40                               convolution of the Sphinx output.
 41          @type oversampling: int
 42          
 43          @keyword code: The radiative transfer code used to model the data
 44          
 45                         (default: 'GASTRoNOoM')
 46          @type code: string
 47          @keyword path: Output folder in the code's home folder. Used to locate
 48                         eg PACS database. If None, no model info required (eg
 49                         for line measurement matching/identification)
 50                         
 51                         (default: None)
 52          @type path: string
 53          @keyword intrinsic: Use the intrinsic Sphinx line profiles for
 54                              convolving with the spectral resolution? Otherwise
 55                              the beam convolved line profiles are used.
 56                              
 57                              (default: 1)
 58          @type intrinsic: bool
 59          
 60          @keyword path_linefit: The folder name for linefit results from Hipe
 61                                 (created by Pierre, assuming his syntax). The
 62                                 folder is located in $path_pacs$/$star_name$/.
 63                                 If no folder is given, no linefits are
 64                                 processed.
 65                                 
 66                                 (default: '')
 67          @type path_linefit: string
 68          @keyword blend_factor: The relative factor with respect to the intrinsic
 69                                 instrumental FWHM that is compared with the
 70                                 fitted Gaussian FWHM to determine whether an
 71                                 emission line may be blended.
 72                                 
 73                                 (default: 1.2)
 74          @type blend_factor: float
 75          
 76          """
 77          
 78          self.path = path
 79          self.code = code
 80          self.star_name = star_name
 81          self.path_linefit = path_linefit
 82          self.instrument = instrument_name.lower()
 83          self.path_instrument = getattr(cc.path,'d%s'%self.instrument)
 84          self.intrinsic = intrinsic
 85          self.oversampling = int(oversampling)
 86          self.blend_factor = float(blend_factor)
 87          self.data_filenames = []
 88          istar = DataIO.getInputData(keyword='STAR_NAME').index(star_name)
 89          #-- Set relevant velocities in cm/s
 90          self.c = 2.99792458e10
 91          self.vlsr = float(DataIO.getInputData(keyword='V_LSR',rindex=istar))*10**5
 92          if not self.path is None:
 93              pp = getattr(cc.path,self.code.lower())
 94              DataIO.testFolderExistence(os.path.join(pp,self.path,'stars'))
 95              DataIO.testFolderExistence(os.path.join(pp,self.path,'stars',\
 96                                                      self.star_name))
 97          
 98          self.readTelescopeProperties()
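A minimal usage sketch, assuming cc.path and the usercfg input files (Star.dat, Telescope.dat) have already been configured; the star name 'rdor', the output folder 'codeJun2015', and the oversampling value are placeholders, not values prescribed by this module.

    from cc.data.instruments.Instrument import Instrument

    #-- 'rdor' must appear under STAR_NAME in Star.dat, and 'PACS' under
    #   TELESCOPE in Telescope.dat, for this constructor to succeed.
    pacs = Instrument(star_name='rdor',instrument_name='PACS',oversampling=6,
                      path='codeJun2015',path_linefit='lineFit',intrinsic=1)
    print pacs.instrument       # 'pacs'
    print pacs.vlsr             # V_LSR from Star.dat, converted to cm/s
    print pacs.telescope_size   # telescope size in m, from Telescope.dat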
 99  
100  
101  
102      def readTelescopeProperties(self):
103          
104          """
105          Read the telescope properties from Telescope.dat.
106          
107          This currently includes the telescope size in m, and the default
108          absolute flux calibration uncertainty.
109          
110          """
111          
112          all_telescopes = DataIO.getInputData(keyword='TELESCOPE',start_index=5,\
113                                               filename='Telescope.dat')
114          
115          try:
116              tel_index = all_telescopes.index(self.instrument.upper())
117          except ValueError:
118              raise ValueError('%s not found in Telescope.dat.'\
119                               %self.instrument.upper())
120          
121          self.telescope_size = DataIO.getInputData(keyword='SIZE',start_index=5,\
122                                                    filename='Telescope.dat',\
123                                                    rindex=tel_index)
124          self.absflux_err = DataIO.getInputData(keyword='ABS_ERR',start_index=5,\
125                                                  filename='Telescope.dat',\
126                                                  rindex=tel_index)
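The method reduces to a row lookup in a small whitespace-separated table. A standalone sketch of that lookup, assuming a hypothetical Telescope.dat layout with columns TELESCOPE, SIZE and ABS_ERR (the real parsing is delegated to DataIO.getInputData and may treat header lines differently):

    def read_telescope_row(filename,telescope):
        '''Return (size, abs_err) for one telescope from a table with
        columns TELESCOPE SIZE ABS_ERR. Hypothetical file layout.'''
        for line in open(filename):
            if not line.strip() or line.startswith('#'):
                continue
            cols = line.split()
            if cols[0].upper() == telescope.upper():
                return float(cols[1]),float(cols[2])
        raise ValueError('%s not found in %s.'%(telescope.upper(),filename))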
127  
128  
129  
130      def makeModelList(self,star_grid,id_type):
131          
132          '''
133          Return a list of all model id's in the star_grid.
134          
135          @param star_grid: The grid of parameter sets
136          @type star_grid: list[Star()]
137          @param id_type: the type of model id (MCMAX or GASTRONOOM or PACS)
138          @type id_type: string
139          @return: the model_ids
140          @rtype: list[string]
141          
142          '''
143          
144          return [star['LAST_'+id_type.upper()+'_MODEL'] for star in star_grid]
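A hedged one-line usage example; star_grid is assumed to hold Star() objects that already carry model ids:

    #-- Equivalent to [star['LAST_GASTRONOOM_MODEL'] for star in star_grid]
    model_ids = pacs.makeModelList(star_grid,id_type='gastronoom')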
145  
146  
147  
148      def getDataFilenames(self,searchstring=''):
149          
150          '''
151          Retrieve the data filenames that you wish to be plotted.
152          
153          @keyword searchstring: the searchstring conditional for selecting data
154                                 files.
155          
156                                 (default: '')
157          @type searchstring: string
158          @return: the data filenames found in the search
159          @rtype: list[string]
160          
161          '''
162          
163          return [f
164                  for f in glob(os.path.join(self.path_instrument,\
165                                             self.star_name,'cont_subtracted',\
166                                             '*'+searchstring+'*'))
167                  if f[-1] != '~' and f[-7:] != '.tar.gz']
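A hedged call example, reusing the pacs instance from above; 'B2A' is only an illustrative searchstring:

    #-- Every continuum-subtracted file for this star whose name contains
    #   'B2A', skipping editor backups ('~') and .tar.gz archives.
    fns = pacs.getDataFilenames(searchstring='B2A')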
168  
169  
170  
171      def setData(self,data_filenames=[],searchstring=''):
172          
173          '''
174          Read data from given filenames and remember them.
175          
176          The data are saved as wavelength and flux lists in the object.
177          
178          @keyword data_filenames: The data filenames. If empty, auto-search is
179                                   done
180          
181                                   (default: [])
182          @type data_filenames: list[string]
183          @keyword searchstring: the searchstring conditional for the auto-search
184          
185                                 (default: '')
186          @type searchstring: string
187          
188          '''
189          if not self.data_filenames:
190              if not data_filenames:
191                  data_filenames = self.getDataFilenames(searchstring=\
192                                                         searchstring)
193              if not data_filenames:
194                  print '** No data filenames or star object given: ' + \
195                        'Cannot retrieve data. No %s data will be read.'\
196                        %self.instrument.upper()
197              else:
198                  print '** Reading %s data.'%self.instrument.upper()
199                  self.data_filenames = data_filenames
200                  self.readData()
201                  if self.instrument == 'pacs':
202                      bands = ['B2A','B3A','B2B','R1A','R1B']
203                  elif self.instrument == 'spire':
204                      bands = ['SSW','SLW']
205                  self.data_ordernames = [[word
206                                           for word in os.path.split(f)[1].split('_')
207                                           if word.upper() in bands][0]
208                                          for f in self.data_filenames]
209                  if len(self.data_ordernames) != len(self.data_filenames):
210                      raise IOError('Could not match number of ordernames to ' + \
211                                    'number of filenames when selecting PACS ' + \
212                                    'datafiles. Check filenames for missing or ' + \
213                                    'extra order indications between "_".')
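In practice setData is either handed explicit filenames or left to auto-search the cont_subtracted folder; a sketch assuming the pacs instance from above and PACS-style filenames that carry a band tag such as '_B2A_' between underscores:

    import os

    #-- Auto-search: reads every continuum-subtracted file for this star.
    pacs.setData(searchstring='')

    #-- After reading, each filename has an associated band (order) name.
    for fn,band in zip(pacs.data_filenames,pacs.data_ordernames):
        print os.path.split(fn)[1],band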
214  
215  
216  
217      def readData(self):
218          
219          '''
220          Read in data, taking special care of NaNs.
221          
222          Four columns are taken as input! wave - contsub - original - continuum
223          
224          Two columns still works, but may result in errors in other places in
225          the code.
226          
227          Data are always read in Jy versus micron, for both SPIRE and PACS.
228          
229          '''
230          
231          self.data_wave_list = []
232          self.data_flux_list = []
233          self.data_original_list = []
234          self.data_continuum_list = []
235          for filename in self.data_filenames:
236              data = DataIO.readCols(filename=filename,nans=1)
237              self.data_wave_list.append(data[0])
238              self.data_flux_list.append(data[1])
239              if len(data) == 2:
240                  continue
241              self.data_original_list.append(data[2])
242              self.data_continuum_list.append(data[3])
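The actual column reading is delegated to DataIO.readCols(filename,nans=1); a rough standalone equivalent with numpy, for a hypothetical four-column (wave, contsub, original, continuum) file, might look like this:

    import numpy as np

    def read_cols(filename):
        '''Read whitespace-separated columns, keeping invalid entries as NaN.
        Rough stand-in for DataIO.readCols(filename,nans=1); the real method
        may differ in detail.'''
        data = np.genfromtxt(filename)      # non-numeric entries become NaN
        if data.ndim == 1:                  # single row: keep a 2D shape
            data = data.reshape(1,-1)
        return [data[:,i] for i in range(data.shape[1])]

    cols = read_cols('rdor_B2A_cont_subtracted.dat')   # hypothetical filename
    wave,flux = cols[0],cols[1]             # wave in micron, contsub flux in Jy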
243  
244  
245  
246      def readLineFit(self,**kwargs):
247          
248          '''
249          Read the data from the line fit procedure.
250          
251          @keyword kwargs: Extra keywords for the readCols method.
252          
253                           (default: dict())
254          @type kwargs: dict
255          
256          @return: The line fit columns are returned.
257          @rtype: list[array]
258          
259          '''
260          
261          fn = os.path.join(self.path_instrument,self.star_name,\
262                            self.path_linefit,'lineFitResults')
263          if not self.path_linefit or not os.path.isfile(fn):
264              self.linefit = None
265              return
266          dd = DataIO.readCols(fn,make_array=0,**kwargs)
267          return dd
268  
269  
270  
271      def intIntMatch(self,trans_list,ifn):
272          
273          '''
274          Match the wavelengths of integrated intensities with transitions.
275          
276          Checks if a blend might be present based on the data, as well as the
277          available transitions.
278          
279          Note that if a line is observed multiple times in the same band (eg in
280          a line scan), it cannot at this moment be discerned. In other words,
281          there is no point including a line fitted twice in two line scans, as
282          it will not be taken into account for the int matching algorithm.
283          
284          @param trans_list: The list of transitions for which the check is done.
285          @type trans_list: list[Transition()]
286          @param ifn: The index of the filename in self.data_filenames. This is
287                      done per file!
288          @type ifn: int
289          
290          '''
291          
292          if self.linefit is None:
293              return
294          dwav = self.data_wave_list[ifn]
295          ordername = self.data_ordernames[ifn]
296          fn = self.data_filenames[ifn]
297          #   1) Prep: Create a list of sample transitions and get central
298          #      wavs corrected for vlsr, in micron. Select fit results
299          #      for this particular band.
300          lf = self.linefit[self.linefit['band'] == ordername]
301          #   No info available for band, so don't set anything.
302          if not list(lf.wave_fit):
303              return
304          
305          #   2) Collect all transitions with doppler shifted central wavelength
306          #      within the wavelength region of the datafile selected here.
307          strans = [(t,t.wavelength*10**4*1./(1-self.vlsr/t.c))
308                    for t in trans_list
309                    if t.wavelength*10**4*1./(1-self.vlsr/t.c) >= dwav[0]\
310                        and t.wavelength*10**4*1./(1-self.vlsr/t.c) <= dwav[-2]]
311          
312          #   3) Check if the wav of a trans matches a wav in the fitted
313          #      intensities list, within the fitted_fwhm of the line with
314          #      respect to the fitted central wavelength, on BOTH sides.
315          #      Note that there cannot be 2 fitted lines with central wav
316          #      closer than fwhm/2. Those lines would be inseparable!
317          #      With the exception of lines superimposed, which should typically
318          #      be avoided. Line matching will not be very accurate in this
319          #      case.
320          imatches = [argmin(abs(lf.wave_fit-mwav))
321                      for (st,mwav) in strans]
322          matches = [(mwav <= lf.wave_fit[ii] + lf.fwhm_fit[ii] \
323                          and mwav >= lf.wave_fit[ii] - lf.fwhm_fit[ii]) \
324                         and (lf.wave_fit[ii],ii) or (None,None)
325                     for (st,mwav),ii in zip(strans,imatches)]
326          #   4) If match found, check if multiple mtrans fall within
327          #      fitted_FWHM/2 from the fitted central wavelength of the
328          #      line. These are blended IN MODEL and/or IN DATA.
329          #      Check for model blend is done by looking for ALL transitions
330          #      that have been matched with a single fitted wavelength.
331          matches_wv = array([mm[0] for mm in matches])
332          wf_blends = [list(array([st for st,mwav in strans])[matches_wv==wv])
333                       for wv in lf.wave_fit]
334          #      Use the wave_fit array indices from matches[:][1] to check
335          #      if indeed multiple transitions were found for the same wav.
336          #      If positive, include True if the particular transition is
337          #      the first among the blended ones, else include False. The
338          #      False ones are not taken into account as blended because:
339          #      1) no match was found at all,
340          #      2) only one match was found,
341          #      3) if multiple matches have been found it was not the first.
342          #
343          blended = [not ii is None and len(wf_blends[ii]) > 1. \
344                         and wf_blends[ii].index(st) != 0
345                     for (st,mwav),(match,ii) in zip(strans,matches)]
346          for (st,mwav),blend,(match,ii) in zip(strans,blended,matches):
347              #   5) No match found in linefit for this band: no
348              #      integrated intensity is set for this filename in this
349              #      transition. Simply move on to the next transition. (not
350              #      setting gives None when asking Trans for integrated line
351              #      of unresolved lines)
352              if match is None:
353                  continue
354              #   6) Line is blended with other line that is already added. Just
355              #      for bookkeeping purposes, all blended lines involved are
356              #      added here as well.
357              elif blend:
358                  st.setIntIntUnresolved(fn,'inblend',None,self.vlsr,wf_blends[ii])
359              #   7) Match found with a wave_fit value once. Check for line
360              #      blend IN DATA: Check the ratio fitted FWHM/PACS FWHM. If
361              #      larger by 30% or more, put the int int negative.
362              elif len(wf_blends[ii]) == 1:
363                  err = sqrt((lf.line_flux_rel[ii])**2+self.absflux_err**2)
364                  factor = lf.fwhm_rel[ii] >= self.blend_factor and -1 or 1
365                  st.setIntIntUnresolved(fn,factor*lf.line_flux[ii],err,self.vlsr)
366              #   8) If multiple matches, give a selection of strans included
367              #      in the blend (so they can be used to select model
368              #      specific transitions later for addition of the
369              #      integrated intensities of the mtrans). Make the integrated
370              #      line strength negative to indicate a line blend.
371              #      The *OTHER* transitions found this way are not compared
372              #      with any data and get None. (see point 4) )
373              else:
374                  err = sqrt((lf.line_flux_rel[ii])**2+self.absflux_err**2)
375                  st.setIntIntUnresolved(fn,-1.*lf.line_flux[ii],err,self.vlsr,
376                                         wf_blends[ii])
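The core of the matching rule in steps 3) and 7) can be shown standalone: take the fitted line nearest to the Doppler-corrected model wavelength, accept it only if it lies within the fitted FWHM, and flag a possible blend in the data when the fitted FWHM exceeds blend_factor times the instrumental FWHM. A sketch with made-up fit results (not the ComboCode call itself):

    import numpy as np

    def match_line(mwav,wave_fit,fwhm_fit,fwhm_rel,blend_factor=1.2):
        '''Return (index,data_blend_flag) of the fitted line matching the
        model wavelength mwav (micron), or (None,False) if no match.'''
        ii = np.argmin(np.abs(wave_fit-mwav))
        if abs(wave_fit[ii]-mwav) > fwhm_fit[ii]:
            return None,False
        #-- Fitted FWHM much broader than instrumental FWHM: likely blended.
        return ii,fwhm_rel[ii] >= blend_factor

    wave_fit = np.array([72.84,108.07,144.78])     # made-up fit results
    fwhm_fit = np.array([0.04,0.12,0.13])
    fwhm_rel = np.array([1.0,1.5,1.1])
    print match_line(108.10,wave_fit,fwhm_fit,fwhm_rel)    # (1, True): blend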
377  
378  
379  
380      def mergeSphinx(self,star):
381          
382          '''
383          Merge Sphinx output line profiles on a zero-continuum.
384          
385          For now only done in wavelength units of micron for PACS/SPIRE spectra.
386          
387          @param star: The Star object for which all lines are collected + merged
388          @type star: Star()
389          @return: wave list in micron and flux list in Jy
390          @rtype: (list,list)
391          
392          '''
393          
394          #read all sphinx output data for the relevant instrument
395          sphinx_transitions = [trans
396                                for trans in star['GAS_LINES']
397                                if trans.getModelId() \
398                                    and self.instrument.upper() in trans.telescope]
399          #- If no sphinx output found, this list will be empty and no convolution
400          #- should be done.
401          if not sphinx_transitions:
402              return [[],[]]
403          
404          [trans.readSphinx() for trans in sphinx_transitions]
405          sphinx_input = [self.intrinsic \
406                              and (trans.sphinx.getVelocityIntrinsic(),\
407                                   trans.sphinx.getLPIntrinsic())
408                              or (trans.sphinx.getVelocity,\
409                                  trans.sphinx.getLPConvolved())
410                          for trans in sphinx_transitions]
411          
412          #- convert km/s to cm/s to micron and flux to Jy
413          #- doppler shift (1-(v_source - v_observer=delta_v)/c)*f_zero converted
414          #- to wavelength in micron
415          sphinx_input = [(1/(1.-(vel*10**5/star.c))*trans.wavelength*10**(4),\
416                           flux*10**(23))
417                          for (vel,flux),trans in zip(sphinx_input,\
418                                                      sphinx_transitions)]
419          
420          #-- In case the wavelength/freq scale is counting down, reverse arrays
421          sphinx_input = [wav[0] > wav[-1] \
422                              and (wav[::-1],flux[::-1]) or (wav,flux)
423                          for wav,flux in sphinx_input]
424          sphinx_input = [(list(wav),list(flux)) for wav,flux in sphinx_input]
425          
426          #- Make sure all sphinx segments are increasing in wavelength/frequency
427          sphinx_input.sort()
428          
429          #- Check if overlap between lines is present:
430          #- False if THIS line is overlapping with the one before it.
431          #- Hence the first line will always be true.
432          overlap_bools = [1] + \
433                          [sphinx_input[i-1][0][-1] <= sphinx_input[i][0][0]
434                           for i in xrange(1,len(sphinx_input))]
435          
436          if False in overlap_bools:
437              print 'WARNING! There is overlap between emission lines in ' + \
438                    'Sphinx output. Overlap is included by simple addition only!'
439          
440          #- Add zeroes on a grid before the first line to make sure the
441          #- convolution goes right
442          sphinx_final = [(sphinx_input[0][0][0] - 1 + d/1000.,0.0)
443                          for d in xrange(1,1000)]
444          
445          #- For now stitch up the segments with zeroes. Later on:
446          #- Make grid with zeroes, then add segments if no overlap,
447          #- if overlap, make sure the addition works well
448          for i in xrange(len(sphinx_input)-1):
449              #- if True, no overlap with the sphinx file before i and it has to
450              #- be added
451              #- if False, overlap with the sphinx file before i and it has been
452              #- added already
453              if overlap_bools[i]:
454                  j = 1
455                  this_wave = sphinx_input[i][0]
456                  this_flux = sphinx_input[i][1]
457                  blend_wave = []
458                  blend_flux = []
459                  #- If False, overlap: interpolation is needed, select all the
460                  #- sphinx results for which there is a subsequent blend
461                  while not i+j == len(sphinx_input)-1 \
462                          and not overlap_bools[i+j]:
463                      blend_wave.append(sphinx_input[i+j][0])
464                      blend_flux.append(sphinx_input[i+j][1])
465                      j += 1
466                  
467                  #- note that at i+j there is no more blend, and it is a
468                  #- discrete sphinx result: This should not be added yet
469                  #- if there are blends here, blend_wave is not []
470                  if blend_wave:
471                      #- remember where the largest wavelength is to be found
472                      max_index = argmax(array([wav[-1]
473                                                for wav in blend_wave]))+i+1
474                      if sphinx_input[max_index][0][-1] < sphinx_input[i][0][-1]:
475                          max_index = i
476                      #- Making a new wavelength grid beyond the current line at
477                      #- i; there should be no need to expand the grid
478                      #- to lower wavelengths, since sphinx_final was sorted.
479                      delta_lambda = (this_wave[-1]-this_wave[0])/len(this_wave)
480                      #- expand until all wavelengths in all blends are covered
481                      while this_wave[-1] < max([wav[-1] for wav in blend_wave]):
482                          this_wave.append(this_wave[-1]+delta_lambda)
483                          this_flux.append(0.0)
484                      interpolations = []
485                      #- interpolate to get values for the new grid, where the
486                      #- original wavelength grid for every blend is expanded
487                      #- such that all wavelengths in the new grid are covered,
488                      #- adding zeroes in this way does not change the result
489                      for x,y in zip(blend_wave,blend_flux):
490                          this_x = array([this_wave[0],x[0]-(x[1]-x[0])] + x + \
491                                         [x[-1]+(x[-1]-x[-2]),this_wave[-1]])
492                          this_y = array([0,0] + y + [0,0])
493                          interpolations.append(array(scipy.interpolate.interp1d\
494                                                     (this_x,this_y)(this_wave)))
495                      #- add together
496                      this_flux=array(this_flux)
497                      for blend in interpolations: this_flux += blend
498                  else:
499                      max_index = i
500                  
501                  #- extend result with the new wavelengths and fluxes,
502                  #- including blends if needed
503                  sphinx_final.extend([(x,y)
504                                       for x,y in zip(this_wave,this_flux)])
505                  #- extend result further with zeroes up until the next sphinx
506                  #- line, after the last line in the blend
507                  #- Take delta equal to average delta in last Sphinx segment,
508                  #- this may be different from delta in next segment,
509                  #- but since they're zeroes you don't really care
510                  sphinx_final.append((2*sphinx_final[-1][0]\
511                                          -sphinx_final[-2][0],\
512                                       0.0))
513                  while sphinx_final[-1][0] + 0.001 < sphinx_input[i+j][0][0]:
514                      sphinx_final.append((sphinx_final[-1][0]+0.001,0.0))
515                  sphinx_final.append((2*sphinx_input[i+j][0][0]-\
516                                           sphinx_input[i+j][0][1],\
517                                       0.0))
518          #-- Remember overlap_bools contains FALSE if there IS an overlap.
519          #   Counterintuitive, I know.
520          if overlap_bools[-1]:
521              sphinx_final.extend([(w,f)
522                                   for w,f in zip(sphinx_input[-1][0],\
523                                                  sphinx_input[-1][1])])
524              sphinx_final.extend([(sphinx_input[-1][0][-1] + d/100.,0.0)
525                                   for d in xrange(1,1000)])
526          
527          #- No final sort for now, it should be arranged already, even if line
528          #- blends are present
529          return [s[0]
530                  for s in sphinx_final],\
531                 [s[1]
532                  for s in sphinx_final]
533
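The first transformation in mergeSphinx, turning a Sphinx velocity grid in km/s around a rest wavelength into observed-frame micron and the line profile from cgs into Jy, can be sketched on its own; the rest wavelength and the toy Gaussian profile below are placeholders:

    import numpy as np

    c = 2.99792458e10                        # light speed in cm/s
    vel = np.linspace(-30.,30.,121)          # velocity grid in km/s (placeholder)
    flux_cgs = np.exp(-(vel/10.)**2)*1e-23   # toy line profile in erg/s/cm^2/Hz
    wave_rest = 1.618e-2                     # rest wavelength in cm (placeholder)

    #-- Doppler shift each velocity point, cm -> micron, cgs -> Jy, mirroring
    #   the sphinx_input conversion above.
    wave_mic = 1./(1.-(vel*1e5/c))*wave_rest*1e4
    flux_jy = flux_cgs*1e23

    #-- Reverse if the resulting wavelength scale counts down.
    if wave_mic[0] > wave_mic[-1]:
        wave_mic,flux_jy = wave_mic[::-1],flux_jy[::-1]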