From abc446056823ef8df4117cb63fb51af3ff15daea Mon Sep 17 00:00:00 2001 From: ZacharyLane1204 Date: Tue, 2 Jul 2024 14:47:43 +1200 Subject: [PATCH 1/5] Merge dev --- tessreduce/tessreduce.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tessreduce/tessreduce.py b/tessreduce/tessreduce.py index 9c8ec31..1972d09 100644 --- a/tessreduce/tessreduce.py +++ b/tessreduce/tessreduce.py @@ -1487,7 +1487,7 @@ def psf_photometry(self,xPix,yPix,size=5,snap='brightest',ext_shift=True,plot=Fa Whether plots will a. The default is False. diff : TYPE, optional DESCRIPTION. The default is None. - +flux_to_jansky Returns ------- flux : numpy array From f70425c27432c37acc070947348bce2 Mon Sep 17 00:00:00 2001 From: ble61 Date: Thu, 27 Jun 2024 13:18:28 +1200 Subject: [PATCH 2/5] try to add docstring --- tessreduce/lastpercent.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tessreduce/lastpercent.py b/tessreduce/lastpercent.py index 12ed9a3..82eb1fa 100644 --- a/tessreduce/lastpercent.py +++ b/tessreduce/lastpercent.py @@ -7,6 +7,9 @@ from copy import deepcopy def cor_minimizer(coeff,pix_lc,bkg_lc): + """ + + """ lc = pix_lc - coeff * bkg_lc ind = np.isfinite(lc) & np.isfinite(bkg_lc) #bkgnorm = bkg_lc/np.nanmax(bkg_lc) From 8f05e77152077e8de602100821fafa5abc06133a Mon Sep 17 00:00:00 2001 From: ble61 Date: Fri, 28 Jun 2024 14:14:42 +1200 Subject: [PATCH 3/5] lastPercent.py Docstringed --- tessreduce/lastpercent.py | 109 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) diff --git a/tessreduce/lastpercent.py b/tessreduce/lastpercent.py index 82eb1fa..30f056b 100644 --- a/tessreduce/lastpercent.py +++ b/tessreduce/lastpercent.py @@ -8,7 +8,21 @@ def cor_minimizer(coeff,pix_lc,bkg_lc): """ + Calculates the Pearson r correlation coefficient between the background-subtracted lightcurve and the background itself. Takes inputs in a form suitable for minimization methods to be run on this function. + + Parameters: + ---------- + coeff: float + The multiplier on the background flux to be subtracted from the lightcurve. This is the variable being changed in any minimization. + pix_lc: ArrayLike + The full lightcurve of pixel flux data, including the background component. + bkg_lc: ArrayLike + The background lightcurve, to be multiplied by coeff and subtracted from pix_lc. + Returns: + ------- + corr: float + The absolute value of the Pearson r correlation coefficient between the background-subtracted lightcurve and the background. """ lc = pix_lc - coeff * bkg_lc ind = np.isfinite(lc) & np.isfinite(bkg_lc) #bkgnorm = bkg_lc/np.nanmax(bkg_lc) @@ -19,6 +33,28 @@ def cor_minimizer(coeff,pix_lc,bkg_lc): return abs(corr) def _parallel_correlation(pixel,bkg,arr,coord,smth_time): + """ + Calculates the Pearson r correlation coefficient between the savgol-filtered lightcurve and the upper 30% of the background, at the same indices. + + Parameters: + ---------- + pixel: ArrayLike + The flux lightcurve to be filtered and correlated. + bkg: ArrayLike + The background lightcurve. + + arr: Not used; positional only. + + coord: Not used; positional only. + + smth_time: int + The window length of the savgol filter; must be <= the size of pixel. + + Returns: + ------- + corr: float + The absolute value of the Pearson r correlation coefficient between the filtered lightcurve and the upper 30% of the background, rounded to 2 decimal places.
+ """ nn = np.isfinite(pixel) ff = savgol_filter(pixel[nn],smth_time,2) b = bkg[nn] @@ -27,6 +63,21 @@ def _parallel_correlation(pixel,bkg,arr,coord,smth_time): return np.round(abs(corr),2) def _find_bkg_cor(tess,cores): + """ + Takes a TESSreduce object and calculates the flux-background Pearson r correlation coefficent in parallel. + + Parameters: + ---------- + tess: TESSreduce Object + The TESSreduce object that is needing the correlation coefficents calculated. + cores: int + The number of cores to be used for parallel processing. + + Returns: + cors: ArrayLike + The array of Pearson r correlation coefficents + + """ y,x = np.where(np.isfinite(tess.ref)) coord = np.c_[y,x] cors = np.zeros_like(tess.ref) @@ -40,6 +91,26 @@ def _find_bkg_cor(tess,cores): return cors def _address_peaks(flux,bkg,std): + """ + Filters the upper 30% of the background values and their corresponding flux values. The fit to the background involves a minimization of the correlation coefficents and an interpolated savgol filter of the fluxes. The fluxes are modified by the same savgol filter, and the median of the lower 16% of the std. + + Parameters: + ---------- + flux: ArrayLike + The flux array of interest + bkg: ArrayLike + The background flux array corresponding to flux. + std: ArrayLike + An array of the standard deviations of the background + + Returns: + ------- + new_flux: ArrayLike + The modified flux array. If there is nothing to modify, new_flux==flux. + new_bkg: ArrayLike + The modified background array. If there is nothing to modify, new_bkg==bkg. + """ + nn = np.isfinite(flux) b = bkg[nn] f = flux[nn] @@ -88,6 +159,24 @@ def _address_peaks(flux,bkg,std): return new_flux, new_bkg def _calc_bkg_std(data,coord,d=6): + """ + Calculates the background standard deviation of data in a rectangle of size d pixels around the coord point given. + + Parameters: + ---------- + data: ArrayLike + A 2d Array of flux values to calculate the standard deviation of. + coord: ArrayLike (shape(2,)) + The y, x coordinate to calculate the standard deviation around. + d: int, optional + The size of the rectangle to have the standard deviation calculates in. If the pairing of coord and d would result in a rectangle indexing outside of data, this is corrected for, so d is the maximum size of the rectangle, and will give a square box if no corrections are needed. Default is 6. + + Returns: + ------- + std: float + The standard deviation of data at and around coord. + """ + y = coord[0]; x = coord[1] ylow = y-d; yhigh=y+d+1 if ylow < 0: @@ -105,6 +194,26 @@ def _calc_bkg_std(data,coord,d=6): def multi_correlation_cor(tess,limit=0.8,cores=7): + """ + Corrects for correlation coefficents larger than limit. If the flux and the background of tess are correlated (absolute value of correlation coefficent, |r|) to a level higher than limit, a fit to minimize this coefficent is preformed, and the new background and flux values are returned + + Parameters: + ---------- + tess: TESSreduce Object + + limit: float, optional + The largest acceptable |r| before any modifications are needed. Should be in range (0,1) for comparison to |r| to make any sense. Default is 0.8. + cores: int, optional + The number of cores to use for multiprocessing. Default is 7. + + Returns: + ------- + flux: ArrayLike + The modified flux array, after any needed changes have been made. If nothing is needed to be changed, or the modification breaks, flux == tess.flux. + bkg: + The modified background array, after any needed changes have been made. 
If nothing is needed to be changed, or the modification breaks, bkg == tess.bkg. + + """ cors = _find_bkg_cor(tess,cores=cores) y,x = np.where(cors > limit) flux = deepcopy(tess.flux) From 3de389fdddd1c4326661cd4fa7dfb011297b2f99 Mon Sep 17 00:00:00 2001 From: Ryan Ridden Date: Wed, 10 Jul 2024 14:52:50 +1200 Subject: [PATCH 4/5] bkg update --- tessreduce/tessreduce.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tessreduce/tessreduce.py b/tessreduce/tessreduce.py index 835c403..647ca63 100644 --- a/tessreduce/tessreduce.py +++ b/tessreduce/tessreduce.py @@ -1799,7 +1799,7 @@ def reduce(self, aper = None, align = None, parallel = None, calibrate=None, print('made reference') # make source mask if mask is None: - self.make_mask(catalogue_path=self._catalogue_path,maglim=18,strapsize=7,scale=mask_scale)#Source_mask(ref,grid=0) + self.make_mask(catalogue_path=self._catalogue_path,maglim=18,strapsize=7,scale=mask_scale) frac = np.nansum((self.mask == 0) * 1.) / (self.mask.shape[0] * self.mask.shape[1]) #print('mask frac ',frac) if frac < 0.05: @@ -1887,7 +1887,7 @@ def reduce(self, aper = None, align = None, parallel = None, calibrate=None, self.ref -= self.bkg[self.ref_ind] # remake mask - self.make_mask(catalogue_path=self._catalogue_path,maglim=18,strapsize=7,scale=mask_scale*.8,useref=True)#Source_mask(ref,grid=0) + self.make_mask(catalogue_path=self._catalogue_path,maglim=18,strapsize=7,scale=mask_scale*.8,useref=False)#Source_mask(ref,grid=0) frac = np.nansum((self.mask== 0) * 1.) / (self.mask.shape[0] * self.mask.shape[1]) #print('mask frac ',frac) if frac < 0.05: From 2590b53c706fc5d18206f5a268af551a0cd21b0f Mon Sep 17 00:00:00 2001 From: ZacharyLane1204 Date: Thu, 11 Jul 2024 15:40:54 +1200 Subject: [PATCH 5/5] Doc strings and other minor fixes/optimisations --- tessreduce/lastpercent.py | 2 +- tessreduce/tessreduce.py | 324 +++++++++++++++++++++----------------- 2 files changed, 178 insertions(+), 148 deletions(-) diff --git a/tessreduce/lastpercent.py b/tessreduce/lastpercent.py index 30f056b..b731b11 100644 --- a/tessreduce/lastpercent.py +++ b/tessreduce/lastpercent.py @@ -195,7 +195,7 @@ def _calc_bkg_std(data,coord,d=6): def multi_correlation_cor(tess,limit=0.8,cores=7): """ - Corrects for correlation coefficents larger than limit. If the flux and the background of tess are correlated (absolute value of correlation coefficent, |r|) to a level higher than limit, a fit to minimize this coefficent is preformed, and the new background and flux values are returned + Corrects for correlation coefficents larger than limit. If the flux and the background of tess are correlated (absolute value of correlation coefficent, |r|) to a level higher than limit, a fit to minimize this coefficent is performed, and the new background and flux values are returned Parameters: ---------- diff --git a/tessreduce/tessreduce.py b/tessreduce/tessreduce.py index 647ca63..3ab6ee9 100644 --- a/tessreduce/tessreduce.py +++ b/tessreduce/tessreduce.py @@ -86,7 +86,7 @@ def __init__(self,ra=None,dec=None,name=None,obs_list=None,tpf=None,size=90,sect Declination of the target object. The default is None. name : str Name of the object, used in saving. The default is None. - obs_list : array, optional + obs_list : array_like, optional Array generated by the sn_lookup and spacetime_lookup functions. The default is None. tpf : target pixel file, optional TESS target pixel file. The default is None. 
@@ -1508,8 +1508,11 @@ def correlation_corrector(self,limit=0.8): Parameters ---------- - limit : TYPE, optional - DESCRIPTION. The default is 0.8. + limit : float, optional + Corrects for correlation coefficients larger than limit. + If the flux and the background of tess are correlated (absolute value of correlation coefficient, |r|) + to a level higher than limit, a fit to minimize this coefficient is performed, + and the new background and flux values are returned. Default is 0.8. Returns ------- @@ -1520,24 +1523,23 @@ def correlation_corrector(self,limit=0.8): self.flux = flux self.bkg = bkg - def _psf_initialise(self,cutoutSize,loc,time_ind=None,ref=False): """ For gathering the cutouts and PRF base. Parameters ---------- - cutoutSize : TYPE - DESCRIPTION. - loc : TYPE - DESCRIPTION. - ref : TYPE, optional - DESCRIPTION. The default is False. + cutoutSize : int + Size of the cutouts in pixels. + loc : array_like + Pixel coordinates to evaluate. + ref : bool, optional + Toggles whether the reference image is used for calculations. The default is False. Returns ------- - prf : TYPE - DESCRIPTION. + prf : TESS_PRF Class object + The effective point-spread function generated from TESS_PRF. cutout : TYPE DESCRIPTION. @@ -1545,10 +1547,11 @@ def _psf_initialise(self,cutoutSize,loc,time_ind=None,ref=False): if time_ind is None: time_ind = np.arange(0,len(self.flux)) - if (type(loc[0]) == float) | (type(loc[0]) == np.float64) | (type(loc[0]) == np.float32): - loc[0] = int(loc[0]+0.5) - if (type(loc[1]) == float) | (type(loc[1]) == np.float64) | (type(loc[1]) == np.float32): - loc[1] = int(loc[1]+0.5) + if isinstance(loc[0], (float, np.floating, np.float32, np.float64)): + loc[0] = int(loc[0] + 0.5) + if isinstance(loc[1], (float, np.floating, np.float32, np.float64)): + loc[1] = int(loc[1] + 0.5) + col = self.tpf.column - int(self.size/2-1) + loc[0] # find column and row, when specifying location on a *say* 90x90 px cutout row = self.tpf.row - int(self.size/2-1) + loc[1] @@ -1560,6 +1563,29 @@ def _psf_initialise(self,cutoutSize,loc,time_ind=None,ref=False): return prf, cutout def moving_psf_photometry(self,xpos,ypos,size=5,time_ind=None,xlim=2,ylim=2): + """ + PSF photometry for moving targets. + + Parameters + ---------- + xpos : array_like + x pixel locations for the initial guess of the target region. + ypos : array_like + y pixel locations for the initial guess of the target region. + size : int, optional + Size of pixel cutout to use (should be odd). The default is 5. + time_ind : array_like, optional + Indices of the time series to use. The default is None. + xlim : int, optional + Width of the cutout in pixels. The default is 2. + ylim : int, optional + Height of the cutout in pixels. The default is 2. + + Returns + ------- + flux : array_like + Flux light curve across entire sector. + """ if time_ind is None: if len(xpos) != len(self.flux): m = 'If "times" is not specified then xpos must have the same length as flux.' @@ -1597,28 +1623,29 @@ def moving_psf_photometry(self,xpos,ypos,size=5,time_ind=None,xlim=2,ylim=2): pos[0,:] += xpos; pos[1,:] += ypos return flux, pos - def psf_photometry(self,xPix,yPix,size=5,snap='brightest',ext_shift=True,plot=False,diff=None): """ Main PSF Photometry function Parameters ---------- - xPix : TYPE - x pixel location of target region. - yPix : TYPE - y pixel location of target region. + xPix : float + x pixel location for the initial guess of the target region. + yPix : float + y pixel location for the initial guess of the target region.
size : int, optional - Size of cutout to use (should be odd). The default is 5. - repFact : TYPE, optional + Size of pixel cutout to use (should be odd). The default is 5. + repFact : int, optional Super sampling factor for modelling. The default is 10. - snap : TYPE, optional - Determines how psf position is fit.. The default is 'brightest'. + snap : str or int, optional + Determines how psf position is fit. The default is 'brightest'. Valid Options: None = each frame's position will be fit and used when fitting for flux 'brightest' = the position of the brightest cutout frame will be applied to all subsequent frames int = providing an integer allows for explicit choice of which frame to use as position reference 'ref' = use the reference as the position fit point + ext_shift : array_like, optional + External shift in the pixel positions. The default is True. <<<<<<< HEAD ext_shift : TYPE, optional DESCRIPTION. The default is True. @@ -1635,13 +1662,13 @@ def psf_photometry(self,xPix,yPix,size=5,snap='brightest',ext_shift=True,plot=Fa ext_shift : TYPE, optional DESCRIPTION. The default is True. plot : bool, optional - Whether plots will a. The default is False. - diff : TYPE, optional - DESCRIPTION. The default is None. + Whether plots will shown. The default is False. + diff : bool, optional + If True then difference imaging will occur. The default is None. Returns ------- - flux : numpy array + flux : array_like Flux light curve across entire sector.. >>>>>>> upstream/dev @@ -1651,75 +1678,73 @@ def psf_photometry(self,xPix,yPix,size=5,snap='brightest',ext_shift=True,plot=Fa diff = self.diff flux = [] - if isinstance(xPix,(list,np.ndarray)): - self.moving_psf_phot() - - else: - if snap == None: # if no snap, each cutout has their position fitted and considered during flux fitting - prf, cutouts = self._psf_initialise(size,(xPix,yPix)) # gather base PRF and the array of cutouts data - xShifts = [] - yShifts = [] - for cutout in tqdm(cutouts): - PSF = create_psf(prf,size) - PSF.psf_position(cutout) - PSF.psf_flux(cutout) - flux.append(PSF.flux) - yShifts.append(PSF.source_y) - xShifts.append(PSF.source_x) - if plot: - fig,ax = plt.subplots(ncols=3,figsize=(12,4)) - ax[0].plot(flux) - ax[0].set_ylabel('Flux') - ax[1].plot(xShifts,marker='.',linestyle=' ') - ax[1].set_ylabel('xShift') - ax[2].plot(yShifts,marker='.',linestyle=' ') - ax[2].set_ylabel('yShift') - - elif type(snap) == str: - if snap == 'brightest': # each cutout has position snapped to brightest frame fit position - prf, cutouts = self._psf_initialise(size,(xPix,yPix),ref=(not diff)) # gather base PRF and the array of cutouts data - ind = np.where(cutouts==np.nanmax(cutouts))[0][0] - ref = cutouts[ind] - base = create_psf(prf,size) - base.psf_position(ref,ext_shift=self.shift[ind]) - elif snap == 'ref': - prf, cutouts = self._psf_initialise(size,(xPix,yPix),ref=True) # gather base PRF and the array of cutouts data - ref = cutouts[self.ref_ind] - base = create_psf(prf,size) - base.psf_position(ref) - if diff: - _, cutouts = self._psf_initialise(size,(xPix,yPix),ref=False) - if self.parallel: - inds = np.arange(len(cutouts)) - flux = Parallel(n_jobs=self.num_cores)(delayed(par_psf_flux)(cutouts[i],base,self.shift[i]) for i in inds) - else: - for i in range(len(cutouts)): - flux += [par_psf_flux(cutouts[i],base,self.shift[i])] - if plot: - plt.figure() - plt.plot(flux) - plt.ylabel('Flux') + # if isinstance(xPix,(list,np.ndarray)): + # self.moving_psf_phot() + + # else: + if snap == None: # if no snap, each cutout has their 
position fitted and considered during flux fitting + prf, cutouts = self._psf_initialise(size,(xPix,yPix)) # gather base PRF and the array of cutouts data + xShifts = [] + yShifts = [] + for cutout in tqdm(cutouts): + PSF = create_psf(prf,size) + PSF.psf_position(cutout) + PSF.psf_flux(cutout) + flux.append(PSF.flux) + yShifts.append(PSF.source_y) + xShifts.append(PSF.source_x) + if plot: + fig,ax = plt.subplots(ncols=3,figsize=(12,4)) + ax[0].plot(flux) + ax[0].set_ylabel('Flux') + ax[1].plot(xShifts,marker='.',linestyle=' ') + ax[1].set_ylabel('xShift') + ax[2].plot(yShifts,marker='.',linestyle=' ') + ax[2].set_ylabel('yShift') + + elif type(snap) == str: + if snap == 'brightest': # each cutout has position snapped to brightest frame fit position + prf, cutouts = self._psf_initialise(size,(xPix,yPix),ref=(not diff)) # gather base PRF and the array of cutouts data + ind = np.where(cutouts==np.nanmax(cutouts))[0][0] + ref = cutouts[ind] + base = create_psf(prf,size) + base.psf_position(ref,ext_shift=self.shift[ind]) + elif snap == 'ref': + prf, cutouts = self._psf_initialise(size,(xPix,yPix),ref=True) # gather base PRF and the array of cutouts data + ref = cutouts[self.ref_ind] + base = create_psf(prf,size) + base.psf_position(ref) + if diff: + _, cutouts = self._psf_initialise(size,(xPix,yPix),ref=False) + if self.parallel: + inds = np.arange(len(cutouts)) + flux = Parallel(n_jobs=self.num_cores)(delayed(par_psf_flux)(cutouts[i],base,self.shift[i]) for i in inds) + else: + for i in range(len(cutouts)): + flux += [par_psf_flux(cutouts[i],base,self.shift[i])] + if plot: + plt.figure() + plt.plot(flux) + plt.ylabel('Flux') - elif type(snap) == int: # each cutout has position snapped to 'snap' frame fit position (snap is integer) - base = create_psf(prf,size) - base.psf_position(cutouts[snap]) - for cutout in cutouts: - PSF = create_psf(prf,size) - PSF.source_x = base.source_x - PSF.source_y = base.source_y - PSF.psf_flux(cutout) - flux.append(PSF.flux) - if plot: - fig,ax = plt.subplots(ncols=1,figsize=(12,4)) - ax.plot(flux) - ax.set_ylabel('Flux') - flux = np.array(flux) + elif type(snap) == int: # each cutout has position snapped to 'snap' frame fit position (snap is integer) + base = create_psf(prf,size) + base.psf_position(cutouts[snap]) + for cutout in cutouts: + PSF = create_psf(prf,size) + PSF.source_x = base.source_x + PSF.source_y = base.source_y + PSF.psf_flux(cutout) + flux.append(PSF.flux) + if plot: + fig,ax = plt.subplots(ncols=1,figsize=(12,4)) + ax.plot(flux) + ax.set_ylabel('Flux') + flux = np.array(flux) return flux - - def reduce(self, aper = None, align = None, parallel = None, calibrate=None, bin_size = 0, plot = None, mask_scale = 1, ref_start=None, ref_stop=None, diff_lc = None,diff=None,verbose=None, tar_ap=3,sky_in=7,sky_out=11, @@ -1730,7 +1755,7 @@ def reduce(self, aper = None, align = None, parallel = None, calibrate=None, Parameters ---------- - aper : None, list or numpy array, optional + aper : None, list or array_like, optional Aperature to do photometry on. The default is None. align : TYPE, optional DESCRIPTION. The default is None. 
@@ -1931,15 +1956,23 @@ def reduce(self, aper = None, align = None, parallel = None, calibrate=None, # print('Retrieving external photometry') self.external_photometry() - except Exception: print(traceback.format_exc()) def external_photometry(self,size=50,phot=None): + """ + Perform aperture photometry on an external source - event_cutout((self.ra,self.dec),size,phot) + Parameters + ---------- + size : int, optional + Size of the cutout to use in arcseconds. The default is 50. + phot : str, optional + Type of photometry to choose, skymapper or Panstarrs. The default is None. + """ + event_cutout((self.ra,self.dec),size,phot) def make_lc(self,aperture = None,bin_size=0,zeropoint=None,scale='counts',clip = False): """ @@ -1947,21 +1980,21 @@ def make_lc(self,aperture = None,bin_size=0,zeropoint=None,scale='counts',clip = Parameters ---------- - aperture : TYPE, optional - DESCRIPTION. The default is None. - bin_size : TYPE, optional + aperture : array_like, optional + An array of aperture sizes to use. The default is None. + bin_size : int, optional Number of points to average. The default is 0. - zeropoint : TYPE, optional - DESCRIPTION. The default is None. + zeropoint : float, optional + The calculated zeropoint of the data. The default is None. scale : bool, optional If True the light curve will be normalised to the median. The default is 'counts'. Valid options = [counts, magnitude, flux, normalise] - clip : TYPE, optional - DESCRIPTION. The default is False. + clip : bool, optional + Whether to clip the data. The default is False. Returns ------- - self.lc : array + self.lc : array_like light curve for the pixels defined by the aperture """ @@ -1987,7 +2020,7 @@ def make_lc(self,aperture = None,bin_size=0,zeropoint=None,scale='counts',clip = mask = ~sigma_mask(lc) lc[mask] = np.nan if bin_size > 1: - lc, t = bin_data(t,lc,bin_size) + lc, t = self.bin_data(t,lc,bin_size) lc = np.array([t,lc]) if (zeropoint is not None) & (scale=='mag'): lc[1,:] = -2.5*np.log10(lc[1,:]) + zeropoint @@ -2000,13 +2033,14 @@ def lc_events(self,lc = None,err=None,duration=10,sig=5): Parameters ---------- - lc : TYPE, optional - DESCRIPTION. The default is None. - err : numpy array, optional + lc : array_like, optional + lightcurve with the shape of (2,n), where the first index is time and the second is + flux. The default is None. + err : array_like, optional Flux error to be used in weighting of events. The default is None. duration : int, optional How long an event needs to last for before being detected. The default is 10. - sig : float64, optional + sig : Float, optional Significance of the detection above the background. The default is 5. Returns @@ -2044,7 +2078,7 @@ def event_plotter(self,**kwargs): Parameters ---------- - **kwargs : TYPE + **kwargs : Various Keyword arguments, takes arguments for lightcurve events. Returns @@ -2061,7 +2095,6 @@ def event_plotter(self,**kwargs): plt.xlabel('MJD') plt.ylabel('Flux') - def detrend_transient(self,lc=None,err=None,Mask=None,variable=False,sig = 5, sig_up = 3, sig_low = 10, tail_length='auto',plot=False): """ @@ -2070,20 +2103,20 @@ def detrend_transient(self,lc=None,err=None,Mask=None,variable=False,sig = 5, Parameters ---------- - lc : numpy array, optional + lc : array_like, optional lightcurve with the shape of (2,n), where the first index is time and the second is flux. The default is None. - err : TYPE, optional - DESCRIPTION. The default is None. - Mask : TYPE, optional - DESCRIPTION. The default is None. - variable : TYPE, optional - DESCRIPTION. 
The default is False. - sig : TYPE, optional - DESCRIPTION. The default is None. - sig_up : float64, optional + err : array_like, optional + Flux error to be used in weighting of events of size (n,). The default is None. + Mask : array_like, optional + 1d one dimensional mask of the lightcurve to not be included in the detrending. The default is None. + variable : bool, optional + Determine whether the object is variable. The default is False. + sig : float, optional + Significance of the event before it gets excluded. The default is None. + sig_up : Float, optional Upper sigma clip value . The default is 5. - sig_low : float64, optional + sig_low : Float, optional Lower sigma clip value. The default is 10. tail_length : str OR int, optional Option for setting the buffer zone of points after the peak. If it is 'auto' it @@ -2097,7 +2130,7 @@ def detrend_transient(self,lc=None,err=None,Mask=None,variable=False,sig = 5, Returns ------- - detrend : numpy array + detrend : array_like Lightcurve with the stellar trends subtracted. """ @@ -2212,20 +2245,20 @@ def detrend_stellar_var(self,lc=None,err=None,Mask=None,variable=False,sig = Non Parameters ---------- - lc : numpy array, optional + lc : array_like, optional lightcurve with the shape of (2,n), where the first index is time and the second is flux. The default is None. - err : TYPE, optional - DESCRIPTION. The default is None. - Mask : TYPE, optional - DESCRIPTION. The default is None. - variable : TYPE, optional - DESCRIPTION. The default is False. - sig : TYPE, optional - DESCRIPTION. The default is None. - sig_up : float64, optional + err : array_like, optional + Flux error to be used in weighting of events of size (n,). The default is None. + Mask : array_like, optional + 1d one dimensional mask of the lightcurve to not be included in the detrending. The default is None. + variable : bool, optional + Determine whether the object is variable. The default is False. + sig : float, optional + Significance of the event before it gets excluded. The default is None. + sig_up : Float, optional Upper sigma clip value . The default is 5. - sig_low : float64, optional + sig_low : Float, optional Lower sigma clip value. The default is 10. tail_length : str OR int, optional Option for setting the buffer zone of points after the peak. If it is 'auto' it @@ -2235,11 +2268,11 @@ def detrend_stellar_var(self,lc=None,err=None,Mask=None,variable=False,sig = Non Raises ------ ValueError - DESCRIPTION. + "tail_length must be either 'auto' or an integer". Returns ------- - detrend : numpy array + detrend : array_like Lightcurve with the stellar trends subtracted. """ @@ -2323,21 +2356,20 @@ def detrend_stellar_var(self,lc=None,err=None,Mask=None,variable=False,sig = Non detrend[1,:] = lc[1,:] - trends return detrend - def bin_interp(self,lc=None,time_bin=6/24): """ Grabs the binned data and interpolates it to the original time values. Parameters ---------- - lc : array, optional + lc : array_like, optional The lightcurve of the target in the form of three rows: mjd, flux, flux error. The default is None. time_bin : float, optional The time (in days) for each bin to be for averaging data. The default is 6/24. Returns ------- - smooth : array + smooth : array_like Binned data in the form of three rows: mjd, flux, flux error. """ @@ -2351,19 +2383,18 @@ def bin_interp(self,lc=None,time_bin=6/24): smooth = f1(lc[0]) return smooth - def detrend_star(self,lc=None): """ Removes trends, e.g. background or stellar variability from the lightcurve data. 
Parameters ---------- - lc : array, optional + lc : array_like, optional The lightcurve of the target in the form of three rows: mjd, flux, flux error. The default is None. Returns ------- - detrended : array + detrended : array_like The lightcurve data where datapoints with strict gradient changes are ignored and a Savitzky-Golay filter is then applied to smooth out the data. @@ -2515,7 +2546,6 @@ def isolated_star_lcs(self): return final_flux, final_d - def field_calibrate(self,zp_single=True,plot=None,savename=None): """ In-situ flux calibration for TESSreduce light curves. This uses the @@ -2767,11 +2797,11 @@ def to_mag(self,zp=None,zp_e=0): zp : float, optional Zeropoint to use for conversion. If None, use the default zp from the object. The default is None. zp_e : float, optional - Error on the zeropoint to use for conversion. If None, use the default zp_e from the object.. The default is 0. + Error on the zeropoint to use for conversion. If None, use the default zp_e from the object. The default is 0. Returns ------- - lc : numpy array + lc : array_like Lightcurve in magnitude space. mjd, magnitude, magnitude_error. """ @@ -2820,12 +2850,12 @@ def to_flux(self,zp=None,zp_e=0,flux_type='mjy',plot=False): Raises ------ ValueError - DESCRIPTION. + Raised if flux_type is not a valid option. Valid options are: 'jy', 'mjy', 'cgs/erg', 'tess/counts'. Returns ------- None. - self.lc : array + self.lc : array_like Returns the lightcurve in flux units, with mjd, flux, flux error being the three rows. self.zp : float Returns the zeropoint used for the magnitude conversion.
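
For reference, a minimal usage sketch of the API touched by this patch series. It is illustrative only and not part of the patches: the target name and pixel coordinates are hypothetical, the top-level names (tr.sn_lookup, tr.tessreduce) are assumed from common usage, and the call order (running reduce() before correlation_corrector()) is an assumption based on the docstrings above rather than on a documented workflow.

    import tessreduce as tr

    # Look up TESS observations for a (hypothetical) target; obs_list is the array
    # described in the tessreduce __init__ docstring.
    obs = tr.sn_lookup('SN 2020xyz')
    tess = tr.tessreduce(obs_list=obs, size=90)    # 90x90 pixel cutout

    tess.reduce()                                  # standard reduction pass
    tess.correlation_corrector(limit=0.8)          # refit pixels with flux-background |r| > 0.8
    lc = tess.make_lc(bin_size=0, scale='counts')  # light curve from the corrected flux

    # Forced PSF photometry near the centre of the (assumed) 90x90 cutout.
    flux = tess.psf_photometry(xPix=45, yPix=45, size=5, snap='brightest')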