From bed3e38a814e1311f5d3bff65e0e65e3d8fd5ea5 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Thu, 17 Aug 2023 19:25:58 -0700 Subject: [PATCH 01/86] initial commit --- modules/Utils/analyze_wls.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 modules/Utils/analyze_wls.py diff --git a/modules/Utils/analyze_wls.py b/modules/Utils/analyze_wls.py new file mode 100644 index 000000000..52b2b3fed --- /dev/null +++ b/modules/Utils/analyze_wls.py @@ -0,0 +1,32 @@ +import numpy as np +import matplotlib.pyplot as plt +from modules.Utils.kpf_parse import HeaderParse +from datetime import datetime + +class AnalyzeWLS: + + """ + Description: + This class contains functions to analyze wavelength solutions + (storing them as attributes) and functions to plot the results. + + Arguments: + L1 - an L1 object + + Attributes: + None so far + """ + + def __init__(self, L1, logger=None): + if logger: + self.logger = logger + self.logger.debug('Initializing AnalyzeWLS object.') + else: + self.logger = None + self.L1 = L1 + primary_header = HeaderParse(L1, 'PRIMARY') + self.header = primary_header.header + self.name = primary_header.get_name() + + #self.ObsID = primary_header.get_obsid() + # need a filename instead or in addition From e2e7ba8b63976a749b63daa56d2b17ea90f1c6d6 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Mon, 21 Aug 2023 21:01:57 -0700 Subject: [PATCH 02/86] bugfix for problem with df_GUIDER.timestamp = 0.0 --- modules/Utils/analyze_guider.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/modules/Utils/analyze_guider.py b/modules/Utils/analyze_guider.py index 7e8a29fb9..ef3e6d4c3 100644 --- a/modules/Utils/analyze_guider.py +++ b/modules/Utils/analyze_guider.py @@ -9,7 +9,7 @@ from astropy.time import Time from modules.Utils.kpf_parse import HeaderParse from modules.Utils.utils import get_moon_sep, get_sun_alt - + class AnalyzeGuider: """ @@ -27,10 +27,9 @@ class AnalyzeGuider: def __init__(self, L0, logger=None): if logger: self.logger = logger - self.logger.debug('AnalyzeGuider class constructor') + self.logger.debug('Initiating AnanalyzeGuider object') else: self.logger = None - self.logger.info('Initiating AnanalyzeGuider object.') self.L0 = L0 self.pixel_scale = 0.056 # arcsec per pixel for the CRED-2 imager on the KPF FIU @@ -53,6 +52,7 @@ def __init__(self, L0, logger=None): #self.df_GUIDER = Table.read(self.L0, hdu='guider_cube_origins').to_pandas() #self.df_GUIDER = self.L0['guider_cube_origins'] self.df_GUIDER = self.L0['GUIDER_CUBE_ORIGINS'] + self.df_GUIDER = self.df_GUIDER[self.df_GUIDER.timestamp != 0.0] # remove bogus lines self.good_fit = None @@ -222,7 +222,8 @@ def plot_guider_image(self, fig_path=None, show_plot=False): if fig_path != None: t0 = time.process_time() plt.savefig(fig_path, dpi=288, facecolor='w') - self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}') + if self.logger: + self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}') if show_plot == True: plt.show() plt.close('all') @@ -315,7 +316,8 @@ def plot_guider_error_time_series_simple(self, fig_path=None, show_plot=False): if fig_path != None: t0 = time.process_time() plt.savefig(fig_path, dpi=144, facecolor='w') - self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}') + if self.logger: + self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}') if show_plot == True: plt.show() plt.close('all') @@ -477,7 +479,8 @@ def 
plot_guider_error_time_series(self, fig_path=None, show_plot=False): if fig_path != None: t0 = time.process_time() plt.savefig(fig_path, dpi=144, facecolor='w') - self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}') + if self.logger: + self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}') if show_plot == True: plt.show() plt.close('all') @@ -518,7 +521,8 @@ def plot_guider_flux_time_series(self, fig_path=None, show_plot=False): if fig_path != None: t0 = time.process_time() plt.savefig(fig_path, dpi=144, facecolor='w') - self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}') + if self.logger: + self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}') if show_plot == True: plt.show() plt.close('all') @@ -559,7 +563,8 @@ def plot_guider_fwhm_time_series(self, fig_path=None, show_plot=False): if fig_path != None: t0 = time.process_time() plt.savefig(fig_path, dpi=144, facecolor='w') - self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}') + if self.logger: + self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}') if show_plot == True: plt.show() plt.close('all') From 577e389bbc216956604e5cae4f542e925a7f1bcb Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Mon, 21 Aug 2023 21:03:27 -0700 Subject: [PATCH 03/86] bugfix --- modules/Utils/analyze_wls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/Utils/analyze_wls.py b/modules/Utils/analyze_wls.py index 52b2b3fed..a2c5c616b 100644 --- a/modules/Utils/analyze_wls.py +++ b/modules/Utils/analyze_wls.py @@ -18,7 +18,7 @@ class AnalyzeWLS: """ def __init__(self, L1, logger=None): - if logger: + if self.logger: self.logger = logger self.logger.debug('Initializing AnalyzeWLS object.') else: From 5be1b78307ffdad0492c76b3917dc05e50166ccf Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Mon, 21 Aug 2023 21:04:40 -0700 Subject: [PATCH 04/86] for testing --- configs/quicklook_match.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configs/quicklook_match.cfg b/configs/quicklook_match.cfg index f9dde63ad..6048799dd 100644 --- a/configs/quicklook_match.cfg +++ b/configs/quicklook_match.cfg @@ -7,7 +7,7 @@ log_directory = /data/logs_QLP/ [ARGUMENT] # see quicklook_match.recipe for a description of how to set fullpath -fullpath = '/data/??/202?????/KP.20230813.53844.48*.fits' +fullpath = '/data/??/202?????/KP.20230820.30050.45*.fits' #fullpath = '/data/??/202?????/KP.20230813.51416.47*.fits' [MODULE_CONFIGS] From a165795470be9033c4987aea1c8b7e926e158f57 Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Tue, 22 Aug 2023 10:43:29 -0700 Subject: [PATCH 05/86] tweak config objects on initialization --- kpfpipe/cli.py | 18 +++++++++++++++--- kpfpipe/config/pipeline_config.py | 4 ---- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/kpfpipe/cli.py b/kpfpipe/cli.py index 863595fc6..6ab07fe1e 100755 --- a/kpfpipe/cli.py +++ b/kpfpipe/cli.py @@ -7,6 +7,7 @@ from copy import copy import argparse import traceback +import random from datetime import datetime import time import threading @@ -27,6 +28,7 @@ # This is the default framework configuration file path framework_config = 'configs/framework.cfg' +framework_config_multi = 'configs/framework_multi.cfg' framework_logcfg= 'configs/framework_logger.cfg' pipeline_logcfg = 'configs/logger.cfg' @@ -161,15 +163,25 @@ def main(): recipe = args.recipe datestr = datetime.now().strftime(format='%Y%m%d') + # randomize 
queue manager port to avoid crosstalk between pipeline instances + port = str(random.randint(50101, 51101)) + # Using the multiprocessing library, create the specified number of instances if args.watch and args.ncpus > 1: - frame_config = 'configs/framework_multi.cfg' + frame_config = ConfigClass(framework_config_multi) + print(f"Setting queue manager port to {port}") + frame_config['DEFAULT']['queue_manager_portnr'] = port for i in range(args.ncpus): # This could be done with a careful use of subprocess.Popen, if that's more your style p = Process(target=worker, args=(i, pipe_config, framework_logcfg, frame_config)) p.start() else: - frame_config = framework_config + frame_config = ConfigClass(framework_config) + + if args.reprocess: + print(f"Setting queue manager to shutdown after reprocessing.") + frame_config['DEFAULT']['event_timeout'] = '5' + frame_config['DEFAULT']['no_event_event'] = 'None' # Try to initialize the framework try: @@ -186,7 +198,7 @@ def main(): if args.watch != None: framework.logger.info("Starting queue manager only, no processing") - framework._get_queue_manager(ConfigClass(frame_config)) + framework._get_queue_manager(frame_config) framework.pipeline.logger.info("Waiting for files to appear in {}".format(args.watch)) framework.pipeline.logger.info("Getting existing file list.") diff --git a/kpfpipe/config/pipeline_config.py b/kpfpipe/config/pipeline_config.py index 5815c843f..fd57c6104 100644 --- a/kpfpipe/config/pipeline_config.py +++ b/kpfpipe/config/pipeline_config.py @@ -1,7 +1,3 @@ -# pipeline_config.py -""" -(explanation here) -""" import keckdrpframework.config.framework_config as fc From 1e6224bb41e1d0b93d4199e0af010ee996e3547e Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Tue, 22 Aug 2023 11:14:07 -0700 Subject: [PATCH 06/86] randomize port and add launch QLP script --- kpfpipe/cli.py | 2 +- scripts/launch_QLP.sh | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100755 scripts/launch_QLP.sh diff --git a/kpfpipe/cli.py b/kpfpipe/cli.py index 6ab07fe1e..0cdcdf88d 100755 --- a/kpfpipe/cli.py +++ b/kpfpipe/cli.py @@ -164,7 +164,7 @@ def main(): datestr = datetime.now().strftime(format='%Y%m%d') # randomize queue manager port to avoid crosstalk between pipeline instances - port = str(random.randint(50101, 51101)) + port = str(random.randint(50102, 50151)) # Using the multiprocessing library, create the specified number of instances if args.watch and args.ncpus > 1: diff --git a/scripts/launch_QLP.sh b/scripts/launch_QLP.sh new file mode 100755 index 000000000..a986cf18f --- /dev/null +++ b/scripts/launch_QLP.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# This script is designed to launch the 4 QLP instances with a single command + +ncpus=8 +data_levels=("L0" "2D" "L1" "L2") + +for lvl in "${data_levels[@]}"; do + cmd="kpf --ncpus=${ncpus} --watch ${KPFPIPE_DATA}/${lvl}/ -r recipes/quicklook_watch_dir.recipe -c configs/quicklook_watch_dir.cfg" + $cmd & +done From 9a86580b3b576bc98a816e8db7044331c3e8e770 Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Tue, 22 Aug 2023 14:53:20 -0700 Subject: [PATCH 07/86] crosstalk under control now --- configs/framework_multi.cfg | 2 +- configs/kpf_drp.cfg | 2 +- configs/kpf_masters_l1.cfg | 2 +- kpfpipe/cli.py | 13 +++++-------- scripts/launch_QLP.sh | 2 +- 5 files changed, 9 insertions(+), 12 deletions(-) diff --git a/configs/framework_multi.cfg b/configs/framework_multi.cfg index 0fb227313..c58960e4f 100644 --- a/configs/framework_multi.cfg +++ b/configs/framework_multi.cfg @@ -103,5 +103,5 @@ 
http_defaultFile = "" # want_multiprocessing = True queue_manager_hostname = 'localhost' -queue_manager_portnr = 50131 +queue_manager_portnr = 50101 queue_manager_auth_code = b"a very long authentication code" diff --git a/configs/kpf_drp.cfg b/configs/kpf_drp.cfg index 7f2f5c49a..4243009e2 100644 --- a/configs/kpf_drp.cfg +++ b/configs/kpf_drp.cfg @@ -126,7 +126,7 @@ do_wavecopy_in_sp = True #do_qlp = True do_bk_subtraction = True do_bc = True -do_outlier_rejection = True +do_outlier_rejection = False # for L0->2D process [WATCHFOR_L0] diff --git a/configs/kpf_masters_l1.cfg b/configs/kpf_masters_l1.cfg index 99cca439a..98e236f07 100644 --- a/configs/kpf_masters_l1.cfg +++ b/configs/kpf_masters_l1.cfg @@ -22,7 +22,7 @@ input_dir_root = /data/masters/ do_l0_to_2d = False do_order_trace = True do_spectral_extraction = True -do_outlier_rejection = True +do_outlier_rejection = False do_rv = True do_rv_reweighting = False do_hk = True diff --git a/kpfpipe/cli.py b/kpfpipe/cli.py index 0cdcdf88d..303d079c0 100755 --- a/kpfpipe/cli.py +++ b/kpfpipe/cli.py @@ -47,7 +47,6 @@ def _parseArguments(in_args: list) -> argparse.Namespace: parser.add_argument('--date', dest='date', type=str, default=None, help="Date for the data to be processed.") parser.add_argument('-n', '--ncpus', dest='ncpus', type=int, default=1, help="Number of CPU cores to utilize.") - args = parser.parse_args(in_args[1:]) return args @@ -164,13 +163,14 @@ def main(): datestr = datetime.now().strftime(format='%Y%m%d') # randomize queue manager port to avoid crosstalk between pipeline instances - port = str(random.randint(50102, 50151)) + port = str(random.randint(50101, 60101)) + frame_config = ConfigClass(framework_config_multi) + print(f"Setting queue manager port to {port}") + frame_config['DEFAULT']['queue_manager_portnr'] = port + frame_config['DEFAULT']['queue_manager_auth_code'] = str(hash(port)) # Using the multiprocessing library, create the specified number of instances if args.watch and args.ncpus > 1: - frame_config = ConfigClass(framework_config_multi) - print(f"Setting queue manager port to {port}") - frame_config['DEFAULT']['queue_manager_portnr'] = port for i in range(args.ncpus): # This could be done with a careful use of subprocess.Popen, if that's more your style p = Process(target=worker, args=(i, pipe_config, framework_logcfg, frame_config)) @@ -197,9 +197,6 @@ def main(): if args.watch != None: - framework.logger.info("Starting queue manager only, no processing") - framework._get_queue_manager(frame_config) - framework.pipeline.logger.info("Waiting for files to appear in {}".format(args.watch)) framework.pipeline.logger.info("Getting existing file list.") infiles = sorted(glob(args.watch + "*.fits"), reverse=True) + \ diff --git a/scripts/launch_QLP.sh b/scripts/launch_QLP.sh index a986cf18f..77682e42d 100755 --- a/scripts/launch_QLP.sh +++ b/scripts/launch_QLP.sh @@ -2,7 +2,7 @@ # This script is designed to launch the 4 QLP instances with a single command -ncpus=8 +ncpus=1 data_levels=("L0" "2D" "L1" "L2") for lvl in "${data_levels[@]}"; do From bea6518e4785a9642b1c742c10d8591099540913 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Wed, 23 Aug 2023 16:44:04 -0700 Subject: [PATCH 08/86] removed Nightly QLP parts because these are not yet supported in the new QLP code --- recipes/wls_auto.recipe | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/recipes/wls_auto.recipe b/recipes/wls_auto.recipe index 5dc6224c0..db6c94343 100755 --- a/recipes/wls_auto.recipe +++ b/recipes/wls_auto.recipe 
@@ -1,6 +1,6 @@ from modules.wavelength_cal.src.wavelength_cal import WaveCalibrate from modules.Utils.string_proc import str_replace -from modules.quicklook.src.nightly_prim import Nightly_summary +#from modules.quicklook.src.nightly_prim import Nightly_summary # this is not supported in the updated QLP. It will be added back soon. masters_dir = config.ARGUMENT.masters_dir output_dir = config.ARGUMENT.output_dir + '/' @@ -73,4 +73,4 @@ for cal_type in ['ThAr', 'LFC']: if cal_type == 'Thar': master_wls_file = output_filename -Nightly_summary(date_dir) \ No newline at end of file +#Nightly_summary(date_dir) \ No newline at end of file From ef3084559d477c6cb1c407f2c0d17c1c4364adc8 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Wed, 23 Aug 2023 16:46:27 -0700 Subject: [PATCH 09/86] removed commented out lines re: QLP --- recipes/kpf_drp.recipe | 63 ++++++------------------------------------ 1 file changed, 9 insertions(+), 54 deletions(-) diff --git a/recipes/kpf_drp.recipe b/recipes/kpf_drp.recipe index 36ebcf47e..97f83faca 100644 --- a/recipes/kpf_drp.recipe +++ b/recipes/kpf_drp.recipe @@ -7,7 +7,6 @@ from modules.radial_velocity.src.radial_velocity import RadialVelocity from modules.radial_velocity.src.radial_velocity_reweighting_ref import RadialVelocityReweightingRef from modules.radial_velocoty.src.radial_velocity_reweighting import RadialVelocityReweighting from modules.ca_hk.src.ca_hk_extraction import CaHKExtraction -#from modules.quicklook.src.quick_prim import Quicklook from modules.Utils.string_proc import str_replace from modules.Utils.string_proc import date_from_kpffile from modules.Utils.data_handler import ExtCopy @@ -17,14 +16,11 @@ from modules.Utils.data_handler import SetHeaderValue from modules.Utils.data_handler import SelectObs from modules.Utils.orientation_ref_reader import OrientationReference from modules.Utils.overscan_subtract import OverscanSubtraction -from modules.Utils.amplifier_mask import AmplifierMask from modules.image_processing.src.image_process import ImageProcessing from modules.order_trace.src.order_mask import OrderMask from modules.spectral_extraction.src.bary_corr import BaryCorrTable from database.modules.query_db_nearest_master_files.src.query_db_nearest_master_files_framework import QueryDBNearestMasterFilesFramework from database.modules.query_db_one_nearest_master_file.src.query_db_one_nearest_master_file_framework import QueryDBOneNearestMasterFileFramework -from modules.quality_control_exposure.src.quality_control_exposure_framework import QualityControlExposureFramework -from modules.query_db_l0_file.src.query_db_l0_file_framework import QueryDBL0FileFramework # set the flags for each process # note: no rv reweighting is made here. @@ -39,7 +35,6 @@ do_hk = config.ARGUMENT.do_hk #do_qlp = config.ARGUMENT.do_qlp do_bk_subtraction = config.ARGUMENT.do_bk_subtraction do_bc = config.ARGUMENT.do_bc -do_outlier = config.ARGUMENT.do_outlier_rejection # data type of data model, 'KPF' for kpf data. 
data_type = config.ARGUMENT.data_type @@ -160,7 +155,7 @@ for wls in wls_list: ## do L0->2D conversion for watch mode -if do_l0_to_2d: +if context.watch and do_l0_to_2d: invoke_subrecipe("./recipes/watchfor_kpf_l0.recipe") @@ -190,7 +185,10 @@ if do_db_query_for_one_nearest_wls_master_file and do_spectral_extraction: db_wls_master_file = db_wls_rec[6] wave_fits = [] for wls in wls_list: - wave_fits = wave_fits + [db_wls_master_file] + # Remove /masters from path returned from QueryDBOneNearestMasterFileFramework + # since it is already included in output_dir. + modified_file_path = str_replace(db_wls_master_file, '/masters/masters', '/masters') + wave_fits = wave_fits + [output_dir + modified_file_path] #### variables related to input/output and process conditions for order_trace, spectral extraction, CA-HK, rv and qlp @@ -224,7 +222,6 @@ if lev0_file_path: lev0_science_pattern = lev0_file_path else: lev0_science_pattern = input_2d_dir + '*' + fits_ext - output_extraction = output_dir + config.ARGUMENT.output_extraction + date_dir wave_to_ext = config.ARGUMENT.wave_to_ext orderlet_widths_ccds = config.ARGUMENT.orderlet_widths_ccds @@ -289,16 +286,6 @@ hk_wavelength_tables = [] for hk_w in hk_wavelength_csvs: hk_wavelength_tables = hk_wavelength_tables + [input_hk_data_dir + hk_w] -## for qlp -output_qlp = output_dir + config.ARGUMENT.output_qlp + date_dir + "/" -end_of_night_summary = False - -#### process for order trace, spectral extraction and CA-HK, radial velocity, and qlp -## qlp are invoked at the following processing spots, -# - before spectral extraction -# - after spectral extraction -# - after radial velocity - ## do order trace # description: find traces from L0 flat data for each ccd, and produce rectified L0 flat data. # input: L0 flat @@ -420,7 +407,6 @@ if do_order_trace: # lev1_list = [] -outlier_path = None if do_spectral_extraction or do_hk or do_bc: lev0_flat_rect = None @@ -471,11 +457,11 @@ if do_spectral_extraction or do_hk or do_bc: output_lev1_file = output_extraction + lev1_stem + lev1_stem_suffix + fits_ext is_raw = 1 if rect_method in input_lev0_file: - is_raw = 0 + is_raw = 0 if lev1_stem_suffix in input_lev0_file: - is_raw = 0 + is_raw = 0 if lev2_stem_suffix in input_lev0_file: - is_raw = 0 + is_raw = 0 # produce level 1 by spectral extraction if overwrite or L1 file doesn't exist if (is_raw == 1) and (overwrite or not exists(output_lev1_file)): @@ -485,10 +471,6 @@ if do_spectral_extraction or do_hk or do_bc: if do_spectral_extraction and (lev0_flat_rect != None) and b_all_traces: b_level1_sp = True -# -# if do_qlp and (b_level1_sp or do_hk or do_bc): -# Quicklook(lev0_data, output_qlp, end_of_night_summary) - # loop through ccd to do spectral extraction if b_level1_sp: # do background subtraction @@ -509,11 +491,6 @@ if do_spectral_extraction or do_hk or do_bc: lev0_data = ImageProcessing(lev0_data, order_mask, ccd_list, data_type, None) op_data = None - if outlier_path == None: - outlier_rejection_file = '' - else: - outlier_rejection_file = outlier_path + short_lev0_file - for idx in ccd_idx: ccd = ccd_list[idx] order_names = orderlet_names[idx] @@ -528,8 +505,7 @@ if do_spectral_extraction or do_hk or do_bc: start_order=start_order[idx], ccd_index = idx, rectification_method=rect_method, extraction_method=extract_method, - clip_file=clip_file, data_extension=ccd, trace_file=trace_file, - do_outlier_rejection=do_outlier, outlier_file=outlier_rejection_file) + clip_file=clip_file, data_extension=ccd, trace_file=trace_file) if op_data != None: @@ -575,10 
+551,6 @@ if do_spectral_extraction or do_hk or do_bc: if output_data != None: result = to_fits(output_data, output_lev1_file) - if exists(output_lev1_file): - lev1_list = lev1_list + [output_lev1_file] -# if do_qlp: -# Quicklook(lev0_data, output_qlp, end_of_night_summary) ## do radial velocity or rv reweighting: # description: do radial velocity on the specified extensions of L1 data and produce L2 data. @@ -640,10 +612,6 @@ if do_rv or do_rv_reweighting: rv_data = None if overwrite or not exists(output_lev2_file): lev1_data = kpf1_from_fits(input_lev1_file, data_type=data_type) - - # Mask bad amplifiers in 4 amp modes - lev1_data = AmplifierMask(lev1_data) - rv_init = RadialVelocityInit(start_time="2021-03-01", l1_data=lev1_data, bc_corr_path=bc_path, test_data_path=rv_star_dir) if rv_init != None: @@ -663,18 +631,6 @@ if do_rv or do_rv_reweighting: if rv_data != None: result = to_fits(rv_data, output_lev2_file) -# if do_qlp: -# # get lev0 w/o _2D suffix -# short_lev0 = str_replace(short_lev1, lev1_stem_suffix, "") -# input_lev0_file = input_2d_dir + short_lev0 -# if not find_files(input_lev0_file): -# short_lev0 = str_replace(short_lev1, lev1_stem_suffix, lev0_stem_suffix) -# input_lev0_file = input_2d_dir + short_lev0 -# if find_files(input_lev0_file): -# lev0_data = kpf0_from_fits(input_lev0_file,data_type=data_type) -# Quicklook(lev0_data, output_qlp, end_of_night_summary) - - # do reweighting on each L2 in case the rewighting directory is different from the unweighted L2. if 'static' in reweighting_method and do_rv_reweighting: if exists(output_lev2_file) and output_rv != output_rv_rw: @@ -695,4 +651,3 @@ if do_rv or do_rv_reweighting: reweighting_masks=rw_mask) if lev2_rv is not None: result = to_fits(lev2_rv, reweighted_output) - From 6a21036eb095b7c11ff44af560d3c3247570db7d Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Wed, 23 Aug 2023 17:02:49 -0700 Subject: [PATCH 10/86] removed mentions of QLP --- configs/kpf_drp.cfg | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/configs/kpf_drp.cfg b/configs/kpf_drp.cfg index 7f2f5c49a..aad3f5493 100644 --- a/configs/kpf_drp.cfg +++ b/configs/kpf_drp.cfg @@ -16,13 +16,12 @@ output_dir_flat = /data/ input_dir_root = /data/2D/ masters_dir = /data/masters/ -# the subdirectory containing order trace result, L1 data, L2 data, L2 reweighted data, qlp, bary data +# the subdirectory containing order trace result, L1 data, L2 data, L2 reweighted data, bary data # output_trace = order_trace/ output_trace = masters/ output_extraction = L1/ output_rv = L2/ output_rv_reweighting='' -output_qlp = QLP/ output_barycorr = bary/ # output_clip containing files with polygon clipping information when rectification_method is 'normal' or vertical' @@ -123,7 +122,6 @@ do_rv = True do_rv_reweighting = True do_hk = True do_wavecopy_in_sp = True -#do_qlp = True do_bk_subtraction = True do_bc = True do_outlier_rejection = True From af2f8c0962b345d66eb010af2c08fb2151598de8 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Wed, 23 Aug 2023 17:04:45 -0700 Subject: [PATCH 11/86] add back some lines - previous commit had old version of file --- recipes/kpf_drp.recipe | 36 +++++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/recipes/kpf_drp.recipe b/recipes/kpf_drp.recipe index 97f83faca..4734962ac 100644 --- a/recipes/kpf_drp.recipe +++ b/recipes/kpf_drp.recipe @@ -16,11 +16,14 @@ from modules.Utils.data_handler import SetHeaderValue from modules.Utils.data_handler import SelectObs from 
modules.Utils.orientation_ref_reader import OrientationReference from modules.Utils.overscan_subtract import OverscanSubtraction +from modules.Utils.amplifier_mask import AmplifierMask from modules.image_processing.src.image_process import ImageProcessing from modules.order_trace.src.order_mask import OrderMask from modules.spectral_extraction.src.bary_corr import BaryCorrTable from database.modules.query_db_nearest_master_files.src.query_db_nearest_master_files_framework import QueryDBNearestMasterFilesFramework from database.modules.query_db_one_nearest_master_file.src.query_db_one_nearest_master_file_framework import QueryDBOneNearestMasterFileFramework +from modules.quality_control_exposure.src.quality_control_exposure_framework import QualityControlExposureFramework +from modules.query_db_l0_file.src.query_db_l0_file_framework import QueryDBL0FileFramework # set the flags for each process # note: no rv reweighting is made here. @@ -32,9 +35,9 @@ do_spectral_extraction = config.ARGUMENT.do_spectral_extraction do_rv = config.ARGUMENT.do_rv do_rv_reweighting = config.ARGUMENT.do_rv_reweighting do_hk = config.ARGUMENT.do_hk -#do_qlp = config.ARGUMENT.do_qlp do_bk_subtraction = config.ARGUMENT.do_bk_subtraction do_bc = config.ARGUMENT.do_bc +do_outlier = config.ARGUMENT.do_outlier_rejection # data type of data model, 'KPF' for kpf data. data_type = config.ARGUMENT.data_type @@ -155,7 +158,7 @@ for wls in wls_list: ## do L0->2D conversion for watch mode -if context.watch and do_l0_to_2d: +if do_l0_to_2d: invoke_subrecipe("./recipes/watchfor_kpf_l0.recipe") @@ -185,13 +188,10 @@ if do_db_query_for_one_nearest_wls_master_file and do_spectral_extraction: db_wls_master_file = db_wls_rec[6] wave_fits = [] for wls in wls_list: - # Remove /masters from path returned from QueryDBOneNearestMasterFileFramework - # since it is already included in output_dir. 
- modified_file_path = str_replace(db_wls_master_file, '/masters/masters', '/masters') - wave_fits = wave_fits + [output_dir + modified_file_path] + wave_fits = wave_fits + [db_wls_master_file] -#### variables related to input/output and process conditions for order_trace, spectral extraction, CA-HK, rv and qlp +#### variables related to input/output and process conditions for order_trace, spectral extraction, CA-HK, rv ## for order trace: input, output and image area # data_row_range, data_col_range: image area for order trace process: @@ -222,6 +222,7 @@ if lev0_file_path: lev0_science_pattern = lev0_file_path else: lev0_science_pattern = input_2d_dir + '*' + fits_ext + output_extraction = output_dir + config.ARGUMENT.output_extraction + date_dir wave_to_ext = config.ARGUMENT.wave_to_ext orderlet_widths_ccds = config.ARGUMENT.orderlet_widths_ccds @@ -407,6 +408,7 @@ if do_order_trace: # lev1_list = [] +outlier_path = None if do_spectral_extraction or do_hk or do_bc: lev0_flat_rect = None @@ -457,11 +459,11 @@ if do_spectral_extraction or do_hk or do_bc: output_lev1_file = output_extraction + lev1_stem + lev1_stem_suffix + fits_ext is_raw = 1 if rect_method in input_lev0_file: - is_raw = 0 + is_raw = 0 if lev1_stem_suffix in input_lev0_file: - is_raw = 0 + is_raw = 0 if lev2_stem_suffix in input_lev0_file: - is_raw = 0 + is_raw = 0 # produce level 1 by spectral extraction if overwrite or L1 file doesn't exist if (is_raw == 1) and (overwrite or not exists(output_lev1_file)): @@ -491,6 +493,11 @@ if do_spectral_extraction or do_hk or do_bc: lev0_data = ImageProcessing(lev0_data, order_mask, ccd_list, data_type, None) op_data = None + if outlier_path == None: + outlier_rejection_file = '' + else: + outlier_rejection_file = outlier_path + short_lev0_file + for idx in ccd_idx: ccd = ccd_list[idx] order_names = orderlet_names[idx] @@ -505,7 +512,8 @@ if do_spectral_extraction or do_hk or do_bc: start_order=start_order[idx], ccd_index = idx, rectification_method=rect_method, extraction_method=extract_method, - clip_file=clip_file, data_extension=ccd, trace_file=trace_file) + clip_file=clip_file, data_extension=ccd, trace_file=trace_file, + do_outlier_rejection=do_outlier, outlier_file=outlier_rejection_file) if op_data != None: @@ -551,6 +559,8 @@ if do_spectral_extraction or do_hk or do_bc: if output_data != None: result = to_fits(output_data, output_lev1_file) + if exists(output_lev1_file): + lev1_list = lev1_list + [output_lev1_file] ## do radial velocity or rv reweighting: # description: do radial velocity on the specified extensions of L1 data and produce L2 data. 
@@ -612,6 +622,10 @@ if do_rv or do_rv_reweighting: rv_data = None if overwrite or not exists(output_lev2_file): lev1_data = kpf1_from_fits(input_lev1_file, data_type=data_type) + + # Mask bad amplifiers in 4 amp modes + lev1_data = AmplifierMask(lev1_data) + rv_init = RadialVelocityInit(start_time="2021-03-01", l1_data=lev1_data, bc_corr_path=bc_path, test_data_path=rv_star_dir) if rv_init != None: From c4df67e5de222da6e44fe8ae252552182c26447a Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Thu, 24 Aug 2023 13:24:01 -0700 Subject: [PATCH 12/86] Added additional criterion for bad frame; peak flux tracked; cosmetic changes --- modules/Utils/analyze_guider.py | 64 ++++++++++++++++++++++----------- 1 file changed, 44 insertions(+), 20 deletions(-) diff --git a/modules/Utils/analyze_guider.py b/modules/Utils/analyze_guider.py index ef3e6d4c3..de7329059 100644 --- a/modules/Utils/analyze_guider.py +++ b/modules/Utils/analyze_guider.py @@ -52,7 +52,8 @@ def __init__(self, L0, logger=None): #self.df_GUIDER = Table.read(self.L0, hdu='guider_cube_origins').to_pandas() #self.df_GUIDER = self.L0['guider_cube_origins'] self.df_GUIDER = self.L0['GUIDER_CUBE_ORIGINS'] - self.df_GUIDER = self.df_GUIDER[self.df_GUIDER.timestamp != 0.0] # remove bogus lines + self.df_GUIDER = self.df_GUIDER[self.df_GUIDER.timestamp != 0.0] # remove bogus lines + self.df_GUIDER = self.df_GUIDER[self.df_GUIDER.object1_flux != 0.0] # remove bogus lines self.good_fit = None @@ -211,7 +212,7 @@ def plot_guider_image(self, fig_path=None, show_plot=False): title = 'Residuals to Moffat Function Model' axs[2].set_title(title, fontsize=12) else: - title = 'Unsuccessful fit to Moffat Function Model' + title = 'Unsuccessful fit with Moffat Function' axs[2].set_title(title, fontsize=12, color='r') # red to indicate unusual image axs[2].set_title(title, fontsize=12) axs[2].grid(True, linestyle='solid', linewidth=0.5, alpha=0.5) @@ -292,14 +293,14 @@ def plot_guider_error_time_series_simple(self, fig_path=None, show_plot=False): axes[1].set_ylim(-xylim, xylim) axes[1].set_aspect('equal') axes[1].set_title('r: ' + f'{int(r_rms*10)/10}' + ' mas (RMS)', fontsize=14) - axes[1].set_xlabel('Guiding Error - x (mas)', fontsize=14) - axes[1].set_ylabel('Guiding Error - y (mas)', fontsize=14) + axes[1].set_xlabel('Guiding Error - x (mas)', fontsize=14, alpha=0.5) + axes[1].set_ylabel('Guiding Error - y (mas)', fontsize=14, alpha=0.5) axes[1].grid(True, linestyle='solid', linewidth=0.5, alpha=0.5) cbar = plt.colorbar(im1[3]) cbar.set_label('Samples', fontsize=12) - axes[0].plot(t, x_mas, color='royalblue') - axes[0].plot(t, y_mas, color='orange') + axes[0].plot(t, x_mas, color='royalblue', alpha=0.5) + axes[0].plot(t, y_mas, color='orange', alpha=0.5) axes[0].set_title("Guiding Error Time Series: " + str(self.ObsID)+' - ' + self.name, fontsize=14) axes[0].set_xlabel("Time (sec)", fontsize=14) axes[0].set_ylabel("Guiding Error (mas)", fontsize=14) @@ -401,8 +402,8 @@ def plot_guider_error_time_series(self, fig_path=None, show_plot=False): axes[0,1].grid(True, linestyle='solid', linewidth=0.5, alpha=0.5) # Time series plot of guider errors - axes[0,0].plot(t, x_mas, color='royalblue') - axes[0,0].plot(t, y_mas, color='orange') + axes[0,0].plot(t, x_mas, color='royalblue', alpha=0.5) + axes[0,0].plot(t, y_mas, color='orange', alpha=0.5) axes[0,0].set_title("Guiding Error Time Series: " + str(self.ObsID)+' - ' + self.name, fontsize=14) axes[0,0].set_xlabel("Time (sec)", fontsize=14) axes[0,0].set_ylabel("Guiding Error (mas)", fontsize=14) @@ -418,8 
+419,8 @@ def plot_guider_error_time_series(self, fig_path=None, show_plot=False): Pyy, freqs = mlab.psd(y_mas/1000, Fs=fps) Prr, freqs = mlab.psd(r_mas/1000, Fs=fps) # axes[1,0].step(freqs, Prr*1e6, where='mid', color='b', alpha=0.8, label='R - Guiding Errors') - axes[1,0].step(freqs, Pxx*1e6, where='mid', color='royalblue', label='X - Guiding errors', lw=2) - axes[1,0].step(freqs, Pyy*1e6, where='mid', color='orange', label='Y - Guiding errors', lw=2) + axes[1,0].step(freqs, Pxx*1e6, where='mid', color='royalblue', label='X - Guiding errors', lw=2, alpha=0.5) + axes[1,0].step(freqs, Pyy*1e6, where='mid', color='orange', label='Y - Guiding errors', lw=2, alpha=0.5) axes[1,0].grid(True, linestyle='dashed', linewidth=1, alpha=0.5) axes[1,0].set_xlabel('Frequency [Hz]', fontsize=14) axes[1,0].set_ylabel('Guiding Error\n' + r'Power Spectral Density (mas$^2$/Hz)', fontsize=14) @@ -430,6 +431,10 @@ def plot_guider_error_time_series(self, fig_path=None, show_plot=False): # Blank - plot to the right of power spectral density strings = ["Sun's altitude below horizon = " + str(int(-get_sun_alt(self.date_mid))) + " deg"] strings.append("Lunar separation = " + str(int(get_moon_sep(self.date_mid, self.ra, self.dec))) + " deg") + strings.append('\n') + strings.append("J = " + f"{self.jmag:.2f}" + ", G = " + f"{self.gmag:.2f}") + strings.append(str(int(self.gcfps)) + ' fps, ' + str(self.gcgain) + ' gain') + #strings.append('\n') #strings.append('Nframes = ' + str(nframes)) #strings.append(' ' + str(nframes_0stars) + ' with 0 stars detected') @@ -441,7 +446,7 @@ def plot_guider_error_time_series(self, fig_path=None, show_plot=False): # Guider FWHM time series plot fwhm = (self.df_GUIDER.object1_a**2 + self.df_GUIDER.object1_b**2)**0.5 / self.pixel_scale * (2*(2*np.log(2))**0.5) - axes[2,0].plot(self.df_GUIDER.timestamp-min(self.df_GUIDER.timestamp), fwhm, color='royalblue') + axes[2,0].plot(self.df_GUIDER.timestamp-min(self.df_GUIDER.timestamp), fwhm, color='royalblue', alpha=0.5) axes[2,0].grid(True, linestyle='dashed', linewidth=1, alpha=0.5) # axes[2,0].set_title('Nframes = ' + str(nframes) + '; median number of detected stars/frame=' + str(median_nstars) + ', ' + str(nframes_fewer_detections) + ' w/fewer, ' + str(nframes_extra_detections) + ' w/more', fontsize=14) axes[2,0].set_xlabel("Time (sec)", fontsize=14) @@ -450,30 +455,49 @@ def plot_guider_error_time_series(self, fig_path=None, show_plot=False): axes[2,0].legend([r'Guider FWHM ($\neq$ seeing)'], fontsize=12, loc='best') # Histogram of guider FWHM time series plot - axes[2,1].hist(fwhm, bins=30, color='royalblue', edgecolor='k') + axes[2,1].hist(fwhm, bins=30, color='royalblue', alpha=0.5) axes[2,1].set_xlabel("Guider FWHM (mas)", fontsize=14) - axes[2,1].set_ylabel("Frequency", fontsize=14) + axes[2,1].set_ylabel("Samples", fontsize=14) + axes[2,1].grid(True, linestyle='dashed', linewidth=1, alpha=0.5) # Guider flux time series plot - flux = self.df_GUIDER.object1_flux/np.nanpercentile(self.df_GUIDER.object1_flux, 95) - axes[3,0].plot(self.df_GUIDER.timestamp-min(self.df_GUIDER.timestamp), flux, color='royalblue') + flux = self.df_GUIDER.object1_flux # /np.nanpercentile(self.df_GUIDER.object1_flux, 95) + peak_flux = self.df_GUIDER.object1_peak + axes[3,0].plot(self.df_GUIDER.timestamp-min(self.df_GUIDER.timestamp), flux, color='royalblue', alpha=0.5) + axesb = axes[3,0].twinx() + axesb.set_ylabel(r'Peak Flux (DN pixel$^{-1}$)', color='orange', fontsize=14) + axesb.plot(self.df_GUIDER.timestamp-min(self.df_GUIDER.timestamp), peak_flux, 
color='orange', alpha=0.5) + axesb.grid(False) axes[3,0].grid(True, linestyle='dashed', linewidth=1, alpha=0.5) axes[3,0].set_xlabel("Time (sec)", fontsize=14) - axes[3,0].set_ylabel("Guider Flux (fractional)", fontsize=14) + axes[3,0].set_ylabel("Flux (integrated)", fontsize=14, color='royalblue') axes[3,0].set_xlim(min(self.df_GUIDER.timestamp-min(self.df_GUIDER.timestamp)), max(self.df_GUIDER.timestamp-min(self.df_GUIDER.timestamp))) - axes[3,0].legend([r'Guider Flux (fractional, normalized by 95th percentile)'], fontsize=12, loc='best') + #axes[3,0].legend(['Guider Flux', 'Peak Guider Flux'], fontsize=12, loc='best') # Histogram of guider flux time series plot - axes[3,1].hist(flux, bins=30, color='royalblue', edgecolor='k') - axes[3,1].set_xlabel("Flux (fractional)", fontsize=14) - axes[3,1].set_ylabel("Frequency", fontsize=14) + axes[3,1].hist(flux, bins=30, color='royalblue', alpha=0.5) + axes[3,1].grid(True, linestyle='dashed', linewidth=1, alpha=0.5) + axesc = axes[3,1].twiny() + axesc.hist(peak_flux, bins=30, color='orange', alpha=0.5) + axesc.set_xlabel(r'Peak Flux (DN pixel$^{-1}$; saturation $\approx$ 16,000)', color='orange', fontsize=14) + axesc.grid(False) + axes[3,1].set_xlabel("Flux (DN integrated)", fontsize=14, color='royalblue') + axes[3,1].set_ylabel("Samples", fontsize=14) # Set the font size of tick mark labels axes[0,0].tick_params(axis='both', which='major', labelsize=14) axes[0,1].tick_params(axis='both', which='major', labelsize=14) axes[1,0].tick_params(axis='both', which='major', labelsize=14) axes[2,0].tick_params(axis='both', which='major', labelsize=14) + axes[2,1].tick_params(axis='both', which='major', labelsize=10) axes[3,0].tick_params(axis='both', which='major', labelsize=14) + axes[3,1].tick_params(axis='both', which='major', labelsize=10) + axes[3,1].tick_params(axis='x', labelcolor='royalblue') + axesc.tick_params(axis='x', labelcolor='orange') + axes[3,0].tick_params(axis='y', labelcolor='royalblue') + axesb.tick_params(axis='y', labelcolor='orange') + axesb.tick_params(axis='both', which='major', labelsize=14) + axesc.tick_params(axis='both', which='major', labelsize=10) # Display the plot if fig_path != None: From 4a2b96736d182d1fd85688c619772195d7187ae3 Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Fri, 25 Aug 2023 11:10:20 -0700 Subject: [PATCH 13/86] worked on alg_bary_corr.py but now table is empty --- configs/framework_multi.cfg | 2 +- kpfpipe/cli.py | 16 ++-- modules/Utils/amplifier_mask.py | 3 +- .../spectral_extraction/src/alg_bary_corr.py | 94 +++++++++++++++---- scripts/launch_QLP.sh | 2 +- 5 files changed, 90 insertions(+), 27 deletions(-) diff --git a/configs/framework_multi.cfg b/configs/framework_multi.cfg index c58960e4f..718f64343 100644 --- a/configs/framework_multi.cfg +++ b/configs/framework_multi.cfg @@ -103,5 +103,5 @@ http_defaultFile = "" # want_multiprocessing = True queue_manager_hostname = 'localhost' -queue_manager_portnr = 50101 +queue_manager_portnr = 50111 queue_manager_auth_code = b"a very long authentication code" diff --git a/kpfpipe/cli.py b/kpfpipe/cli.py index 303d079c0..a67151aa2 100755 --- a/kpfpipe/cli.py +++ b/kpfpipe/cli.py @@ -163,11 +163,16 @@ def main(): datestr = datetime.now().strftime(format='%Y%m%d') # randomize queue manager port to avoid crosstalk between pipeline instances - port = str(random.randint(50101, 60101)) + port = random.randint(50101, 60101) frame_config = ConfigClass(framework_config_multi) print(f"Setting queue manager port to {port}") - 
frame_config['DEFAULT']['queue_manager_portnr'] = port - frame_config['DEFAULT']['queue_manager_auth_code'] = str(hash(port)) + frame_config.set('DEFAULT', 'queue_manager_portnr', str(port)) + frame_config.set('DEFAULT', 'queue_manager_auth_code', str(hash(port))) + + if args.reprocess: + print(f"Setting queue manager to shutdown after reprocessing.") + frame_config.set('DEFAULT', 'event_timeout', '5') + frame_config.set('DEFAULT', 'no_event_event', 'None') # Using the multiprocessing library, create the specified number of instances if args.watch and args.ncpus > 1: @@ -178,11 +183,6 @@ def main(): else: frame_config = ConfigClass(framework_config) - if args.reprocess: - print(f"Setting queue manager to shutdown after reprocessing.") - frame_config['DEFAULT']['event_timeout'] = '5' - frame_config['DEFAULT']['no_event_event'] = 'None' - # Try to initialize the framework try: framework = Framework(pipe, frame_config) diff --git a/modules/Utils/amplifier_mask.py b/modules/Utils/amplifier_mask.py index 85457f259..6515ff93b 100644 --- a/modules/Utils/amplifier_mask.py +++ b/modules/Utils/amplifier_mask.py @@ -26,7 +26,8 @@ def __init__(self, action, context): self.namps = {'GREEN': self.header['GRNAMPS'], 'RED': self.header['REDAMPS']} - self.chips = ['RED', 'GREEN'] + # self.chips = ['RED', 'GREEN'] + self.chips = ['GREEN',] self.orderlets = ['CAL_FLUX', 'SCI_FLUX1', 'SCI_FLUX2', 'SCI_FLUX3', 'SKY_FLUX'] self.bad_regions = {'GREEN': (slice(0, 20), slice(0, 2040)), 'RED': (slice(16, 32), slice(0, 2040))} diff --git a/modules/spectral_extraction/src/alg_bary_corr.py b/modules/spectral_extraction/src/alg_bary_corr.py index 74638ae29..c57d0a496 100644 --- a/modules/spectral_extraction/src/alg_bary_corr.py +++ b/modules/spectral_extraction/src/alg_bary_corr.py @@ -1,5 +1,6 @@ import numpy as np import pandas as pd +import barycorrpy from astropy.time import Time from modules.radial_velocity.src.midpoint_photon_arrival import MidpointPhotonArrival @@ -53,13 +54,17 @@ class BaryCorrTableAlg(ModuleAlgBase): year_days = 365.25 name = 'BaryCorrTable' BC_col1 = 'GEOMID_UTC' - BC_col2 = 'GEOMID_BJD' - BC_col3 = 'PHOTON_BJD' - BC_col4 = 'BARYVEL' + BC_col2 = 'GEOMID_JD' + BC_col3 = 'PHOTON_JD' + BC_col4 = 'GEOMID_BJD' + BC_col5 = 'PHOTON_BJD' + BC_col6 = 'BARYVEL' BARY_TABLE_KEYS = {BC_col1: "geometric midpoint UTC", - BC_col2: "geometric midpoint BJD", - BC_col3: "weighted-photon midpoint BJD", - BC_col4: "barycentric velocity(m/sec)"} + BC_col2: "geometric midpoint JD UTC", + BC_col3: "weighted-photon midpoint JD UTC", + BC_col4: "geometric midpoint BJD_TDB", + BC_col5: "weighted-photon midpoint BJD_TDB", + BC_col6: "barycentric velocity(m/sec)"} def __init__(self, df_em, df_bc, pheader, wls_data, total_order, ccd_order, start_bary_index=0, config=None, logger=None): @@ -91,7 +96,8 @@ def __init__(self, df_em, df_bc, pheader, wls_data, total_order, ccd_order, columns = df_bc.columns self.bary_corr_table = dict() for cl in [BaryCorrTableAlg.BC_col1, BaryCorrTableAlg.BC_col2, - BaryCorrTableAlg.BC_col3, BaryCorrTableAlg.BC_col4]: + BaryCorrTableAlg.BC_col3, BaryCorrTableAlg.BC_col4, + BaryCorrTableAlg.BC_col5, BaryCorrTableAlg.BC_col6]: if cl == BaryCorrTableAlg.BC_col1: self.bary_corr_table[cl] = np.array(df_bc[cl]) if cl in columns \ else np.empty(self.total_table_order, dtype=object) @@ -103,7 +109,9 @@ def __init__(self, df_em, df_bc, pheader, wls_data, total_order, ccd_order, BaryCorrTableAlg.BC_col1: np.empty(self.total_table_order, dtype=object), BaryCorrTableAlg.BC_col2: 
np.zeros(self.total_table_order, dtype=float), BaryCorrTableAlg.BC_col3: np.zeros(self.total_table_order, dtype=float), - BaryCorrTableAlg.BC_col4: np.zeros(self.total_table_order, dtype=float)} + BaryCorrTableAlg.BC_col4: np.zeros(self.total_table_order, dtype=float), + BaryCorrTableAlg.BC_col5: np.zeros(self.total_table_order, dtype=float), + BaryCorrTableAlg.BC_col6: np.zeros(self.total_table_order, dtype=float)} else: self.bary_corr_table = None @@ -224,20 +232,20 @@ def get_rv_col(self, is_single_time=False): for od in range(self.start_bary_index, self.end_bary_index+1): if is_single_time and od > self.start_bary_index: - self.bary_corr_table[BaryCorrTableAlg.BC_col4][od] \ - = self.bary_corr_table[BaryCorrTableAlg.BC_col4][self.start_bary_index] + self.bary_corr_table[BaryCorrTableAlg.BC_col6][od] \ + = self.bary_corr_table[BaryCorrTableAlg.BC_col6][self.start_bary_index] else: if RadialVelocityAlgInit.is_unknown_target(self.instrument, bc_config[RadialVelocityAlgInit.STARNAME], bc_config[RadialVelocityAlgInit.EPOCH]): # if bc_config[RadialVelocityAlgInit.EPOCH] is not None: # import pdb;pdb.set_trace() - self.bary_corr_table[BaryCorrTableAlg.BC_col4][od] = 0.0 + self.bary_corr_table[BaryCorrTableAlg.BC_col6][od] = 0.0 else: bc_corr = BarycentricCorrectionAlg.get_zb_from_bc_corr(bc_config, self.bary_corr_table[BaryCorrTableAlg.BC_col3][od])[0] - self.bary_corr_table[BaryCorrTableAlg.BC_col4][od] = bc_corr # m/sec + self.bary_corr_table[BaryCorrTableAlg.BC_col6][od] = bc_corr # m/sec - return self.bary_corr_table[BaryCorrTableAlg.BC_col4] + return self.bary_corr_table[BaryCorrTableAlg.BC_col6] def get_obs_utc(self, default=None): """ Get observation exposure time in UTC in string format @@ -283,15 +291,17 @@ def build_bary_corr_table(self): Pandas.DataFrame: BARY_CORR table containing columns of * geometric midpoint in UTC string, - * geometric midpoint in BJD, - * weighted-photon midpoint BJD, + * geometric midpoint in JD UTC, + * weighted-photon midpoint JD UTC, + * geometric midpoint in BJD TDB, + * weighted-photon midpoint BJD TDB, * barycentric velocity(m/sec) """ if self.bary_corr_table is None: return None - # fill in "GEOMID_UTC" and "GEOMID_BJD" columns + # fill in "GEOMID_UTC" and "GEOMID_JD" columns mid_utc = self.get_obs_utc() if mid_utc is None: @@ -299,6 +309,31 @@ def build_bary_corr_table(self): else: self.bary_corr_table[BaryCorrTableAlg.BC_col1][self.start_bary_index:self.end_bary_index+1] = mid_utc self.bary_corr_table[BaryCorrTableAlg.BC_col2][self.start_bary_index:self.end_bary_index+1] = Time(mid_utc).jd + if RadialVelocityAlgInit.STARNAME.lower() == 'sun': + GEOMID_BJD, warn, stat = barycorrpy.utc_tdb.JDUTC_to_HJDTDB(Time(mid_utc).jd, + ra=None, + dec=None, + epoch=None, + pmra=None, + pmdec=None, + px=None, + lat=RadialVelocityAlgInit.LAT, + longi=RadialVelocityAlgInit.LON, + alt=RadialVelocityAlgInit.ALT, + rv=None) + else: + GEOMID_BJD, warn, stat = barycorrpy.utc_tdb.JDUTC_to_BJDTDB(Time(mid_utc).jd, + ra=RadialVelocityAlgInit.RA, + dec=RadialVelocityAlgInit.DEC, + epoch=RadialVelocityAlgInit.EPOCH, + pmra=RadialVelocityAlgInit.PMRA, + pmdec=RadialVelocityAlgInit.PMDEC, + px=RadialVelocityAlgInit.PX, + lat=RadialVelocityAlgInit.LAT, + longi=RadialVelocityAlgInit.LON, + alt=RadialVelocityAlgInit.ALT, + rv=RadialVelocityAlgInit.RV) + self.bary_corr_table[BaryCorrTableAlg.BC_col4][self.start_bary_index:self.end_bary_index+1] = GEOMID_BJD df_em = self.get_expmeter_science() @@ -325,10 +360,37 @@ def build_bary_corr_table(self): is_single_mid = True for i in 
range(midphoton.size): self.bary_corr_table[BaryCorrTableAlg.BC_col3][i+self.start_bary_index] = Time(midphoton[i]).jd + if RadialVelocityAlgInit.STARNAME.lower() == 'sun': + PHOTMID_BJD, warn, stat = barycorrpy.utc_tdb.JDUTC_to_HJDTDB(Time(midphoton[i]).jd, + ra=None, + dec=None, + epoch=None, + pmra=None, + pmdec=None, + px=None, + lat=RadialVelocityAlgInit.LAT, + longi=RadialVelocityAlgInit.LON, + alt=RadialVelocityAlgInit.ALT, + rv=None) + else: + PHOTMID_BJD, warn, stat = barycorrpy.utc_tdb.JDUTC_to_BJDTDB(Time(midphoton[i]).jd, + ra=RadialVelocityAlgInit.RA, + dec=RadialVelocityAlgInit.DEC, + epoch=RadialVelocityAlgInit.EPOCH, + pmra=RadialVelocityAlgInit.PMRA, + pmdec=RadialVelocityAlgInit.PMDEC, + px=RadialVelocityAlgInit.PX, + lat=RadialVelocityAlgInit.LAT, + longi=RadialVelocityAlgInit.LON, + alt=RadialVelocityAlgInit.ALT, + rv=RadialVelocityAlgInit.RV) + + self.bary_corr_table[BaryCorrTableAlg.BC_col5][i+self.start_bary_index] = PHOTMID_BJD if self.is_bc_calculated(): self.get_rv_col(is_single_mid) bc_df_res = pd.DataFrame(self.bary_corr_table) bc_df_res.attrs['BCV_UNIT'] = 'm/sec' + return bc_df_res diff --git a/scripts/launch_QLP.sh b/scripts/launch_QLP.sh index 77682e42d..152d637f0 100755 --- a/scripts/launch_QLP.sh +++ b/scripts/launch_QLP.sh @@ -2,7 +2,7 @@ # This script is designed to launch the 4 QLP instances with a single command -ncpus=1 +ncpus=10 data_levels=("L0" "2D" "L1" "L2") for lvl in "${data_levels[@]}"; do From ed88894fd233b5c6b131b1a291e78e0be0cc6f23 Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Fri, 25 Aug 2023 12:03:18 -0700 Subject: [PATCH 14/86] bary corr table now has BJD columns --- .../radial_velocity/src/radial_velocity.py | 2 +- .../spectral_extraction/src/alg_bary_corr.py | 94 ++++++++++--------- 2 files changed, 53 insertions(+), 43 deletions(-) diff --git a/modules/radial_velocity/src/radial_velocity.py b/modules/radial_velocity/src/radial_velocity.py index f689ec524..8cbe8ece9 100644 --- a/modules/radial_velocity/src/radial_velocity.py +++ b/modules/radial_velocity/src/radial_velocity.py @@ -796,7 +796,7 @@ def f_decimal(num): results.attrs['ccd_rv'] = (f_decimal(final_rv - cal_rv), unit) \ if do_corr and final_rv != 0.0 else (f_decimal(final_rv), unit) results.attrs['ccd_erv'] = f_decimal(final_rv_err) - results.attrs['ccd_jd'] = jd[0] + results.attrs['ccd_jd'] = np.mean(jd) results.attrs['star_rv'] = starrv results.attrs['do_rv_corr'] = do_corr diff --git a/modules/spectral_extraction/src/alg_bary_corr.py b/modules/spectral_extraction/src/alg_bary_corr.py index c57d0a496..f16e62921 100644 --- a/modules/spectral_extraction/src/alg_bary_corr.py +++ b/modules/spectral_extraction/src/alg_bary_corr.py @@ -115,6 +115,8 @@ def __init__(self, df_em, df_bc, pheader, wls_data, total_order, ccd_order, else: self.bary_corr_table = None + self.configure() + def get_value_from_config(self, prop, default='', config=None): """ Get value of specific parameter from the configuration file. 
@@ -202,6 +204,28 @@ def get_rv_col(self, is_single_time=False): Returns: numpy.ndarray: Barycentric correction velocity of all orders """ + bc_config = self.bc_config + for od in range(self.start_bary_index, self.end_bary_index+1): + if is_single_time and od > self.start_bary_index: + self.bary_corr_table[BaryCorrTableAlg.BC_col6][od] \ + = self.bary_corr_table[BaryCorrTableAlg.BC_col6][self.start_bary_index] + else: + if RadialVelocityAlgInit.is_unknown_target(self.instrument, bc_config[RadialVelocityAlgInit.STARNAME], + bc_config[RadialVelocityAlgInit.EPOCH]): + # if bc_config[RadialVelocityAlgInit.EPOCH] is not None: + # import pdb;pdb.set_trace() + self.bary_corr_table[BaryCorrTableAlg.BC_col6][od] = 0.0 + else: + bc_corr = BarycentricCorrectionAlg.get_zb_from_bc_corr(bc_config, + self.bary_corr_table[BaryCorrTableAlg.BC_col3][od])[0] + self.bary_corr_table[BaryCorrTableAlg.BC_col6][od] = bc_corr # m/sec + + return self.bary_corr_table[BaryCorrTableAlg.BC_col6] + + def configure(self): + """ + Extract neccesary information from the header and config file. + """ bc_config = dict() header_keys = [RadialVelocityAlgInit.RA, RadialVelocityAlgInit.DEC, RadialVelocityAlgInit.PMRA, RadialVelocityAlgInit.PMDEC, @@ -230,22 +254,7 @@ def get_rv_col(self, is_single_time=False): bc_config[n_key] = h_key_data bc_config[RadialVelocityAlgInit.SPEC] = self.instrument - for od in range(self.start_bary_index, self.end_bary_index+1): - if is_single_time and od > self.start_bary_index: - self.bary_corr_table[BaryCorrTableAlg.BC_col6][od] \ - = self.bary_corr_table[BaryCorrTableAlg.BC_col6][self.start_bary_index] - else: - if RadialVelocityAlgInit.is_unknown_target(self.instrument, bc_config[RadialVelocityAlgInit.STARNAME], - bc_config[RadialVelocityAlgInit.EPOCH]): - # if bc_config[RadialVelocityAlgInit.EPOCH] is not None: - # import pdb;pdb.set_trace() - self.bary_corr_table[BaryCorrTableAlg.BC_col6][od] = 0.0 - else: - bc_corr = BarycentricCorrectionAlg.get_zb_from_bc_corr(bc_config, - self.bary_corr_table[BaryCorrTableAlg.BC_col3][od])[0] - self.bary_corr_table[BaryCorrTableAlg.BC_col6][od] = bc_corr # m/sec - - return self.bary_corr_table[BaryCorrTableAlg.BC_col6] + self.bc_config = bc_config def get_obs_utc(self, default=None): """ Get observation exposure time in UTC in string format @@ -309,6 +318,7 @@ def build_bary_corr_table(self): else: self.bary_corr_table[BaryCorrTableAlg.BC_col1][self.start_bary_index:self.end_bary_index+1] = mid_utc self.bary_corr_table[BaryCorrTableAlg.BC_col2][self.start_bary_index:self.end_bary_index+1] = Time(mid_utc).jd + if RadialVelocityAlgInit.STARNAME.lower() == 'sun': GEOMID_BJD, warn, stat = barycorrpy.utc_tdb.JDUTC_to_HJDTDB(Time(mid_utc).jd, ra=None, @@ -317,22 +327,22 @@ def build_bary_corr_table(self): pmra=None, pmdec=None, px=None, - lat=RadialVelocityAlgInit.LAT, - longi=RadialVelocityAlgInit.LON, - alt=RadialVelocityAlgInit.ALT, + lat=self.bc_config[RadialVelocityAlgInit.OBSLAT], + longi=self.bc_config[RadialVelocityAlgInit.OBSLON], + alt=self.bc_config[RadialVelocityAlgInit.OBSALT], rv=None) else: GEOMID_BJD, warn, stat = barycorrpy.utc_tdb.JDUTC_to_BJDTDB(Time(mid_utc).jd, - ra=RadialVelocityAlgInit.RA, - dec=RadialVelocityAlgInit.DEC, - epoch=RadialVelocityAlgInit.EPOCH, - pmra=RadialVelocityAlgInit.PMRA, - pmdec=RadialVelocityAlgInit.PMDEC, - px=RadialVelocityAlgInit.PX, - lat=RadialVelocityAlgInit.LAT, - longi=RadialVelocityAlgInit.LON, - alt=RadialVelocityAlgInit.ALT, - rv=RadialVelocityAlgInit.RV) + ra=self.bc_config[RadialVelocityAlgInit.RA], + 
dec=self.bc_config[RadialVelocityAlgInit.DEC], + epoch=self.bc_config[RadialVelocityAlgInit.EPOCH], + pmra=self.bc_config[RadialVelocityAlgInit.PMRA], + pmdec=self.bc_config[RadialVelocityAlgInit.PMDEC], + px=self.bc_config[RadialVelocityAlgInit.PARALLAX], + lat=self.bc_config[RadialVelocityAlgInit.OBSLAT], + longi=self.bc_config[RadialVelocityAlgInit.OBSLON], + alt=self.bc_config[RadialVelocityAlgInit.OBSALT], + rv=self.bc_config[RadialVelocityAlgInit.STAR_RV]) self.bary_corr_table[BaryCorrTableAlg.BC_col4][self.start_bary_index:self.end_bary_index+1] = GEOMID_BJD df_em = self.get_expmeter_science() @@ -368,22 +378,22 @@ def build_bary_corr_table(self): pmra=None, pmdec=None, px=None, - lat=RadialVelocityAlgInit.LAT, - longi=RadialVelocityAlgInit.LON, - alt=RadialVelocityAlgInit.ALT, + lat=self.bc_config[RadialVelocityAlgInit.OBSLAT], + longi=self.bc_config[RadialVelocityAlgInit.OBSLON], + alt=self.bc_config[RadialVelocityAlgInit.OBSALT], rv=None) else: PHOTMID_BJD, warn, stat = barycorrpy.utc_tdb.JDUTC_to_BJDTDB(Time(midphoton[i]).jd, - ra=RadialVelocityAlgInit.RA, - dec=RadialVelocityAlgInit.DEC, - epoch=RadialVelocityAlgInit.EPOCH, - pmra=RadialVelocityAlgInit.PMRA, - pmdec=RadialVelocityAlgInit.PMDEC, - px=RadialVelocityAlgInit.PX, - lat=RadialVelocityAlgInit.LAT, - longi=RadialVelocityAlgInit.LON, - alt=RadialVelocityAlgInit.ALT, - rv=RadialVelocityAlgInit.RV) + ra=self.bc_config[RadialVelocityAlgInit.RA], + dec=self.bc_config[RadialVelocityAlgInit.DEC], + epoch=self.bc_config[RadialVelocityAlgInit.EPOCH], + pmra=self.bc_config[RadialVelocityAlgInit.PMRA], + pmdec=self.bc_config[RadialVelocityAlgInit.PMDEC], + px=self.bc_config[RadialVelocityAlgInit.PARALLAX], + lat=self.bc_config[RadialVelocityAlgInit.OBSLAT], + longi=self.bc_config[RadialVelocityAlgInit.OBSLON], + alt=self.bc_config[RadialVelocityAlgInit.OBSALT], + rv=self.bc_config[RadialVelocityAlgInit.STAR_RV]) self.bary_corr_table[BaryCorrTableAlg.BC_col5][i+self.start_bary_index] = PHOTMID_BJD From fc4259da166e40b042aa1d5e446cbc6f8950ac46 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Fri, 25 Aug 2023 17:01:27 -0700 Subject: [PATCH 15/86] numerous tuneups of guider plots --- modules/Utils/analyze_guider.py | 63 ++++++++++++++++++++++----------- 1 file changed, 43 insertions(+), 20 deletions(-) diff --git a/modules/Utils/analyze_guider.py b/modules/Utils/analyze_guider.py index de7329059..c1887748d 100644 --- a/modules/Utils/analyze_guider.py +++ b/modules/Utils/analyze_guider.py @@ -135,11 +135,11 @@ def plot_guider_image(self, fig_path=None, show_plot=False): guider_im_zoom = self.guider_image[255-50:255+50, 320-50:320+50] if self.good_fit: resid_im = self.guider_image - self.image_fit - resid_im_zoom = resid_im[255-38:255+38, 320-38:320+38] + resid_im_zoom = resid_im[255-50:255+50, 320-50:320+50] else: - resid_im_zoom = 0*self.guider_image[255-38:255+38, 320-38:320+38] + resid_im_zoom = 0*self.guider_image[255-50:255+50, 320-50:320+50] - fig, axs = plt.subplots(1, 3, figsize=(15, 5), tight_layout=True) + fig, axs = plt.subplots(1, 3, figsize=(16, 5), tight_layout=True) # Left panel - full image im1 = axs[0].imshow(self.guider_image, cmap='viridis', origin='lower', vmin=0, vmax=np.nanpercentile(guider_im_zoom,99.9)) @@ -169,6 +169,12 @@ def plot_guider_image(self, fig_path=None, show_plot=False): # Middle panel - zoomed image im2 = axs[1].imshow(guider_im_zoom, cmap='viridis', origin='lower', vmin=0, vmax=np.nanpercentile(guider_im_zoom,99.9)) + axs[1].contour(guider_im_zoom, + 
levels=[0.33*np.nanpercentile(guider_im_zoom,99.9), + 0.66*np.nanpercentile(guider_im_zoom,99.9)], + colors='gray', + linewidths = [0.5, 0.5], + extent=[0, guider_im_zoom.shape[0], 0, guider_im_zoom.shape[1]]) axs[1].set_aspect('equal') image_size_pixels = guider_im_zoom.shape image_size_arcsec = (image_size_pixels[1] * self.pixel_scale, image_size_pixels[0] * self.pixel_scale) @@ -190,10 +196,24 @@ def plot_guider_image(self, fig_path=None, show_plot=False): axs[1].set_title(title, fontsize=12) axs[1].grid(True, linestyle='solid', linewidth=0.5, alpha=0.5) cbar2 = plt.colorbar(im2, ax=axs[1], shrink=0.7) - #axs[1].set_facecolor(cmap(0)) + cbar2.set_label('Intensity', fontsize=10) # Right panel - zoomed image of residuals to model - im2 = axs[2].imshow(resid_im_zoom, cmap='viridis', origin='lower', vmin=0, vmax=np.nanpercentile(guider_im_zoom,99.9)) + scaled_resid_im_zoom = resid_im_zoom/np.nanpercentile(guider_im_zoom,99.9) + #minmax = max([-np.nanpercentile(scaled_resid_im_zoom, 0.1), np.nanpercentile(scaled_resid_im_zoom,99.9)]) + minmax = 0.1 # use fixed range + im2 = axs[2].imshow(scaled_resid_im_zoom, + cmap='twilight',#'viridis', + origin='lower', + vmin=-minmax, + vmax= minmax, + extent=[0, scaled_resid_im_zoom.shape[0], 0, scaled_resid_im_zoom.shape[1]]) + axs[2].contour(guider_im_zoom, + levels=[0.33*np.nanpercentile(guider_im_zoom,99.9), + 0.66*np.nanpercentile(guider_im_zoom,99.9)], + colors='gray', + linewidths = [1, 1], + extent=[0, scaled_resid_im_zoom.shape[0], 0, scaled_resid_im_zoom.shape[1]]) image_size_pixels = guider_im_zoom.shape image_size_arcsec = (image_size_pixels[1] * self.pixel_scale, image_size_pixels[0] * self.pixel_scale) x_tick_locator = ticker.MultipleLocator(0.5/(self.pixel_scale-0.001)) # Set tick every 0.5 arcsec @@ -210,14 +230,14 @@ def plot_guider_image(self, fig_path=None, show_plot=False): axs[2].set_ylabel('Arcseconds', fontsize=12) if self.good_fit: title = 'Residuals to Moffat Function Model' - axs[2].set_title(title, fontsize=12) + axs[2].set_title(title, fontsize=10) else: title = 'Unsuccessful fit with Moffat Function' axs[2].set_title(title, fontsize=12, color='r') # red to indicate unusual image axs[2].set_title(title, fontsize=12) axs[2].grid(True, linestyle='solid', linewidth=0.5, alpha=0.5) cbar3 = plt.colorbar(im2, ax=axs[2], shrink=0.7) - #axs[0].set_facecolor(cmap(0)) + cbar3.set_label('Fractional Residuals (scaled to peak of guider image)', fontsize=10) # Display the plot if fig_path != None: @@ -342,7 +362,7 @@ def plot_guider_error_time_series(self, fig_path=None, show_plot=False): """ # Create the figure and subplots - fig, axes = plt.subplots(4, 2, figsize=(16, 16), gridspec_kw={'width_ratios': [2, 1]}, tight_layout=True) + fig, axes = plt.subplots(4, 2, figsize=(16, 15), gridspec_kw={'width_ratios': [2, 1]}, tight_layout=True) plt.style.use('seaborn-whitegrid') # Count number of stars @@ -387,7 +407,7 @@ def plot_guider_error_time_series(self, fig_path=None, show_plot=False): # Histogram of guider errors hist = axes[0,1].hist2d(x_mas, y_mas, bins=hist_bins, cmap='viridis') cbar_ax = fig.add_axes([0.95, 0.775, 0.01, 0.20]) # Adjust these values to properly position your colorbar - fig.colorbar(hist[3], cax=cbar_ax, label='Samples')#, fontsize=12) + fig.colorbar(hist[3], cax=cbar_ax, label='Samples', shrink=0.6)#, fontsize=12) #cbar = plt.colorbar(hist[3]) #cbar_ax.set_label('Samples', fontsize=12) cmap = plt.get_cmap('viridis') @@ -403,7 +423,7 @@ def plot_guider_error_time_series(self, fig_path=None, show_plot=False): # Time 
series plot of guider errors axes[0,0].plot(t, x_mas, color='royalblue', alpha=0.5) - axes[0,0].plot(t, y_mas, color='orange', alpha=0.5) + axes[0,0].plot(t, y_mas, color='orange', alpha=0.5) axes[0,0].set_title("Guiding Error Time Series: " + str(self.ObsID)+' - ' + self.name, fontsize=14) axes[0,0].set_xlabel("Time (sec)", fontsize=14) axes[0,0].set_ylabel("Guiding Error (mas)", fontsize=14) @@ -418,7 +438,6 @@ def plot_guider_error_time_series(self, fig_path=None, show_plot=False): Pxx, freqs = mlab.psd(x_mas/1000, Fs=fps) Pyy, freqs = mlab.psd(y_mas/1000, Fs=fps) Prr, freqs = mlab.psd(r_mas/1000, Fs=fps) -# axes[1,0].step(freqs, Prr*1e6, where='mid', color='b', alpha=0.8, label='R - Guiding Errors') axes[1,0].step(freqs, Pxx*1e6, where='mid', color='royalblue', label='X - Guiding errors', lw=2, alpha=0.5) axes[1,0].step(freqs, Pyy*1e6, where='mid', color='orange', label='Y - Guiding errors', lw=2, alpha=0.5) axes[1,0].grid(True, linestyle='dashed', linewidth=1, alpha=0.5) @@ -429,12 +448,16 @@ def plot_guider_error_time_series(self, fig_path=None, show_plot=False): axes[1,0].legend(fontsize=12) # Blank - plot to the right of power spectral density - strings = ["Sun's altitude below horizon = " + str(int(-get_sun_alt(self.date_mid))) + " deg"] - strings.append("Lunar separation = " + str(int(get_moon_sep(self.date_mid, self.ra, self.dec))) + " deg") - strings.append('\n') + strings = [] strings.append("J = " + f"{self.jmag:.2f}" + ", G = " + f"{self.gmag:.2f}") strings.append(str(int(self.gcfps)) + ' fps, ' + str(self.gcgain) + ' gain') - + strings.append('\n') + strings.append(str(self.df_GUIDER.shape[0]) + ' guider frames. Fraction with:') + strings.append(' saturated pixels: ' + "%.2f" % (100*(self.df_GUIDER['object1_peak'] > 15800).sum() / self.df_GUIDER.shape[0]) + '%') + strings.append(' pixels at >90% saturation: ' + "%.2f" % (100*(self.df_GUIDER['object1_peak'] > 15830*0.9).sum() / self.df_GUIDER.shape[0]) + '%') + strings.append('\n') + strings.append("Sun's altitude below horizon = " + str(int(-get_sun_alt(self.date_mid))) + " deg") + strings.append("Lunar separation = " + str(int(get_moon_sep(self.date_mid, self.ra, self.dec))) + " deg") #strings.append('\n') #strings.append('Nframes = ' + str(nframes)) #strings.append(' ' + str(nframes_0stars) + ' with 0 stars detected') @@ -442,7 +465,7 @@ def plot_guider_error_time_series(self, fig_path=None, show_plot=False): #strings.append(' ' + str(nframes_2stars) + ' with 2 stars detected') #strings.append(' ' + str(nframes_3stars) + ' with 3 stars detected') axes[1,1].axis('off') - axes[1,1].text(0.03, 0.9, '\n'.join(strings), fontsize=14, ha='left', va='top') + axes[1,1].text(0.01, 1.0, '\n'.join(strings), fontsize=14, ha='left', va='top') # Guider FWHM time series plot fwhm = (self.df_GUIDER.object1_a**2 + self.df_GUIDER.object1_b**2)**0.5 / self.pixel_scale * (2*(2*np.log(2))**0.5) @@ -470,7 +493,7 @@ def plot_guider_error_time_series(self, fig_path=None, show_plot=False): axesb.grid(False) axes[3,0].grid(True, linestyle='dashed', linewidth=1, alpha=0.5) axes[3,0].set_xlabel("Time (sec)", fontsize=14) - axes[3,0].set_ylabel("Flux (integrated)", fontsize=14, color='royalblue') + axes[3,0].set_ylabel("Integrated Flux (DN)", fontsize=14, color='royalblue') axes[3,0].set_xlim(min(self.df_GUIDER.timestamp-min(self.df_GUIDER.timestamp)), max(self.df_GUIDER.timestamp-min(self.df_GUIDER.timestamp))) #axes[3,0].legend(['Guider Flux', 'Peak Guider Flux'], fontsize=12, loc='best') @@ -479,14 +502,14 @@ def plot_guider_error_time_series(self, 
fig_path=None, show_plot=False): axes[3,1].grid(True, linestyle='dashed', linewidth=1, alpha=0.5) axesc = axes[3,1].twiny() axesc.hist(peak_flux, bins=30, color='orange', alpha=0.5) - axesc.set_xlabel(r'Peak Flux (DN pixel$^{-1}$; saturation $\approx$ 16,000)', color='orange', fontsize=14) + axesc.set_xlabel(r'Peak Flux (DN pixel$^{-1}$; saturation = 15,830)', color='orange', fontsize=14) axesc.grid(False) - axes[3,1].set_xlabel("Flux (DN integrated)", fontsize=14, color='royalblue') + axes[3,1].set_xlabel("Integrated Flux (DN)", fontsize=14, color='royalblue') axes[3,1].set_ylabel("Samples", fontsize=14) # Set the font size of tick mark labels axes[0,0].tick_params(axis='both', which='major', labelsize=14) - axes[0,1].tick_params(axis='both', which='major', labelsize=14) + axes[0,1].tick_params(axis='both', which='major', labelsize=10) axes[1,0].tick_params(axis='both', which='major', labelsize=14) axes[2,0].tick_params(axis='both', which='major', labelsize=14) axes[2,1].tick_params(axis='both', which='major', labelsize=10) From 6ecfa8417af97b8b1ba899d7bbad8543d4b0dc4e Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Sat, 26 Aug 2023 12:05:25 -0700 Subject: [PATCH 16/86] Added DummyLogger() class to print log statements when a logger isn't specified --- modules/Utils/utils.py | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/modules/Utils/utils.py b/modules/Utils/utils.py index 7bc664b98..c503476f2 100644 --- a/modules/Utils/utils.py +++ b/modules/Utils/utils.py @@ -1,5 +1,5 @@ -# This file contains assorted utility functions for computing astronomical -# quantities associated with KPF data. +# This file contains assorted utility functions that are mostly +# for computing astronomical quantities associated with KPF data. from astropy.time import Time from astropy import units as u @@ -43,3 +43,29 @@ def get_moon_sep(UTdatetime, RA, dec): sep = target.separation(moon) return sep.deg # in degrees + + +class DummyLogger: + """ + Make a dummy logger that prints messages in case a Python logger object is + not provided. 
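
(For context, the fallback pattern this class enables looks roughly like the sketch below. This is an illustration only, not part of the patch itself; `AnalyzeExample` is a hypothetical class name, while the import path and the one-line fallback idiom come from this patch series.)

    # Minimal sketch of the intended fallback pattern (illustrative only):
    from modules.Utils.utils import DummyLogger

    class AnalyzeExample:
        def __init__(self, data, logger=None):
            # Fall back to the print-based DummyLogger when no logger is supplied
            self.logger = logger if logger is not None else DummyLogger()
            self.logger.debug('Initializing AnalyzeExample object')
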
+ + Usage: + in def __init__(self, logger=None) for some class, include this line: + self.logger = logger if logger is not None else DummyLogger() + """ + def info(self, msg): + print(f"INFO: {msg}") + + def debug(self, msg): + print(f"DEBUG: {msg}") + + def warning(self, msg): + print(f"WARNING: {msg}") + + def error(self, msg): + print(f"ERROR: {msg}") + + def critical(self, msg): + print(f"CRITICAL: {msg}") + From cec9bd6327c70583b7ca2de318926e05f30e56a6 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Sat, 26 Aug 2023 12:05:53 -0700 Subject: [PATCH 17/86] starting master QLP --- modules/quicklook/src/quick_prim.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/quicklook/src/quick_prim.py b/modules/quicklook/src/quick_prim.py index f60ada232..628e819b8 100644 --- a/modules/quicklook/src/quick_prim.py +++ b/modules/quicklook/src/quick_prim.py @@ -60,6 +60,8 @@ def _perform(self) -> None: self.alg.qlp_L1(self.input_file, self.output_dir) elif self.qlp_level == 'L2': self.alg.qlp_L2(self.input_file, self.output_dir) + elif self.qlp_level == 'master': + self.alg.qlp_mastser(self.input_file, self.output_dir) #input_file is actually a datecode except Exception as e: # Allow recipe to continue if QLP fails self.logger.error(f"Failure in L0 quicklook pipeline: {e}\n{traceback.format_exc()}") From f9def3bbf9c149ced958c37a6acb7e03701c3e61 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Sat, 26 Aug 2023 21:56:52 -0700 Subject: [PATCH 18/86] QLP updates --- modules/quicklook/src/quick_prim.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/quicklook/src/quick_prim.py b/modules/quicklook/src/quick_prim.py index 628e819b8..2a1a00e72 100644 --- a/modules/quicklook/src/quick_prim.py +++ b/modules/quicklook/src/quick_prim.py @@ -61,8 +61,8 @@ def _perform(self) -> None: elif self.qlp_level == 'L2': self.alg.qlp_L2(self.input_file, self.output_dir) elif self.qlp_level == 'master': - self.alg.qlp_mastser(self.input_file, self.output_dir) #input_file is actually a datecode + self.alg.qlp_master(self.input_file, self.output_dir) except Exception as e: # Allow recipe to continue if QLP fails - self.logger.error(f"Failure in L0 quicklook pipeline: {e}\n{traceback.format_exc()}") + self.logger.error(f"Failure in Quicklook pipeline: {e}\n{traceback.format_exc()}") pass From b81c5942a8b78dedba99054ef50740e09d71c5a4 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Sat, 26 Aug 2023 22:00:40 -0700 Subject: [PATCH 19/86] Add DummyLogger --- modules/Utils/analyze_2d.py | 24 +++++++----------------- modules/Utils/analyze_l0.py | 8 +++----- 2 files changed, 10 insertions(+), 22 deletions(-) diff --git a/modules/Utils/analyze_2d.py b/modules/Utils/analyze_2d.py index 09b03f58d..e7833a34b 100644 --- a/modules/Utils/analyze_2d.py +++ b/modules/Utils/analyze_2d.py @@ -2,11 +2,11 @@ import numpy as np import pandas as pd import matplotlib.pyplot as plt - from matplotlib.patches import Rectangle from scipy.stats import norm from scipy.stats import median_abs_deviation from modules.Utils.kpf_parse import HeaderParse +from modules.Utils.utils import DummyLogger from astropy.table import Table from datetime import datetime #import emcee @@ -41,11 +41,8 @@ class Analyze2D: """ def __init__(self, D2, logger=None): - if logger: - self.logger = logger - self.logger.debug('Initializing Analyze2D object') - else: - self.logger = None + self.logger = logger if logger is not None else DummyLogger() + self.logger.debug('Initializing Analyze2D object') self.D2 = D2 # use D2 instead of 2D 
because variable names can't start with a number self.df_telemetry = self.D2['TELEMETRY'] # read as Table for astropy.io version of FITS primary_header = HeaderParse(D2, 'PRIMARY') @@ -134,8 +131,8 @@ def plot_2D_image(self, chip=None, overplot_dark_current=False, dark current or bias measurements in preset regions, if commanded. Args: - overlay_dark_current - if True, dark current measurements are over-plotted chip (string) - "green" or "red" + overlay_dark_current - if True, dark current measurements are over-plotted fig_path (string) - set to the path for the file to be generated. show_plot (boolean) - show the plot in the current environment. @@ -169,7 +166,6 @@ def plot_2D_image(self, chip=None, overplot_dark_current=False, image = self.D2[CHIP + '_CCD'].data else: self.logger.debug('chip not supplied. Exiting plot_2D_image') - print('chip not supplied. Exiting plot_2D_image') return # Generate 2D image @@ -281,7 +277,6 @@ def plot_2D_image_zoom(self, chip=None, fig_path=None, show_plot=False, image = self.D2[CHIP + '_CCD'].data else: self.logger.debug('chip not supplied. Exiting plot_2D_image') - print('chip not supplied. Exiting plot_2D_image') return # Plot and annotate @@ -341,8 +336,7 @@ def plot_2D_image_zoom_3x3(self, chip=None, fig_path=None, show_plot=False): #image = self.D2[CHIP + '_CCD'].data image = np.array(self.D2[CHIP + '_CCD'].data) else: - self.logger.debug('chip not supplied. Exiting plot_2D_image') - print('chip not supplied. Exiting plot_2D_image') + self.logger.info('chip not supplied. Exiting plot_2D_image') return # Calculate center of the image and define offsets @@ -627,7 +621,7 @@ def plot_2D_image_histogram(self, chip=None, fig_path=None, show_plot=False, # Flatten the image array for speed in histrogram computation flatten_image = image.flatten() else: - self.logger.debug('chip not supplied. Exiting plot_2D_image') + self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}') print('chip not supplied. 
Exiting plot_2D_image') return @@ -650,7 +644,6 @@ def plot_2D_image_histogram(self, chip=None, fig_path=None, show_plot=False, density = False, range = (np.nanpercentile(flatten_image, 0.005), np.nanpercentile(flatten_image, 99.995))) - #if master_file != 'None' and len(master_flatten_counts)>1: plt.hist(master_flatten_counts, bins = 50,alpha =0.5, label = 'Master Median: '+ '%4.1f' % np.nanmedian(master_flatten_counts)+'; Std: ' + '%4.1f' % np.nanstd(master_flatten_counts), histtype='step',density = False, color = 'orange', linewidth = 1 , range = (np.nanpercentile(master_flatten_counts,0.005),np.nanpercentile(master_flatten_counts,99.995))) #[master_flatten_counts1: plt.plot(master_counts[:,which_column],alpha = 0.5,linewidth = 0.5, label = 'Master', color = 'Orange') plt.title('Column Cuts though 2D ' + chip_title + ' CCD: ' + str(self.ObsID) + ' - ' + self.name, fontsize=18) plt.ylabel('e-', fontsize=16) plt.xlabel('Row Number', fontsize=16) @@ -758,4 +749,3 @@ def plot_2D_column_cut(self, chip=None, fig_path=None, show_plot=False, if show_plot == True: plt.show() plt.close('all') - diff --git a/modules/Utils/analyze_l0.py b/modules/Utils/analyze_l0.py index 74d9e3c4c..8ebf349ec 100644 --- a/modules/Utils/analyze_l0.py +++ b/modules/Utils/analyze_l0.py @@ -2,6 +2,7 @@ import numpy as np import matplotlib.pyplot as plt from modules.Utils.kpf_parse import HeaderParse +from modules.Utils.utils import DummyLogger from datetime import datetime class AnalyzeL0: @@ -19,11 +20,8 @@ class AnalyzeL0: """ def __init__(self, L0, logger=None): - if logger: - self.logger = logger - self.logger.debug('Initializing AnalyzeL0 object.') - else: - self.logger = None + self.logger = logger if logger is not None else DummyLogger() + self.logger.debug('Initializing AnalyzeL0 object.') self.L0 = L0 primary_header = HeaderParse(L0, 'PRIMARY') self.header = primary_header.header From 83ae1dd8a2ea4b28df921253f9b3d77612ce2f32 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Sat, 26 Aug 2023 22:01:47 -0700 Subject: [PATCH 20/86] Added 2D Masters to QLP --- modules/quicklook/src/alg.py | 208 ++++++++++++++++++++++++++++++++--- 1 file changed, 191 insertions(+), 17 deletions(-) diff --git a/modules/quicklook/src/alg.py b/modules/quicklook/src/alg.py index eb45a8c71..c13c92bd3 100644 --- a/modules/quicklook/src/alg.py +++ b/modules/quicklook/src/alg.py @@ -91,8 +91,6 @@ def qlp_L0(self, kpf0, output_dir): Arguments: kpf0 - a L0 object output_dir - directory for output QLP files (if show_plot=False) - show_plot - plots are generated inline (e.g., for Jupyter Notebooks) - instead of saving files Attributes: None @@ -177,8 +175,7 @@ def qlp_L0(self, kpf0, output_dir): os.makedirs(savedir, exist_ok=True) # make directories if needed filename = savedir + self.ObsID + '_L0_stitched_image_' + chip + '_zoomable.png' self.logger.info('Generating QLP image ' + filename) - myL0.plot_L0_stitched_image(fig_path=filename, - chip=chip, show_plot=False) + myL0.plot_L0_stitched_image(fig_path=filename, chip=chip, show_plot=False) except Exception as e: self.logger.error(f"Failure in L0 quicklook pipeline: {e}\n{traceback.format_exc()}") @@ -194,8 +191,6 @@ def qlp_2D(self, kpf2d, output_dir): Arguments: kpf2d - a 2D object output_dir - directory for output QLP files (if show_plot=False) - show_plot - plots are generated inline (e.g., for Jupyter Notebooks) - instead of saving files Attributes: None @@ -251,14 +246,13 @@ def qlp_2D(self, kpf2d, output_dir): try: savedir = D2_QLP_file_base +'2D/' os.makedirs(savedir, exist_ok=True) 
# make directories if needed - my_2D = AnalyzeL0(kpf2d, logger=self.logger) + my_2D = AnalyzeL0(kpf2d, logger=self.logger) # should this be Analyze2D instead? for chip in chips: # next line not working yet #Analyze2D.measure_2D_dark_current(self, chip=chip) filename = savedir + self.ObsID + '_2D_image_' + chip + '_zoomable.png' self.logger.info('Generating QLP image ' + filename) - Analyze2D.plot_2D_image(self, chip=chip, fig_path=filename, - show_plot=False) + my_2D.plot_2D_image(self, chip=chip, fig_path=filename, show_plot=False) except Exception as e: self.logger.error(f"Failure in 2D quicklook pipeline: {e}\n{traceback.format_exc()}") @@ -273,8 +267,7 @@ def qlp_2D(self, kpf2d, output_dir): for chip in chips: filename = savedir + self.ObsID + '_2D_image_3x3zoom_' + chip + '_zoomable.png' self.logger.info('Generating QLP image ' + filename) - Analyze2D.plot_2D_image_zoom_3x3(self, chip=chip, fig_path=filename, - show_plot=False) + my_2D.plot_2D_image_zoom_3x3(chip=chip, fig_path=filename, show_plot=False) except Exception as e: self.logger.error(f"Failure in 2D quicklook pipeline: {e}\n{traceback.format_exc()}") @@ -290,8 +283,7 @@ def qlp_2D(self, kpf2d, output_dir): for chip in chips: filename = savedir + self.ObsID + '_2D_histogram_' + chip + '_zoomable.png' self.logger.info('Generating QLP image ' + filename) - Analyze2D.plot_2D_image_histogram(self, chip=chip, fig_path=filename, - show_plot=False) + my_2D.plot_2D_image_histogram(self, chip=chip, fig_path=filename, show_plot=False) except Exception as e: self.logger.error(f"Failure in 2D quicklook pipeline: {e}\n{traceback.format_exc()}") @@ -299,11 +291,11 @@ def qlp_2D(self, kpf2d, output_dir): # Make 2D column cuts if chips != []: try: + my_2D = Analyze2D(kpf2d, logger=self.logger) for chip in chips: filename = savedir + self.ObsID + '_2D_column_cut_' + chip + '_zoomable.png' self.logger.info('Generating QLP image ' + filename) - Analyze2D.plot_2D_column_cut(self, chip=chip, fig_path=filename, - show_plot=False) + my_2D.plot_2D_column_cut(chip=chip, fig_path=filename, show_plot=False) except Exception as e: self.logger.error(f"Failure in 2D quicklook pipeline: {e}\n{traceback.format_exc()}") @@ -320,8 +312,6 @@ def qlp_L1(self, kpf1, output_dir): Arguments: kpf1 - a L1 object output_dir - directory for output QLP files (if show_plot=False) - show_plot - plots are generated inline (e.g., for Jupyter Notebooks) - instead of saving files Attributes: None @@ -497,3 +487,187 @@ def qlp_L2(self, kpf2, output_dir): except Exception as e: self.logger.error(f"Failure in CCF quicklook pipeline: {e}\n{traceback.format_exc()}") + + + ####################### + ##### QLP MASTERS ##### + ####################### + def qlp_master(self, input_file, output_dir): + """ + Description: + Generates the standard quicklook data product for a master files. 
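
(A hedged sketch of how this method is expected to be invoked from a recipe, based on the recipe changes later in this series; the master filename below is hypothetical.)

    # Recipe-level usage sketch (hypothetical filename):
    #   Quicklook('/data/masters/20230419/kpf_20230419_master_bias.fits',
    #             '/data/QLP/20230419/Masters/', 'master')
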
+ + Arguments: + master_dir (string) - the directory where master files are stored + output_dir - directory for output QLP files (if show_plot=False) + + Attributes: + None + """ + + self.input_file = input_file + self.output_dir = output_dir + master_type, data_type = determine_master_type(self.input_file) + self.logger.info('The master file ' + str(self.input_file) + ' was determined to be a ' + + str(data_type) + ' ' + str(master_type) + ' file.') + kpf2d = KPF0.from_fits(self.input_file) + self.data_products = get_data_products_2D(kpf2d) + chips = [] + if 'Green' in self.data_products: chips.append('green') + if 'Red' in self.data_products: chips.append('red') + + # Make directory, if needed + try: + if master_type == 'bias': + savedir = self.output_dir + 'Bias/' + elif master_type == 'dark': + savedir = self.output_dir + 'Dark/' + elif master_type == 'flat': + savedir = self.output_dir + 'Flat/' + elif master_type == 'lfc': + savedir = self.output_dir + 'LFC/' + elif master_type == 'etalon': + savedir = self.output_dir + 'Etalon/' + elif master_type == 'thar': + savedir = self.output_dir + 'ThAr/' + elif master_type == 'une': + savedir = self.output_dir + 'UNe/' + else: + self.logger.error(f"Couldn't determine data type to create directory in Master quicklook pipeline.") + savedir = self.output_dir + 'Unidentified/' + os.makedirs(savedir, exist_ok=True) + + except Exception as e: + self.logger.error(f"Failure creating base output diretory in Master quicklook pipeline: {e}\n{traceback.format_exc()}") + + ### 2D Masters ### + if data_type == '2D': + if chips != []: + + # Make 2D images + try: + my_2D = Analyze2D(kpf2d, logger=self.logger) + for chip in chips: + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + '_2D_image_' + chip + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + my_2D.plot_2D_image(chip=chip, fig_path=filename, show_plot=False) + + except Exception as e: + self.logger.error(f"Failure in Master quicklook pipeline: {e}\n{traceback.format_exc()}") + + # Make 2D images - 3x3 arrays + try: + my_2D = Analyze2D(kpf2d, logger=self.logger) + for chip in chips: + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + '_2D_image_3x3zoom_' + chip + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + my_2D.plot_2D_image_zoom_3x3(chip=chip, fig_path=filename, show_plot=False) + + except Exception as e: + self.logger.error(f"Failure in Master quicklook pipeline: {e}\n{traceback.format_exc()}") + + # Make 2D image histograms + try: + my_2D = Analyze2D(kpf2d, logger=self.logger) + for chip in chips: + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + '_2D_histogram_' + chip + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + my_2D.plot_2D_image_histogram(chip=chip, fig_path=filename, show_plot=False) + + except Exception as e: + self.logger.error(f"Failure in 2D quicklook pipeline: {e}\n{traceback.format_exc()}") + + # Make 2D column cuts + try: + my_2D = Analyze2D(kpf2d, logger=self.logger) + for chip in chips: + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + '_2D_column_cut_' + chip + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + my_2D.plot_2D_column_cut(chip=chip, fig_path=filename, show_plot=False) + + except Exception as e: + self.logger.error(f"Failure in 2D quicklook pipeline: {e}\n{traceback.format_exc()}") + + +def determine_master_type(fullpath): + """ + Description: + Generates the 
standard quicklook data products for all of the master files in + a directory. + + Arguments: + fullpath (string) - full path to master file (usually a .fits) + + Outputs: + master_type - possible values: None, 'bias', dark, 'flat', 'thar', 'une', 'lfc' + data_type - possible values: None, 'L0', '2D', 'L1', 'L2', 'WLS' + + Attributes: + None + """ + fullpath = fullpath.lower() + master_type = None + data_type = None + + # Bias + if (('bias' in fullpath) and fullpath.endswith('.fits')): + master_type = 'bias' + if 'l1' in fullpath: + data_type = 'L1' + else: + data_type = '2D' + # Dark + if (('dark' in fullpath) and fullpath.endswith('.fits')): + master_type = 'dark' + if 'l1' in fullpath: + data_type = 'L1' + else: + data_type = '2D' + # Flat + if (('flat' in fullpath) and fullpath.endswith('.fits')): + master_type = 'flat' + if 'l2' in fullpath: + data_type = 'L2' + elif 'l1' in fullpath: + data_type = 'L1' + else: + data_type = '2D' + # LFC + if (('lfc' in fullpath) and fullpath.endswith('.fits')): + master_type = 'lfc' + if 'l2' in fullpath: + data_type = 'L2' + elif 'l1' in fullpath: + data_type = 'L1' + else: + data_type = '2D' + # Etalon + if (('etalon' in fullpath) and fullpath.endswith('.fits')): + master_type = 'etalon' + if 'l2' in fullpath: + data_type = 'L2' + elif 'l1' in fullpath: + data_type = 'L1' + else: + data_type = '2D' + # ThAr + if (('thar' in fullpath) and fullpath.endswith('.fits')): + master_type = 'thar' + if 'l2' in fullpath: + data_type = 'L2' + elif 'l1' in fullpath: + data_type = 'L1' + else: + data_type = '2D' + # UNe + if (('une' in fullpath) and fullpath.endswith('.fits')): + master_type = 'une' + if 'l2' in fullpath: + data_type = 'L2' + elif 'l1' in fullpath: + data_type = 'L1' + else: + data_type = '2D' + + return master_type, data_type + From accb4cab8e60fe0fa49ffbfba62bb4ca626ac1d8 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Sat, 26 Aug 2023 22:27:39 -0700 Subject: [PATCH 21/86] add date_from_path method --- modules/Utils/string_proc.py | 46 ++++++++++++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/modules/Utils/string_proc.py b/modules/Utils/string_proc.py index b17434fcc..2987bdd49 100644 --- a/modules/Utils/string_proc.py +++ b/modules/Utils/string_proc.py @@ -27,7 +27,6 @@ def __init__(self, def _pre_condition(self) -> bool: success = len(self.action.args) >= 3 and isinstance(self.action.args[0], str) and \ isinstance(self.action.args[1], str) and isinstance(self.action.args[2], str) - return success def _post_condition(self) -> bool: @@ -37,7 +36,6 @@ def _perform(self): original_value = self.action.args[0] old_value = self.action.args[1] new_value = self.action.args[2] - new_string = original_value.replace(old_value, new_value) return Arguments(new_string) @@ -92,6 +90,50 @@ def _perform(self): return Arguments(date_str) +class date_from_path(KPF_Primitive): + """ + This primitive determines the datecode (YYYYMMDD) from a path + (e.g., '/data/masters/20230711/kpf_20230711_master_arclamp_autocal-etalon-all-eve.fits'). 
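
(Illustrative behavior, inferred from the scan-for-an-8-digit-run logic in `_perform` below; the second path is hypothetical.)

    # date_from_path('/data/masters/20230711/kpf_20230711_master_arclamp_autocal-etalon-all-eve.fits') -> '20230711'
    # date_from_path('/data/masters/no_date.fits') -> '99999999'   (error code when no 8-digit run is found)
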
+ + Description: + - `action (keckdrpframework.models.action.Action)`: `action.args` contains positional arguments and + keyword arguments passed by the `date_from_path` event issued in the recipe: + + - `action.args[0] (string)`: path + """ + + def __init__(self, + action: Action, + context: ProcessingContext) -> None: + KPF_Primitive.__init__(self, action, context) + args_keys = [item for item in action.args.iter_kw() if item != "name"] + self.logger = self.context.logger + + def _pre_condition(self) -> bool: + success = len(self.action.args) == 1 and isinstance(self.action.args[0], str) + return success + + def _post_condition(self) -> bool: + return True + + def _perform(self): + # This algorithm finds the first 8-digit string and returns it. + path = self.action.args[0] + digit_count = 0 + potential_match = "" + for char in path: + if char.isdigit(): + digit_count += 1 + potential_match += char + if digit_count == 8: + return Arguments(potential_match) + else: + digit_count = 0 + potential_match = "" + + return Arguments('99999999') # Error code indicating that no match was found + + class level_from_kpffile(KPF_Primitive): """ This primitive determines the KPF data level (L0, 2D, L1, L2, None) of a KPF file. From 5ec0d9f5f63feb215704eb32901659ff26d6b67c Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Sun, 27 Aug 2023 12:49:16 -0700 Subject: [PATCH 22/86] Added masters to the watch and match recipes --- configs/quicklook_match.cfg | 4 ++-- recipes/quicklook_match.recipe | 36 +++++++++++++++++++----------- recipes/quicklook_watch_dir.recipe | 24 ++++++++++++-------- 3 files changed, 40 insertions(+), 24 deletions(-) diff --git a/configs/quicklook_match.cfg b/configs/quicklook_match.cfg index 6048799dd..1309d5edb 100644 --- a/configs/quicklook_match.cfg +++ b/configs/quicklook_match.cfg @@ -7,8 +7,8 @@ log_directory = /data/logs_QLP/ [ARGUMENT] # see quicklook_match.recipe for a description of how to set fullpath -fullpath = '/data/??/202?????/KP.20230820.30050.45*.fits' -#fullpath = '/data/??/202?????/KP.20230813.51416.47*.fits' +#fullpath = '/data/2D/202?????/KP.20230826.[3-5]????.??*.fits' +fullpath = '/data/masters/2023060?/*.fits' [MODULE_CONFIGS] quicklook = modules/quicklook/configs/default.cfg diff --git a/recipes/quicklook_match.recipe b/recipes/quicklook_match.recipe index a9d48f537..aeea2eef1 100644 --- a/recipes/quicklook_match.recipe +++ b/recipes/quicklook_match.recipe @@ -1,6 +1,6 @@ # This recipe is used to manually produce a set of QLP outputs that match the # fullpath config variable. It can be used to produce QLP for a single data -# level (L0, 2D, L1, L2) for a single datecode (YYYYDDMM) or combinations. +# level (L0, 2D, L1, L2, masters) for a single datecode (YYYYDDMM) or combinations. # All of the examples below are executed using the command # > kpf -c configs/quicklook_match.cfg -r recipes/quicklook_match.recipe # but with different values for the config variable 'fullpath'. 
@@ -16,21 +16,31 @@ # # Example - compute L0/2D/L1/L2 data products for all ObsID on a range of ten dates: # fullpath = '/data/??/2023072?/KP.*.fits' +# +# Example - compute masters data products for all files on a range of ten dates: +# fullpath = '/data/masters/2023072?/*.fits' from modules.Utils.string_proc import level_from_kpffile from modules.Utils.string_proc import date_from_kpffile +from modules.Utils.string_proc import date_from_path from modules.quicklook.src.quick_prim import Quicklook for input_file in find_files(config.ARGUMENT.fullpath): - level = level_from_kpffile(input_file) - datecode = date_from_kpffile(input_file) - output_dir= '/data/QLP/' + datecode + '/' - if level == 'L0': - open_file = kpf0_from_fits(input_file, data_type='KPF') - if level == '2D': - open_file = kpf0_from_fits(input_file, data_type='KPF') - if level == 'L1': - open_file = kpf1_from_fits(input_file, data_type='KPF') - if level == 'L2': - open_file = kpf2_from_fits(input_file, data_type='KPF') - Quicklook(open_file, output_dir, level) + if 'masters' in input_file: # Masters + datecode = date_from_path(input_file) + output_dir= '/data/QLP/' + datecode + '/Masters/' + Quicklook(input_file, output_dir, 'master') + else: # L0/2D/L1/L2 + datecode = date_from_kpffile(input_file) + level = level_from_kpffile(input_file) + output_dir= '/data/QLP/' + datecode + '/' + if level == 'L0': + open_file = kpf0_from_fits(input_file, data_type='KPF') + if level == '2D': + open_file = kpf0_from_fits(input_file, data_type='KPF') + if level == 'L1': + open_file = kpf1_from_fits(input_file, data_type='KPF') + if level == 'L2': + open_file = kpf2_from_fits(input_file, data_type='KPF') + Quicklook(open_file, output_dir, level) + diff --git a/recipes/quicklook_watch_dir.recipe b/recipes/quicklook_watch_dir.recipe index 213e95690..699994b0a 100644 --- a/recipes/quicklook_watch_dir.recipe +++ b/recipes/quicklook_watch_dir.recipe @@ -1,20 +1,26 @@ -# This recipe is used to watch a [L0/2D/L1/L2] directory for modified files and -# then to run the appropriate section of the QLP (L0/2D/L1/L2) to generate standard +# This recipe is used to watch a [L0/2D/L1/L2/masters] directory for modified files and +# then to run the appropriate section of the QLP (L0/2D/L1/L2/masters) to generate standard # data products. It must be run in watch mode. Separate instances should to be run -# for L0, 2D, L1, and L2 data directories. +# for L0, 2D, L1, L2, and masters data directories. # # Example: # > kpf --watch /data/2D/20230711/ -c configs/quicklook_watch_dir.cfg -r recipes/quicklook_watch_dir.recipe from modules.Utils.string_proc import level_from_kpffile from modules.Utils.string_proc import date_from_kpffile +from modules.Utils.string_proc import date_from_path from modules.quicklook.src.quick_prim import Quicklook +# New version file_path = context.file_path # from context in watch mode, e.g. /data/2D/20230711/KP.20230711.00415.52_2D.fits -level = level_from_kpffile(file_path) # 'L0', '2D', 'L1', 'L2', None -datecode = date_from_kpffile(file_path) # e.g. 
20230711 -output_dir= '/data/QLP/' + datecode + '/' +datecode = date_from_path(file_path) -if level != None: - open_file = kpf0_from_fits(file_path, data_type='KPF') - Quicklook(open_file, output_dir, level) +if 'masters' in file_path: # Masters + output_dir= '/data/QLP/' + datecode + '/Masters/' + Quicklook(file_path, output_dir, 'master') +else: # L0/2D/L1/L2 + level = level_from_kpffile(file_path) # 'L0', '2D', 'L1', 'L2', None + output_dir= '/data/QLP/' + datecode + '/' + if level != None: + open_file = kpf0_from_fits(file_path, data_type='KPF') + Quicklook(open_file, output_dir, level) From ac38255c674f165983de297db95389fe4bf125d9 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Sun, 27 Aug 2023 12:50:01 -0700 Subject: [PATCH 23/86] added L1 master QLP processing --- modules/quicklook/src/alg.py | 167 ++++++++++++++++++++++++++++++++--- 1 file changed, 153 insertions(+), 14 deletions(-) diff --git a/modules/quicklook/src/alg.py b/modules/quicklook/src/alg.py index c13c92bd3..8d8fa53c6 100644 --- a/modules/quicklook/src/alg.py +++ b/modules/quicklook/src/alg.py @@ -5,6 +5,7 @@ from modules.Utils.config_parser import ConfigHandler from kpfpipe.models.level0 import KPF0 from kpfpipe.models.level1 import KPF1 +from kpfpipe.models.level2 import KPF2 from keckdrpframework.models.arguments import Arguments import traceback import os @@ -22,7 +23,7 @@ from modules.Utils.analyze_l1 import AnalyzeL1 from modules.Utils.analyze_l2 import AnalyzeL2 from modules.Utils.kpf_parse import HeaderParse -import kpfpipe.pipelines.fits_primitives as fits_primitives +#import kpfpipe.pipelines.fits_primitives as fits_primitives from modules.Utils.kpf_parse import get_data_products_L0 from modules.Utils.kpf_parse import get_data_products_2D from modules.Utils.kpf_parse import get_data_products_L1 @@ -39,7 +40,7 @@ class QuicklookAlg: The following recipes in KPF-Pipeline/recipes/ are useful for generating QLP data products: - quicklook_watch.recipe -- this recipe watches a directory (recursively, if needed) + quicklook_watch_dir.recipe -- this recipe watches a directory (recursively, if needed) and triggers the QLP on file modification events. It must be run in watch mode. Separate instances should to be run for L0, 2D, L1, and L2 data directories. Example: @@ -495,10 +496,10 @@ def qlp_L2(self, kpf2, output_dir): def qlp_master(self, input_file, output_dir): """ Description: - Generates the standard quicklook data product for a master files. + Generates the standard quicklook data product for a master file. 
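
(Illustrative return values for determine_master_type, based on its substring checks shown earlier; both filenames are hypothetical.)

    # determine_master_type('/data/masters/20230419/kpf_20230419_master_bias_L1.fits') -> ('bias', 'L1')
    # determine_master_type('/data/masters/20230419/kpf_20230419_master_flat.fits')    -> ('flat', '2D')
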
Arguments: - master_dir (string) - the directory where master files are stored + input_file (string) - full path to the master file to be processed output_dir - directory for output QLP files (if show_plot=False) Attributes: @@ -510,11 +511,19 @@ def qlp_master(self, input_file, output_dir): master_type, data_type = determine_master_type(self.input_file) self.logger.info('The master file ' + str(self.input_file) + ' was determined to be a ' + str(data_type) + ' ' + str(master_type) + ' file.') - kpf2d = KPF0.from_fits(self.input_file) - self.data_products = get_data_products_2D(kpf2d) + if data_type == '2D': + kpf2d = KPF0.from_fits(self.input_file) + self.data_products = get_data_products_2D(kpf2d) + if data_type == 'L1': + kpf1 = KPF1.from_fits(self.input_file) + self.data_products = get_data_products_L1(kpf1) + if data_type == 'L2': + kpf2 = KPF2.from_fits(self.input_file) + self.data_products = get_data_products_L2(kpf2) chips = [] - if 'Green' in self.data_products: chips.append('green') - if 'Red' in self.data_products: chips.append('red') + if master_type != None: + if 'Green' in self.data_products: chips.append('green') + if 'Red' in self.data_products: chips.append('red') # Make directory, if needed try: @@ -548,7 +557,8 @@ def qlp_master(self, input_file, output_dir): try: my_2D = Analyze2D(kpf2d, logger=self.logger) for chip in chips: - filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + '_2D_image_' + chip + '_zoomable.png' + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_2D_image_' + chip + '_zoomable.png' self.logger.info('Generating QLP image ' + filename) my_2D.plot_2D_image(chip=chip, fig_path=filename, show_plot=False) @@ -559,7 +569,8 @@ def qlp_master(self, input_file, output_dir): try: my_2D = Analyze2D(kpf2d, logger=self.logger) for chip in chips: - filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + '_2D_image_3x3zoom_' + chip + '_zoomable.png' + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_2D_image_3x3zoom_' + chip + '_zoomable.png' self.logger.info('Generating QLP image ' + filename) my_2D.plot_2D_image_zoom_3x3(chip=chip, fig_path=filename, show_plot=False) @@ -570,23 +581,151 @@ def qlp_master(self, input_file, output_dir): try: my_2D = Analyze2D(kpf2d, logger=self.logger) for chip in chips: - filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + '_2D_histogram_' + chip + '_zoomable.png' + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_2D_histogram_' + chip + '_zoomable.png' self.logger.info('Generating QLP image ' + filename) my_2D.plot_2D_image_histogram(chip=chip, fig_path=filename, show_plot=False) except Exception as e: - self.logger.error(f"Failure in 2D quicklook pipeline: {e}\n{traceback.format_exc()}") + self.logger.error(f"Failure in Master quicklook pipeline: {e}\n{traceback.format_exc()}") # Make 2D column cuts try: my_2D = Analyze2D(kpf2d, logger=self.logger) for chip in chips: - filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + '_2D_column_cut_' + chip + '_zoomable.png' + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_2D_column_cut_' + chip + '_zoomable.png' self.logger.info('Generating QLP image ' + filename) my_2D.plot_2D_column_cut(chip=chip, fig_path=filename, show_plot=False) except Exception as e: - self.logger.error(f"Failure in 2D quicklook pipeline: {e}\n{traceback.format_exc()}") + self.logger.error(f"Failure in 
Master quicklook pipeline: {e}\n{traceback.format_exc()}") + + + ### L1 Masters ### + if data_type == 'L1': + + # Make L1 SNR plot + try: + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_L1_SNR_' + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + myL1 = AnalyzeL1(kpf1, logger=self.logger) + myL1.measure_L1_snr() + myL1.plot_L1_snr(fig_path=filename, show_plot=False) + + except Exception as e: + self.logger.error(f"Failure in Master quicklook pipeline: {e}\n{traceback.format_exc()}") + + # Make L1 spectra plots + try: + for oo, orderlet in enumerate(['SCI1', 'SCI2', 'SCI3', 'CAL', 'SKY']): + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_L1_spectrum_' + orderlet + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + myL1.plot_L1_spectrum(orderlet=orderlet, fig_path=filename, show_plot=False) + + except Exception as e: + self.logger.error(f"Failure in Master quicklook pipeline: {e}\n{traceback.format_exc()}") + + # Make L1 spectra plots (single order) + if chips != []: + try: + myL1 = AnalyzeL1(kpf1, logger=self.logger) + if 'green' in chips: # don't use 'for chip in chips:' here so that the file creation order is correct for Jump to display in a certain order + chip = 'green' + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_L1_spectrum_SCI_order11_' + chip + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + myL1.plot_1D_spectrum_single_order(chip=chip, order=11, ylog=False, + orderlet=['SCI1', 'SCI2', 'SCI3'], + fig_path=filename, show_plot=False) + if 'red' in chips: + chip = 'red' + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_L1_spectrum_SCI_order11_' + chip + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + myL1.plot_1D_spectrum_single_order(chip=chip, order=11, ylog=False, + orderlet=['SCI1', 'SCI2', 'SCI3'], + fig_path=filename, show_plot=False) + if 'green' in chips: + chip = 'green' + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_L1_spectrum_SKY_order11_' + chip + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + myL1.plot_1D_spectrum_single_order(chip=chip, order=11, ylog=False, + orderlet=['SKY'], + fig_path=filename, show_plot=False) + if 'red' in chips: + chip = 'red' + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_L1_spectrum_SKY_order11_' + chip + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + myL1.plot_1D_spectrum_single_order(chip=chip, order=11, ylog=False, + orderlet=['SKY'], + fig_path=filename, show_plot=False) + if 'green' in chips: + chip = 'green' + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_L1_spectrum_CAL_order11_' + chip + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + myL1.plot_1D_spectrum_single_order(chip=chip, order=11, ylog=False, + orderlet=['CAL'], + fig_path=filename, show_plot=False) + if 'red' in chips: + chip = 'red' + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_L1_spectrum_CAL_order11_' + chip + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + myL1.plot_1D_spectrum_single_order(chip=chip, order=11, ylog=False, + orderlet=['CAL'], + fig_path=filename, show_plot=False) + + except Exception as e: + self.logger.error(f"Failure in Master quicklook pipeline: {e}\n{traceback.format_exc()}") + + # 
Make order ratio grid plots + if chips != []: + try: + for chip in chips: + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_L1_orderlet_flux_ratios_grid_' + chip + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + myL1 = AnalyzeL1(kpf1, logger=self.logger) + myL1.measure_orderlet_flux_ratios() + myL1.plot_orderlet_flux_ratios_grid(chip=chip, fig_path=filename, show_plot=False) + + except Exception as e: + self.logger.error(f"Failure in Master quicklook pipeline: {e}\n{traceback.format_exc()}") + + # Make order ratio plot + if chips != []: + try: + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_L1_orderlet_flux_ratios_zoomable.png' + self.logger.info('Measuring orderlet flux ratios for ' + filename) + self.logger.info('Generating QLP image ' + filename) + myL1.plot_orderlet_flux_ratios(fig_path=filename, show_plot=False) + + except Exception as e: + self.logger.error(f"Failure in Master quicklook pipeline: {e}\n{traceback.format_exc()}") + + + ### L2 Masters ### + if data_type == 'L2': + + # Make CCF grid plots + if chips != []: + try: + myL2 = AnalyzeL2(kpf2, logger=self.logger) + for chip in chips: + filename = savedir + self.input_file.split('/')[-1].replace('.fits', '') + \ + '_CCF_grid_' + chip + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + myL2.plot_CCF_grid(chip=chip, fig_path=filename, show_plot=False) + + except Exception as e: + self.logger.error(f"Failure in CCF quicklook pipeline: {e}\n{traceback.format_exc()}") def determine_master_type(fullpath): From 7b51661128cbddcf688f446b64bdbbd8121db2fe Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Sun, 27 Aug 2023 20:59:33 -0700 Subject: [PATCH 24/86] updated QLP script --- scripts/launch_QLP.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/launch_QLP.sh b/scripts/launch_QLP.sh index 77682e42d..cb017fa51 100755 --- a/scripts/launch_QLP.sh +++ b/scripts/launch_QLP.sh @@ -1,9 +1,9 @@ #!/bin/bash -# This script is designed to launch the 4 QLP instances with a single command +# This script is designed to launch the 5 QLP instances with a single command ncpus=1 -data_levels=("L0" "2D" "L1" "L2") +data_levels=("L0" "2D" "L1" "L2" "masters") for lvl in "${data_levels[@]}"; do cmd="kpf --ncpus=${ncpus} --watch ${KPFPIPE_DATA}/${lvl}/ -r recipes/quicklook_watch_dir.recipe -c configs/quicklook_watch_dir.cfg" From 4cd4768ffadb6fe4a76c19d5a2d1b10bc43f68ae Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Sun, 27 Aug 2023 20:59:50 -0700 Subject: [PATCH 25/86] minor --- configs/quicklook_match.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configs/quicklook_match.cfg b/configs/quicklook_match.cfg index 1309d5edb..1272c607c 100644 --- a/configs/quicklook_match.cfg +++ b/configs/quicklook_match.cfg @@ -8,7 +8,7 @@ log_directory = /data/logs_QLP/ [ARGUMENT] # see quicklook_match.recipe for a description of how to set fullpath #fullpath = '/data/2D/202?????/KP.20230826.[3-5]????.??*.fits' -fullpath = '/data/masters/2023060?/*.fits' +fullpath = '/data/masters/20230419/*.fits' [MODULE_CONFIGS] quicklook = modules/quicklook/configs/default.cfg From c7c8bb25030203dde016bd415773c1fe3546470d Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Tue, 29 Aug 2023 12:36:05 -0700 Subject: [PATCH 26/86] define peak fitting window in config --- modules/wavelength_cal/configs/KPF.cfg | 1 + modules/wavelength_cal/src/alg.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git 
a/modules/wavelength_cal/configs/KPF.cfg b/modules/wavelength_cal/configs/KPF.cfg index c3459390e..e9a3b4309 100644 --- a/modules/wavelength_cal/configs/KPF.cfg +++ b/modules/wavelength_cal/configs/KPF.cfg @@ -16,6 +16,7 @@ clip_below_median = 1 peak_height_threshold = 0.3 sigma_clip = 2.5 fit_iterations = 5 +fit_width = 10 # width in pixels of the fitting window for Gauss fits to emission lines # skip these orders when solving for the LFC WLS and instead use a ThAr solution green_skip_orders = 0,1,2,3,4,5,6,7,8,9,10 diff --git a/modules/wavelength_cal/src/alg.py b/modules/wavelength_cal/src/alg.py index b2d7a3737..0aa7fc2d9 100644 --- a/modules/wavelength_cal/src/alg.py +++ b/modules/wavelength_cal/src/alg.py @@ -64,6 +64,7 @@ def __init__( self.peak_height_threshold = configpull.get_config_value('peak_height_threshold',1.5) self.sigma_clip = configpull.get_config_value('sigma_clip',2.1) self.fit_iterations = configpull.get_config_value('fit_iterations',5) + self.fit_width = configpull.get_config_value('fit_width', 10) self.logger = logger def run_wavelength_cal( @@ -595,7 +596,8 @@ def find_peaks(self, order_flux, peak_height_threshold=1.5): # fit peaks with Gaussian to get accurate position fitted_peaks = detected_peaks.astype(float) gauss_coeffs = np.empty((4, len(detected_peaks))) - width = np.mean(np.diff(detected_peaks)) // 2 + # width = np.mean(np.diff(detected_peaks)) // 2 + width = self.fit_width for j, p in enumerate(detected_peaks): idx = p + np.arange(-width, width + 1, 1) From 459eccdfb613f3201feca8b8122ae2f9cf3237be Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Tue, 29 Aug 2023 14:18:11 -0700 Subject: [PATCH 27/86] make the peak fitting window a config parameter --- modules/wavelength_cal/configs/KPF.cfg | 3 ++- modules/wavelength_cal/src/alg.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/wavelength_cal/configs/KPF.cfg b/modules/wavelength_cal/configs/KPF.cfg index e9a3b4309..b62bffe2e 100644 --- a/modules/wavelength_cal/configs/KPF.cfg +++ b/modules/wavelength_cal/configs/KPF.cfg @@ -16,7 +16,8 @@ clip_below_median = 1 peak_height_threshold = 0.3 sigma_clip = 2.5 fit_iterations = 5 -fit_width = 10 # width in pixels of the fitting window for Gauss fits to emission lines +# width in pixels of the fitting window for Gauss fits to emission lines +fit_width = 25 # skip these orders when solving for the LFC WLS and instead use a ThAr solution green_skip_orders = 0,1,2,3,4,5,6,7,8,9,10 diff --git a/modules/wavelength_cal/src/alg.py b/modules/wavelength_cal/src/alg.py index 0aa7fc2d9..2882f8b27 100644 --- a/modules/wavelength_cal/src/alg.py +++ b/modules/wavelength_cal/src/alg.py @@ -597,7 +597,7 @@ def find_peaks(self, order_flux, peak_height_threshold=1.5): fitted_peaks = detected_peaks.astype(float) gauss_coeffs = np.empty((4, len(detected_peaks))) # width = np.mean(np.diff(detected_peaks)) // 2 - width = self.fit_width + width = self.fit_width // 2 for j, p in enumerate(detected_peaks): idx = p + np.arange(-width, width + 1, 1) From 771bfe80eb3f39195bb7baf43a97432ae2ebee74 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Tue, 29 Aug 2023 21:11:45 -0700 Subject: [PATCH 28/86] bug fix --- modules/quicklook/src/alg.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/quicklook/src/alg.py b/modules/quicklook/src/alg.py index 8d8fa53c6..9e8f9aeac 100644 --- a/modules/quicklook/src/alg.py +++ b/modules/quicklook/src/alg.py @@ -247,13 +247,13 @@ def qlp_2D(self, kpf2d, output_dir): try: savedir = D2_QLP_file_base +'2D/' 
os.makedirs(savedir, exist_ok=True) # make directories if needed
-            my_2D = AnalyzeL0(kpf2d, logger=self.logger) # should this be Analyze2D instead?
+            my_2D = Analyze2D(kpf2d, logger=self.logger)
             for chip in chips:
                 # next line not working yet
                 #Analyze2D.measure_2D_dark_current(self, chip=chip)
                 filename = savedir + self.ObsID + '_2D_image_' + chip + '_zoomable.png'
                 self.logger.info('Generating QLP image ' + filename)
-                my_2D.plot_2D_image(self, chip=chip, fig_path=filename, show_plot=False)
+                my_2D.plot_2D_image(chip=chip, fig_path=filename, show_plot=False)
 
         except Exception as e:
             self.logger.error(f"Failure in 2D quicklook pipeline: {e}\n{traceback.format_exc()}")
@@ -284,7 +284,7 @@ def qlp_2D(self, kpf2d, output_dir):
                 for chip in chips:
                     filename = savedir + self.ObsID + '_2D_histogram_' + chip + '_zoomable.png'
                     self.logger.info('Generating QLP image ' + filename)
-                    my_2D.plot_2D_image_histogram(self, chip=chip, fig_path=filename, show_plot=False)
+                    my_2D.plot_2D_image_histogram(chip=chip, fig_path=filename, show_plot=False)
 
         except Exception as e:
             self.logger.error(f"Failure in 2D quicklook pipeline: {e}\n{traceback.format_exc()}")

From d8ee61b519e479ce971f68dc9be929bd0f12ed4c Mon Sep 17 00:00:00 2001
From: cwang2016
Date: Tue, 29 Aug 2023 14:50:01 -0700
Subject: [PATCH 29/86] convert jd to bjd on rv columns and header keywords.
 add jd to bjd conversion to barycentric correction module.

---
 .../src/alg_barycentric_corr.py               | 24 ++++
 modules/radial_velocity/src/alg.py            | 33 ++++++++--
 .../src/midpoint_photon_arrival.py            |  7 +--
 .../radial_velocity/src/radial_velocity.py    | 13 ++--
 .../src/radial_velocity_reweighting.py        |  3 +-
 .../spectral_extraction/src/alg_bary_corr.py  | 60 +++----------
 6 files changed, 74 insertions(+), 66 deletions(-)

diff --git a/modules/barycentric_correction/src/alg_barycentric_corr.py b/modules/barycentric_correction/src/alg_barycentric_corr.py
index abbe2913f..683e8f247 100644
--- a/modules/barycentric_correction/src/alg_barycentric_corr.py
+++ b/modules/barycentric_correction/src/alg_barycentric_corr.py
@@ -3,6 +3,7 @@
 import os.path
 import logging
 from barycorrpy import get_BC_vel
+import barycorrpy
 from astropy.utils import iers
 import pandas as pd
 from astropy import constants as const
@@ -318,3 +319,26 @@ def get_bc_corr(obs_config, jd):
                              alt=obs_config[BarycentricCorrectionAlg.ALT],
                              rv=obs_config[BarycentricCorrectionAlg.RV])
         return bc_obj[0][0]
+
+    @staticmethod
+    def get_bjd(bc_config, jdtime):
+        star = bc_config[BarycentricCorrectionAlg.STARNAME].lower()
+        if star == 'sun':
+            V_BJD, warn, stat = barycorrpy.utc_tdb.JDUTC_to_HJDTDB(jdtime,
+                                        lat=bc_config[BarycentricCorrectionAlg.LAT],
+                                        longi=bc_config[BarycentricCorrectionAlg.LON],
+                                        alt=bc_config[BarycentricCorrectionAlg.ALT])
+        else:
+            V_BJD, warn, stat = barycorrpy.utc_tdb.JDUTC_to_BJDTDB(jdtime,
+                                        ra=bc_config[BarycentricCorrectionAlg.RA],
+                                        dec=bc_config[BarycentricCorrectionAlg.DEC],
+                                        epoch=bc_config[BarycentricCorrectionAlg.EPOCH],
+                                        pmra=bc_config[BarycentricCorrectionAlg.PMRA],
+                                        pmdec=bc_config[BarycentricCorrectionAlg.PMDEC],
+                                        px=bc_config[BarycentricCorrectionAlg.PX],
+                                        lat=bc_config[BarycentricCorrectionAlg.LAT],
+                                        longi=bc_config[BarycentricCorrectionAlg.LON],
+                                        alt=bc_config[BarycentricCorrectionAlg.ALT],
+                                        rv=bc_config[BarycentricCorrectionAlg.RV])
+
+        return V_BJD[0], warn, stat
\ No newline at end of file
diff --git a/modules/radial_velocity/src/alg.py b/modules/radial_velocity/src/alg.py
index 916371121..5b687f6cb 100644
--- a/modules/radial_velocity/src/alg.py
+++ 
b/modules/radial_velocity/src/alg.py @@ -161,6 +161,7 @@ def __init__(self, spectrum_data, header, init_rv, wave_cal=None, config=None, l self.orderlet_mask_line = None self.vel_span_pixel = vel_span_pixel if vel_span_pixel is not None else \ RadialVelocityAlg.comp_velocity_span_pixel(init_data, config, self.spectro) + self.obs_bjd = None @staticmethod def comp_velocity_span_pixel(init_data, config, instrument): @@ -348,6 +349,7 @@ def get_obs_time(self, default=None, seg=0): if self.obs_jd is None: self.obs_jd = np.zeros(self.get_total_segments(), dtype=float) + self.obs_bjd = np.zeros(self.get_total_segments(), dtype=float) if self.spectro == 'neid': self.obs_jd[:] = self.get_obs_time_neid(default=default) @@ -355,11 +357,12 @@ def get_obs_time(self, default=None, seg=0): self.obs_jd[:]= self.get_obs_time_harps(default=default) elif self.spectro == 'kpf': for s in range(self.get_total_segments()): - self.obs_jd[s] = self.get_obs_time_kpf(default=default, seg=s) + self.obs_jd[s], self.obs_bjd[s] = self.get_obs_time_kpf(default=default, seg=s) if seg < 0: if self.spectro == 'kpf': - return self.get_obs_time_kpf(seg=-1) + obs_bj, _ = self.get_obs_time_kpf(seg=-1) + return obs_bj else: seg = 0 @@ -389,15 +392,17 @@ def get_obs_time_kpf(self, default=None, seg=0): else: ord_idx = 0 + obs_bjd = None if seg >= 0 and self.bary_corr_table is not None and not self.bary_corr_table.empty and \ np.shape(self.bary_corr_table.values)[0] > ord_idx: - obs_time = np.array(self.bary_corr_table['PHOTON_BJD'])[ord_idx+self.start_bary_index] + obs_time = np.array(self.bary_corr_table['PHOTON_JD'])[ord_idx+self.start_bary_index] + obs_bjd = np.array(self.bary_corr_table['PHOTON_BJD'])[ord_idx+self.start_bary_index] elif 'MJD-OBS' in self.header and 'ELAPSED' in self.header: obs_time = self.header['MJD-OBS'] + 2400000.5 else: obs_time = default - return obs_time + return obs_time, obs_bjd # get redshift for kpf on specific segment def get_redshift_kpf(self, seg=0, default=None): @@ -1135,6 +1140,14 @@ def f_decimal(num): else: results.attrs['CCFJDSUM'] = self.get_obs_time(seg=-1) # exposure time from the header results.attrs['CCFJDSEG'] = self.obs_jd # an array for all segments + + obs_bjd = np.zeros_like(self.obs_jd) if self.obs_bjd is None else self.obs_bjd + if obs_bjd[0] == 0.0: + bc_config = self.configure() + for i in range(obs_bjd.size): + obs_bjd[i], _, _ = BarycentricCorrectionAlg.get_bjd(bc_config, self.obs_jd[i]) + + results.attrs['CCFBJDSEG'] = obs_bjd results.attrs['CCF-RVC'] = (f_decimal(rv_result), 'BaryC RV (km/s)') # rv based on CCF summation results.attrs['CCFSTART'] = self.init_data[RadialVelocityAlgInit.VELOCITY_LOOP][0] results.attrs['CCFSTEP'] = self.rv_config[RadialVelocityAlgInit.STEP] @@ -1154,6 +1167,18 @@ def f_decimal(num): return results + def configure(self): + bc_config = dict() + bc_keys = [RadialVelocityAlgInit.RA, RadialVelocityAlgInit.DEC, + RadialVelocityAlgInit.PMRA, RadialVelocityAlgInit.PMDEC, + RadialVelocityAlgInit.EPOCH, RadialVelocityAlgInit.PARALLAX, + RadialVelocityAlgInit.STARNAME, RadialVelocityAlgInit.STAR_RV, + RadialVelocityAlgInit.OBSLAT, RadialVelocityAlgInit.OBSLON, RadialVelocityAlgInit.OBSALT] + for k in bc_keys: + bc_config[k] = self.init_data[RadialVelocityAlgInit.RV_CONFIG][k] + bc_config[RadialVelocityAlgInit.SPEC] = self.spectro + return bc_config + def is_none_fiberobject(self, fiberobj_key): """ Check if fiber object is None diff --git a/modules/radial_velocity/src/midpoint_photon_arrival.py b/modules/radial_velocity/src/midpoint_photon_arrival.py 
index b9cdadea5..795ad3d92 100644 --- a/modules/radial_velocity/src/midpoint_photon_arrival.py +++ b/modules/radial_velocity/src/midpoint_photon_arrival.py @@ -288,7 +288,7 @@ def midPoint(self,start,finish,exposures,expBeg,expEnd,clip=True): It is essential that exposures, expBeg, and expEnd all be arrays of the same length. """ - + if np.min(exposures)<0: ### Make sure all the expsosures are positive exposures=exposures-np.min(exposures)+1 @@ -330,7 +330,7 @@ def midPoint(self,start,finish,exposures,expBeg,expEnd,clip=True): indxHigh=np.min(np.where(dummyVarEnd>=midVal)) indxLow=np.max(np.where(dummyVarBeg<=midVal)) - + ####Use the rate to estimate the time the middle number of photons was recieved if indxLow==indxHigh: rate=exposures[indxLow]/(expEnd[indxLow]-expBeg[indxLow]).astype(float) @@ -392,5 +392,4 @@ def orderedMidPoint(self): self.orderMid=df_EM_bin.columns.astype(float) self.midPhoto=midBand - return (self.orderMid,self.midPhoto) - + return (self.orderMid,self.midPhoto) diff --git a/modules/radial_velocity/src/radial_velocity.py b/modules/radial_velocity/src/radial_velocity.py index 8cbe8ece9..23fbbdb47 100644 --- a/modules/radial_velocity/src/radial_velocity.py +++ b/modules/radial_velocity/src/radial_velocity.py @@ -180,6 +180,7 @@ class RadialVelocity(KPF1_Primitive): RV_COL_CAL_ERR = 'CAL error' RV_COL_SKY_ERR = 'SKY error' RV_COL_CCFJD = 'CCFJD' + RV_COL_CCFBJD = 'CCFBJD' RV_COL_BARY = 'Bary_RVC' RV_COL_SOURCE = 'source' RV_COL_CAL = 'CAL RV' @@ -189,7 +190,7 @@ class RadialVelocity(KPF1_Primitive): RV_WEIGHTS = 'CCF Weights' rv_col_names = [RV_COL_ORDERLET, RV_COL_START_W, RV_COL_END_W, RV_COL_SEG_NO, RV_COL_ORD_NO, RV_COL_RV, RV_COL_RV_ERR, RV_COL_CAL, RV_COL_CAL_ERR, RV_COL_SKY, RV_COL_SKY_ERR, - RV_COL_CCFJD, RV_COL_BARY, + RV_COL_CCFBJD, RV_COL_BARY, RV_COL_SOURCE, RV_COL_CAL_SOURCE, RV_COL_SKY_SOURCE, RV_WEIGHTS] rv_col_on_orderlet = [RV_COL_ORDERLET, RV_COL_SOURCE] @@ -565,7 +566,8 @@ def construct_level2_data(self, output_df): self.output_level2.header[self.rv_ext][ccd_p+'rv'] = new_rv_table.attrs[ccd_+'rv'] self.output_level2.header[self.rv_ext][ccd_p+'erv'] = new_rv_table.attrs[ccd_+'erv'] - self.output_level2.header[self.rv_ext][ccd_p+'jd'] = new_rv_table.attrs[ccd_+'jd'] + # self.output_level2.header[self.rv_ext][ccd_p+'jd'] = new_rv_table.attrs[ccd_+'jd'] + self.output_level2.header[self.rv_ext][ccd_p + 'bjd'] = new_rv_table.attrs[ccd_ + 'bjd'] self.output_level2.header[self.rv_ext]['rv'+str(self.rv_set_idx+1)+'corr'] = 'T' \ if new_rv_table.attrs['do_rv_corr'] else 'F' @@ -698,6 +700,7 @@ def f_decimal(num): if jd is None: jd = output_df[od_name].attrs['CCFJDSEG'] + bjd = output_df[od_name].attrs['CCFBJDSEG'] if bary is None: bary = output_df[od_name].attrs['BARY'] if starrv is None: @@ -762,7 +765,8 @@ def f_decimal(num): rv_table[self.RV_COL_END_W] = segment_table[s_seg:e_seg, RadialVelocityAlg.SEGMENT_W2] rv_table[self.RV_COL_SEG_NO] = segment_table[s_seg:e_seg, RadialVelocityAlg.SEGMENT_IDX].astype(int) rv_table[self.RV_COL_ORD_NO] = segment_table[s_seg:e_seg, RadialVelocityAlg.SEGMENT_ORD].astype(int) - rv_table[self.RV_COL_CCFJD] = np.ones(total_segment) * jd + # rv_table[self.RV_COL_CCFJD] = np.ones(total_segment) * jd + rv_table[self.RV_COL_CCFBJD] = np.ones(total_segment) * bjd rv_table[self.RV_COL_BARY] = np.ones(total_segment) * bary rv_table[self.RV_WEIGHTS] = np.ones(total_segment) if sci_ccf_ratio is None else sci_ccf_ratio[:, -1][0:total_segment] @@ -796,7 +800,8 @@ def f_decimal(num): results.attrs['ccd_rv'] = (f_decimal(final_rv - cal_rv), 
unit) \ if do_corr and final_rv != 0.0 else (f_decimal(final_rv), unit) results.attrs['ccd_erv'] = f_decimal(final_rv_err) - results.attrs['ccd_jd'] = np.mean(jd) + # results.attrs['ccd_jd'] = np.mean(jd) + results.attrs['ccd_bjd'] = np.mean(bjd) results.attrs['star_rv'] = starrv results.attrs['do_rv_corr'] = do_corr diff --git a/modules/radial_velocity/src/radial_velocity_reweighting.py b/modules/radial_velocity/src/radial_velocity_reweighting.py index fff3766a3..988618f09 100644 --- a/modules/radial_velocity/src/radial_velocity_reweighting.py +++ b/modules/radial_velocity/src/radial_velocity_reweighting.py @@ -129,7 +129,8 @@ class RadialVelocityReweighting(KPF2_Primitive): RV_COL_RV_ERR = RV.RV_COL_RV_ERR RV_COL_CAL_ERR = RV.RV_COL_CAL_ERR RV_COL_SKY_ERR = RV.RV_COL_SKY_ERR - RV_COL_CCFJD = RV.RV_COL_CCFJD + #RV_COL_CCFJD = RV.RV_COL_CCFJD + RV_COL_CCFBJD = RV.RV_COL_CCFBJD RV_COL_BARY = RV.RV_COL_BARY RV_COL_SOURCE = RV.RV_COL_SOURCE RV_COL_CAL = RV.RV_COL_CAL diff --git a/modules/spectral_extraction/src/alg_bary_corr.py b/modules/spectral_extraction/src/alg_bary_corr.py index f16e62921..6d4790116 100644 --- a/modules/spectral_extraction/src/alg_bary_corr.py +++ b/modules/spectral_extraction/src/alg_bary_corr.py @@ -224,7 +224,7 @@ def get_rv_col(self, is_single_time=False): def configure(self): """ - Extract neccesary information from the header and config file. + Extract necessary information from the header and config file. """ bc_config = dict() header_keys = [RadialVelocityAlgInit.RA, RadialVelocityAlgInit.DEC, @@ -318,31 +318,7 @@ def build_bary_corr_table(self): else: self.bary_corr_table[BaryCorrTableAlg.BC_col1][self.start_bary_index:self.end_bary_index+1] = mid_utc self.bary_corr_table[BaryCorrTableAlg.BC_col2][self.start_bary_index:self.end_bary_index+1] = Time(mid_utc).jd - - if RadialVelocityAlgInit.STARNAME.lower() == 'sun': - GEOMID_BJD, warn, stat = barycorrpy.utc_tdb.JDUTC_to_HJDTDB(Time(mid_utc).jd, - ra=None, - dec=None, - epoch=None, - pmra=None, - pmdec=None, - px=None, - lat=self.bc_config[RadialVelocityAlgInit.OBSLAT], - longi=self.bc_config[RadialVelocityAlgInit.OBSLON], - alt=self.bc_config[RadialVelocityAlgInit.OBSALT], - rv=None) - else: - GEOMID_BJD, warn, stat = barycorrpy.utc_tdb.JDUTC_to_BJDTDB(Time(mid_utc).jd, - ra=self.bc_config[RadialVelocityAlgInit.RA], - dec=self.bc_config[RadialVelocityAlgInit.DEC], - epoch=self.bc_config[RadialVelocityAlgInit.EPOCH], - pmra=self.bc_config[RadialVelocityAlgInit.PMRA], - pmdec=self.bc_config[RadialVelocityAlgInit.PMDEC], - px=self.bc_config[RadialVelocityAlgInit.PARALLAX], - lat=self.bc_config[RadialVelocityAlgInit.OBSLAT], - longi=self.bc_config[RadialVelocityAlgInit.OBSLON], - alt=self.bc_config[RadialVelocityAlgInit.OBSALT], - rv=self.bc_config[RadialVelocityAlgInit.STAR_RV]) + GEOMID_BJD, warn, stat = BarycentricCorrectionAlg.get_bjd(self.bc_config, Time(mid_utc).jd) self.bary_corr_table[BaryCorrTableAlg.BC_col4][self.start_bary_index:self.end_bary_index+1] = GEOMID_BJD df_em = self.get_expmeter_science() @@ -368,33 +344,11 @@ def build_bary_corr_table(self): else: # using observation time for midpoint arrival time if no df_em exists midphoton = self.bary_corr_table[BaryCorrTableAlg.BC_col1][self.start_bary_index:self.end_bary_index+1] is_single_mid = True + for i in range(midphoton.size): self.bary_corr_table[BaryCorrTableAlg.BC_col3][i+self.start_bary_index] = Time(midphoton[i]).jd - if RadialVelocityAlgInit.STARNAME.lower() == 'sun': - PHOTMID_BJD, warn, stat = 
barycorrpy.utc_tdb.JDUTC_to_HJDTDB(Time(midphoton[i]).jd,
-                                                          ra=None,
-                                                          dec=None,
-                                                          epoch=None,
-                                                          pmra=None,
-                                                          pmdec=None,
-                                                          px=None,
-                                                          lat=self.bc_config[RadialVelocityAlgInit.OBSLAT],
-                                                          longi=self.bc_config[RadialVelocityAlgInit.OBSLON],
-                                                          alt=self.bc_config[RadialVelocityAlgInit.OBSALT],
-                                                          rv=None)
-            else:
-                PHOTMID_BJD, warn, stat = barycorrpy.utc_tdb.JDUTC_to_BJDTDB(Time(midphoton[i]).jd,
-                                                          ra=self.bc_config[RadialVelocityAlgInit.RA],
-                                                          dec=self.bc_config[RadialVelocityAlgInit.DEC],
-                                                          epoch=self.bc_config[RadialVelocityAlgInit.EPOCH],
-                                                          pmra=self.bc_config[RadialVelocityAlgInit.PMRA],
-                                                          pmdec=self.bc_config[RadialVelocityAlgInit.PMDEC],
-                                                          px=self.bc_config[RadialVelocityAlgInit.PARALLAX],
-                                                          lat=self.bc_config[RadialVelocityAlgInit.OBSLAT],
-                                                          longi=self.bc_config[RadialVelocityAlgInit.OBSLON],
-                                                          alt=self.bc_config[RadialVelocityAlgInit.OBSALT],
-                                                          rv=self.bc_config[RadialVelocityAlgInit.STAR_RV])
-
+            PHOTMID_BJD, warn, stat = BarycentricCorrectionAlg.get_bjd(self.bc_config, Time(midphoton[i]).jd)
+
             self.bary_corr_table[BaryCorrTableAlg.BC_col5][i+self.start_bary_index] = PHOTMID_BJD
 
         if self.is_bc_calculated():
@@ -402,5 +356,5 @@ def build_bary_corr_table(self):
 
         bc_df_res = pd.DataFrame(self.bary_corr_table)
         bc_df_res.attrs['BCV_UNIT'] = 'm/sec'
-
-        return bc_df_res
+
+        return bc_df_res
\ No newline at end of file

From 425a43f4b93e7d54f31d03cf04ce3c95690c890c Mon Sep 17 00:00:00 2001
From: Russ
Date: Tue, 5 Sep 2023 08:12:07 -0700
Subject: [PATCH 30/86] Added new modules/quality_control.

---
 modules/quality_control/__init__.py           |   0
 modules/quality_control/configs/default.cfg   |  12 +
 modules/quality_control/src/__init__.py       |   0
 .../quality_control/src/quality_control.py    | 453 ++++++++++++++++++
 .../src/quality_control_framework.py          | 120 +++++
 5 files changed, 585 insertions(+)
 create mode 100644 modules/quality_control/__init__.py
 create mode 100644 modules/quality_control/configs/default.cfg
 create mode 100644 modules/quality_control/src/__init__.py
 create mode 100644 modules/quality_control/src/quality_control.py
 create mode 100644 modules/quality_control/src/quality_control_framework.py

diff --git a/modules/quality_control/__init__.py b/modules/quality_control/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/modules/quality_control/configs/default.cfg b/modules/quality_control/configs/default.cfg
new file mode 100644
index 000000000..5ab21904d
--- /dev/null
+++ b/modules/quality_control/configs/default.cfg
@@ -0,0 +1,12 @@
+## Default configuration for QualityControlFramework primitive
+[LOGGER]
+start_log = True
+log_path = logs/quality_control_framework_debug.log
+log_level = debug
+log_verbose = True
+
+
+## Module related parameters
+[PARAM]
+debug_level = 0
+
diff --git a/modules/quality_control/src/__init__.py b/modules/quality_control/src/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/modules/quality_control/src/quality_control.py b/modules/quality_control/src/quality_control.py
new file mode 100644
index 000000000..a0de73ce7
--- /dev/null
+++ b/modules/quality_control/src/quality_control.py
@@ -0,0 +1,453 @@
+import numpy as np
+import numpy.ma as ma
+
+"""
+This module contains classes for KPF data quality control (QC). Various QC metrics are defined in
+class QCDefinitions. The other classes, QCL0, QC2D, QCL1, and QCL2, contain methods to compute QC values,
+which are associated with the QC metrics, for specific data products, and then store them in the primary header
+of the corresponding FITS file.
Normally QC values are stored first in the FITS header, but storage
+in the KPF pipeline-operations database may be set up later by the database administrator, depending
+upon the special requirements for some QC metrics.
+"""
+
+iam = 'quality_control'
+version = '1.2'
+
+"""
+The following are methods common across data levels, which are given at the beginning
+of this module, before the QC classes are defined.
+
+Includes helper functions that compute statistics of data of arbitrary shape.
+"""
+
+#####################################
+# Module helper functions.
+#####################################
+
+#
+# Print software version.
+#
+def what_am_i():
+    print('Software version:',iam + ' ' + version)
+
+#
+# Statistics with outlier rejection (n-sigma data-trimming), ignoring NaNs, across all data array dimensions.
+#
+def avg_data_with_clipping(data_array,n_sigma = 3.0):
+
+    a = np.array(data_array)
+
+    med = np.nanmedian(a)
+    p16 = np.nanpercentile(a,16)
+    p84 = np.nanpercentile(a,84)
+    sigma = 0.5 * (p84 - p16)
+    mdmsg = med - n_sigma * sigma
+    b = np.less(a,mdmsg)
+    mdpsg = med + n_sigma * sigma
+    c = np.greater(a,mdpsg)
+    d = np.where(np.isnan(a),True,False)
+    mask = b | c | d
+    mx = ma.masked_array(a, mask)
+    avg = ma.getdata(mx.mean())
+    std = ma.getdata(mx.std())
+    cnt = ma.getdata(mx.count())
+
+    return avg,std,cnt
+
+#
+# Check whether a file is not junk (i.e., = False if junk)
+#
+def not_junk_check(kpfobs, junk_ObsIDs_csv='/code/KPF-Pipeline/Junk_Observations_for_KPF.csv', debug=False):
+    """
+    This Quality Control function checks if the input (possibly an array) is in the list of junked files.
+
+    Args:
+        kpfobs - possible formats: 1. a single ObsID (string) (e.g. 'KP.20230621.27498.77')
+                                   2. a list of ObsIDs (e.g., ['KP.20230621.27611.73', 'KP.20230621.27498.77'])
+                                   3. a single KPF L0/2D/L1/L2 object
+                                   4. a list of KPF L0/2D/L1/L2 objects
+        junk_ObsIDs_csv - a CSV with ObsIDs (e.g., 'KP.20230621.27498.77') in the first column
+                          and a column header of 'observation_id'.
+                          That is, the first few lines of the file will look like this:
+                              observation_id
+                              KP.20230621.27498.77
+                              KP.20230621.27611.73
+                              KP.20220516.57354.11
+
+        debug - an optional flag.  If True, verbose output will be printed.
+ + Returns: + QC_pass - a boolean signifying that the input(s) are not junk (i.e., = False if junk) + """ + + import os + import numpy as np + import pandas as pd + + QC_pass = True # Assume not junk unless explicitly listed in junk_ObsIDs_csv + + # read list of junk files + if os.path.exists(junk_ObsIDs_csv): + df_junk = pd.read_csv(junk_ObsIDs_csv) + if debug: + print(f'Read the junk file {junk_ObsIDs_csv}.') + else: + print(f"The file {junk_ObsIDs_csv} does not exist.") + return QC_pass + + # initialize variables + if not type(kpfobs) is list: # convert input to list if necessary + kpfobs = [kpfobs] + input_not_list = True # note for later that the input was not a list + else: + input_not_list = False + + # convert inputs to ObsIDs (strings), if needed + for i in range(len(kpfobs)): + if not (type(kpfobs[i]) is str): + kpfobs[i] = (kpfobs[i].filename).replace('.fits', '') # this line assumes that kpfobs[i] is a L0/2D/L1/L2 object + kpfobs[i] = kpfobs[i].replace('_2D', '') # drop _2D suffix, if needed + kpfobs[i] = kpfobs[i].replace('_L1', '') # drop _L1 suffix, if needed + kpfobs[i] = kpfobs[i].replace('_L2', '') # drop _L2 suffix, if needed + + # loop through inputs and determine junk status of each + QC_pass = np.ones(len(kpfobs), dtype=bool) # starting QC values + for i, obs in enumerate(kpfobs): + QC_pass[i] = not (df_junk['observation_id'].isin([obs])).any() + if debug: + print(f'{obs} is a Junk file: ' + str(not QC_pass[i])) + + # remove list format if input was a single element + if input_not_list: + QC_pass = QC_pass[0] + + return QC_pass + + +##################################################################### + +class QCDefinitions: + + """ + Description: + This class defines QC metrics in a standard format. + Dictionaries are used to associate unique metric names with various metric metadata. + Modify this class to add new metrics. Do not remove any metrics (we deprecate metrics + simply by not using them any more). When adding metrics to this class, ensure the length + of the names list is equal to the number of dictionary entries. + + Class Attributes: + names (list of strings): Each element is a unique and descriptive name for the metric. No spaces allowed. + descriptions (dictionary of strings): Each dictionary entry specifies a short description of the metric + Try to keep it under 50 characters for brevity (this is not enforced but recommended). + data_types (dictionary of strings): Each entry specifies the Python data type of the metric. + Only string, int, float are allowed. Use 0/1 for boolean. + fits_keywords (dictionary of strings): Each entry specifies the FITS-header keyword for the metric. + Must be 8 characters or less, following the FITS standard. + fits_comments (dictionary of strings): Each entry specifies the FITS-header comment for the metric. + Must be a short string for brevity (say, under 35 characters), following the FITS standard. + db_columns (dictionary of strings): Each entry specifies either database_table.column if applicable, + or None if not. + methods (dictionary of lists): Each entry specifies a list of methods that apply to the metric. + """ + + def __init__(self): + + self.names = [] + self.descriptions = {} + self.data_types = {} + self.fits_keywords = {} + self.fits_comments = {} + self.db_columns = {} + self.methods = {} + + # Define the QC metrics here. + + name1 = 'jarque_bera_test_red_amp1' + self.names.append(name1) + self.descriptions[name1] = 'Jarque-Bera test of pixel values for RED AMP-1 detector.' 
+ self.data_types[name1] = 'float' + self.fits_keywords[name1] = 'JBTRED1' + self.fits_comments[name1] = 'J-B test for RED AMP-1 detector' + self.db_columns[name1] = None + self.methods[name1] = ["add_qc_keyword_to_header"] + + name2 = 'monotonic_wavelength_solution_check' + self.names.append(name2) + self.descriptions[name2] = 'Check if wavelength solution is monotonic.' + self.data_types[name2] = 'int' + self.fits_keywords[name2] = 'MONOTWLS' + self.fits_comments[name2] = 'Monotonic wavelength-solution check' + self.db_columns[name2] = None + self.methods[name2] = ["add_qc_keyword_to_header","monotonic_check","add_qc_keyword_to_header_for_monotonic_wls"] + + name3 = 'not_junk_data_check' + self.names.append(name3) + self.descriptions[name3] = 'Check if data in file are not junk.' + self.data_types[name3] = 'int' + self.fits_keywords[name3] = 'JUNKDATA' + self.fits_comments[name3] = 'Not-junk check' + self.db_columns[name3] = None + self.methods[name3] = ["add_qc_keyword_to_header"] + + # Integrity checks. + + if len(self.names) != len(self.descriptions): + raise ValueError("Length of names list does not equal number of entries in descriptions dictionary.") + + if len(self.names) != len(self.data_types): + raise ValueError("Length of names list does not equal number of entries in data_types dictionary.") + + if len(self.names) != len(self.fits_keywords): + raise ValueError("Length of names list does not equal number of entries in fits_keywords dictionary.") + + if len(self.names) != len(self.db_columns): + raise ValueError("Length of names list does not equal number of entries in db_columns dictionary.") + + keys_list = self.data_types.keys() + for key in keys_list: + dt = self.data_types[key] + if dt not in ['string','int','float']: + err_str = "Error in data type: " + dt + raise ValueError(err_str) + + + def list_qc_metrics(self): + + print("name | data_type | keyword | comment | methods | db_column | description |") + + qc_names = self.names + + for qc_name in qc_names: + + data_type = self.data_types[qc_name] + keyword = self.fits_keywords[qc_name] + comment = self.fits_comments[qc_name] + methods = self.methods[qc_name] + db_column = self.db_columns[qc_name] + description = self.descriptions[qc_name] + + print(qc_name,"|",data_type,"|",keyword,"|",comment,"|",methods,"|",db_column,"|",description,"|") + + + +##################################################################### +# +# Superclass QC is normally not to be called directly (although it is not an abstract class, per se). +# + +class QC: + + """ + Description: + This superclass defines QC functions in general and has common methods across + subclasses QCL0, QC2D, QCL1, and QCL2. + + Class Attributes: + fits_object (astropy.io object): Returned from function KPF0.from_fits(fits_filename,data_type), + which is wrapped by function read_fits in this module. + qcdefinitions (QCDefinitions object): Returned from constructor of QCDefinitions class. + + """ + + def __init__(self,fits_object): + self.fits_object = fits_object + self.qcdefinitions = QCDefinitions() + + def add_qc_keyword_to_header(self,qc_name,value): + + keyword = self.qcdefinitions.fits_keywords[qc_name] + comment = self.qcdefinitions.fits_comments[qc_name] + + self.fits_object.header['PRIMARY'][keyword] = (value,comment) + + + +##################################################################### + +class QCL0(QC): + + """ + Description: + This class inherits QC superclass and defines QC functions for L0 files. 
Since the policy is to not modify an L0 FITS file in the archive location
+        /data/kpf/L0/yyyymmdd, the class operates on the FITS object that will
+        elevate to a higher data level.  The QC info is inherited via the FITS header
+        and will be propagated downstream in the data-reduction pipeline, and will
+        eventually be written to an output FITS file.
+
+    Class Attributes:
+        data_type (string): Data type in terms of project (e.g., KPF).
+        fits_filename (string): Input FITS filename (include absolute path).
+        fits_object (astropy.io object): Returned from function KPF0.from_fits(fits_filename,data_type),
+            which is wrapped by function read_fits in this module.
+        qcdefinitions (QCDefinitions object): Returned from constructor of QCDefinitions class.
+
+    Example python code to illustrate usage of this module in calling program:
+
+        import modules.quality_control.src.quality_control as qc
+
+        qc.what_am_i()
+
+        in_file = '/code/KPF-Pipeline/KP.20230828.40579.55.fits'
+        out_file = '/code/KPF-Pipeline/junk.fits'
+
+
+        fits_object = from_fits('KPF',in_file)
+        qcl0 = qc.QCL0(fits_object)
+        name = 'jarque_bera_test_red_amp1'
+        value = 3.14159256
+        qcl0.add_qc_keyword_to_header(name,value)
+        to_fits(qcl0.fits_object,out_file)
+    """
+
+    # Call superclass.
+
+    def __init__(self,fits_object):
+        super().__init__(fits_object)
+
+
+
+#####################################################################
+
+class QC2D(QC):
+
+    """
+    Description:
+        This class inherits QC superclass and defines QC functions for 2D files.
+
+    Class Attributes:
+        fits_object (astropy.io object): Returned from function KPF0.from_fits(fits_filename,data_type),
+            which is wrapped by function read_fits in this module.
+        qcdefinitions (QCDefinitions object): Returned from constructor of QCDefinitions class.
+    """
+
+    # Call superclass.
+
+    def __init__(self,fits_object):
+        super().__init__(fits_object)
+
+
+
+#####################################################################
+
+class QCL1(QC):
+
+    """
+    Description:
+        This class inherits QC superclass and defines QC functions for L1 files.
+
+    Class Attributes:
+        fits_object (astropy.io object): Returned from function KPF0.from_fits(fits_filename,data_type),
+            which is wrapped by function read_fits in this module.
+        qcdefinitions (QCDefinitions object): Returned from constructor of QCDefinitions class.
+
+    Example python code to illustrate usage of this module in calling program:
+
+        import modules.quality_control.src.quality_control as qc
+
+        qc.what_am_i()
+
+        l1_file = '/code/KPF-Pipeline/kpf_20230814_master_arclamp_autocal-une-sky-eve_L1.fits'
+
+        fits_object = from_fits('KPF',l1_file)
+        qcl1 = qc.QCL1(fits_object)
+        qcl1.add_qc_keyword_to_header_for_monotonic_wls('monotonic_wavelength_solution_check')
+        to_fits(qcl1.fits_object,l1_file)
+    """
+
+    # Call superclass.
+
+    def __init__(self,fits_object):
+        super().__init__(fits_object)
+
+
+    def add_qc_keyword_to_header_for_monotonic_wls(self,qc_name):
+
+        keyword = self.qcdefinitions.fits_keywords[qc_name]
+        comment = self.qcdefinitions.fits_comments[qc_name]
+        qc_pass,bad_orders = self.monotonic_check(self.fits_object)
+
+        if qc_pass:
+            value = 1
+        else:
+            value = 0
+
+        self.fits_object.header['PRIMARY'][keyword] = (value,comment)
+
+    def monotonic_check(self,L1,debug=False):
+        """
+        This Quality Control function checks to see if a wavelength solution is
+        monotonic, specifically if wavelength decreases (or stays constant) with
+        increasing array index.
+
+        Args:
+            L1 - an L1 file that the QC check is to be run on
+            debug - an optional flag.
If True, nonmonotonic orders/orderlets will be noted with + print statements and plots. + + Returns: + QC_pass - a boolean signifying that every order/orderlet is monotonic (or not) + bad_orders - an array of strings listing the nonmonotonic orders and orderlets + """ + + #self.L1 = L1 + QC_pass = True + bad_orders = [] + + import numpy as np + if debug: + import matplotlib.pyplot as plt + + # Define wavelength extensions in L1 + extensions = [p + s for p in ["GREEN_", "RED_"] + for s in ["SCI_WAVE1", "SCI_WAVE2", "SCI_WAVE3", "SKY_WAVE", "CAL_WAVE"]] + + # Iterate over extensions (orderlets) and orders to check for monotonicity in each combination. + for ext in extensions: + if ext in L1: # Check if extension exists (e.g., if RED isn't processed) + norders = L1[ext].shape[0] + for o in range(norders): + WLS = L1[ext].data[o,:] # wavelength solution of the current order/orderlet + isMonotonic = np.all(WLS[:-1] >= WLS[1:]) # this expression determines monotonicity for the orderlet/order + if not isMonotonic: + QC_pass = False # the QC test fails if one order/orderlet is not monotonic + bad_orders.append(ext + '(' + str(o)+')') # append the bad order/orderlet to the list + if debug: + print('L1[' + ext + ']['+ str(o) +']: monotonic = ' + str(isMonotonic)) + plt.plot(WLS) + plt.title('L1[' + ext + '] (order = '+ str(o) +') -- not monotonic') + plt.show() + if debug: + try: # using a try/except statement because sometimes OFNAME isn't defined + print("File: " + L1['PRIMARY'].header['OFNAME']) + except: + pass + print("Monotonic = " + str(QC_pass)) + + return QC_pass, bad_orders + + + + +##################################################################### + +class QCL2(QC): + + """ + Description: + This class inherits QC superclass and defines QC functions for L2 files. + + Class Attributes: + fits_object (astropy.io object): Returned from function KPF0.from_fits(fits_filename,data_type), + which is wrapped by function read_fits in this module. + qcdefinitions (QCDefinitions object): Returned from constructor of QCDefinitions class. + + """ + + # Call superclass. + + def __init__(self,fits_object): + super().__init__(fits_object) + + diff --git a/modules/quality_control/src/quality_control_framework.py b/modules/quality_control/src/quality_control_framework.py new file mode 100644 index 000000000..80af91de4 --- /dev/null +++ b/modules/quality_control/src/quality_control_framework.py @@ -0,0 +1,120 @@ +import ast +import configparser as cp +import modules.quality_control.src.quality_control as qc + +# Pipeline dependencies +from kpfpipe.logger import * +from kpfpipe.primitives.level0 import KPF0_Primitive +from keckdrpframework.models.arguments import Arguments + +# Global read-only variables +DEFAULT_CFG_PATH = 'modules/quality_control/configs/default.cfg' + +class QualityControlFramework(KPF0_Primitive): + + """ + Description: + Performs quality control on a FITS file. Includes logic for automatically determining the data level. + + Arguments: + data_type (str): Type of data (e.g., KPF). + data_level_str (str): L0, 2D, L1, L2 are possible choices. + fits_filename (str): Input FITS filename. 
fits_object (KPF object): Input FITS object for the data level given by data_level_str.
+        qc_list_flag (int): Set to 1 to list the QC metrics defined in QCDefinitions.
+        actual_dir (str): Prefix of actual directory outside container that maps to /data (e.g., /data/kpf)
+
+
+    """
+
+    def __init__(self, action, context):
+
+        KPF0_Primitive.__init__(self, action, context)
+
+        self.data_type = self.action.args[0]
+        self.data_level_str = self.action.args[1]
+        self.fits_filename = self.action.args[2]
+        self.fits_object = self.action.args[3]
+        self.qc_list_flag = self.action.args[4]
+
+        try:
+            self.module_config_path = context.config_path['quality_control']
+            print("--->",self.__class__.__name__,": self.module_config_path =",self.module_config_path)
+        except:
+            self.module_config_path = DEFAULT_CFG_PATH
+
+        print("{} class: self.module_config_path = {}".format(self.__class__.__name__,self.module_config_path))
+
+        print("Starting logger...")
+        self.logger = start_logger(self.__class__.__name__, self.module_config_path)
+
+        if self.logger is not None:
+            print("--->self.logger is not None...")
+        else:
+            print("--->self.logger is None...")
+
+        self.logger.info('Started {}'.format(self.__class__.__name__))
+        self.logger.debug('module_config_path = {}'.format(self.module_config_path))
+
+        self.logger.info('self.data_type = {}'.format(self.data_type))
+        self.logger.info('self.fits_filename = {}'.format(self.fits_filename))
+
+        module_config_obj = cp.ConfigParser()
+        res = module_config_obj.read(self.module_config_path)
+        if res == []:
+            raise IOError('failed to read {}'.format(self.module_config_path))
+
+        module_param_cfg = module_config_obj['PARAM']
+
+        debug_level_cfg_str = module_param_cfg.get('debug_level')
+        self.debug_level_cfg = ast.literal_eval(debug_level_cfg_str)
+
+        self.logger.info('self.debug_level_cfg = {}'.format(self.debug_level_cfg))
+
+        self.logger.info('Type of self.debug_level_cfg = {}'.format(type(self.debug_level_cfg)))
+
+
+    def _perform(self):
+
+        """
+        Returns exitcode:
+            0 = Normal
+        """
+
+        quality_control_exit_code = 0
+
+
+        # Perform quality control.
+
+        self.logger.info('Performing quality control on {}'.format(self.fits_filename))
+
+        if 'L0' in self.data_level_str:
+            qc_obj = qc.QCL0(self.fits_object)
+            name = 'jarque_bera_test_red_amp1'
+            value = 3.14159256
+            qc_obj.add_qc_keyword_to_header(name,value)
+        elif '2D' in self.data_level_str:
+            qc_obj = qc.QC2D(self.fits_object)
+            name = 'jarque_bera_test_red_amp1'
+            value = 3.14159256
+            qc_obj.add_qc_keyword_to_header(name,value)
+        elif 'L1' in self.data_level_str:
+            qc_obj = qc.QCL1(self.fits_object)
+            name = 'jarque_bera_test_red_amp1'
+            value = 3.14159256
+            qc_obj.add_qc_keyword_to_header(name,value)
+        elif 'L2' in self.data_level_str:
+            qc_obj = qc.QCL2(self.fits_object)
+            name = 'jarque_bera_test_red_amp1'
+            value = 3.14159256
+            qc_obj.add_qc_keyword_to_header(name,value)
+
+        # Optionally list QC metrics.
+
+        if self.qc_list_flag == 1:
+            qc_obj.qcdefinitions.list_qc_metrics()
+
+        # Finish.
+
+        self.logger.info('Finished {}'.format(self.__class__.__name__))
+
+        return Arguments([quality_control_exit_code, self.fits_object])

From b2948f08b07655c5ba497591f8f962980b85f75c Mon Sep 17 00:00:00 2001
From: Russ
Date: Tue, 5 Sep 2023 08:12:19 -0700
Subject: [PATCH 31/86] Added example recipe and config for new modules/quality_control.
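
As a companion to the recipe below, here is a minimal sketch of exercising the
new quality_control module directly from Python (hedged: the KPF0.from_fits
reader and its import path are assumptions based on the module docstrings, not
part of this commit):

    import modules.quality_control.src.quality_control as qc
    from kpfpipe.models.level0 import KPF0   # assumed import path for the L0 reader

    qc.what_am_i()   # prints the module name and version

    # Read an L0 file into a KPF object (path taken from the example config).
    fits_object = KPF0.from_fits('/data/L0/20230430/KP.20230430.05981.93.fits', 'KPF')

    # Wrap it in the L0 QC class and record the not-junk check under its
    # FITS keyword (JUNKDATA), as defined in QCDefinitions.
    qcl0 = qc.QCL0(fits_object)
    qc_pass = qc.not_junk_check(fits_object)
    qcl0.add_qc_keyword_to_header('not_junk_data_check', int(qc_pass))

    # List every QC metric currently defined.
    qcl0.qcdefinitions.list_qc_metrics()
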
---
 configs/quality_control_example.cfg    | 20 +++++++++++++++++
 recipes/quality_control_example.recipe | 30 ++++++++++++++++++++++++++
 2 files changed, 50 insertions(+)
 create mode 100644 configs/quality_control_example.cfg
 create mode 100644 recipes/quality_control_example.recipe

diff --git a/configs/quality_control_example.cfg b/configs/quality_control_example.cfg
new file mode 100644
index 000000000..151091686
--- /dev/null
+++ b/configs/quality_control_example.cfg
@@ -0,0 +1,20 @@
+# Pipeline logger configurations
+[LOGGER]
+start_log = True
+log_path = pipeline.log
+log_level = info
+log_verbose = True
+log_directory = /data/logs/
+
+[ARGUMENT]
+data_type = KPF
+# L0, 2D, L1, L2 are possible choices for data_level_str.
+data_level_str = L0
+input_fits_filename = /data/L0/20230430/KP.20230430.05981.93.fits
+output_fits_filename = /data/2D/20230430/KP.20230430.05981.93_2D.fits
+
+[MODULE_CONFIGS]
+quality_control = modules/quality_control/configs/default.cfg
+
+
+
diff --git a/recipes/quality_control_example.recipe b/recipes/quality_control_example.recipe
new file mode 100644
index 000000000..977464ebb
--- /dev/null
+++ b/recipes/quality_control_example.recipe
@@ -0,0 +1,30 @@
+from modules.quality_control.src.quality_control_framework import QualityControlFramework
+
+data_type = config.ARGUMENT.data_type
+data_level_str = config.ARGUMENT.data_level_str
+input_fits_filename = config.ARGUMENT.input_fits_filename
+output_fits_filename = config.ARGUMENT.output_fits_filename
+
+qc_list_flag = 1
+
+if exists(input_fits_filename):
+
+    if 'L0' in data_level_str:
+        fits_object = kpf0_from_fits(input_fits_filename, data_type = data_type)
+    elif '2D' in data_level_str:
+        fits_object = kpf0_from_fits(input_fits_filename, data_type = data_type)
+    elif 'L1' in data_level_str:
+        fits_object = kpf1_from_fits(input_fits_filename, data_type = data_type)
+    elif 'L2' in data_level_str:
+        fits_object = kpf2_from_fits(input_fits_filename, data_type = data_type)
+
+    quality_control_return_list = QualityControlFramework(data_type,
+                                                          data_level_str,
+                                                          input_fits_filename,
+                                                          fits_object,
+                                                          qc_list_flag)
+
+    exit_code = quality_control_return_list[0]
+    output_fits_object = quality_control_return_list[1]
+
+    result = to_fits(output_fits_object, output_fits_filename)
\ No newline at end of file

From 53105a2ecc975122f5234b34a3af247e7e64ba03 Mon Sep 17 00:00:00 2001
From: cwang2016
Date: Tue, 12 Sep 2023 12:27:57 -0700
Subject: [PATCH 32/86] feature/order_rectification_on_stack add flat
 extension to OrderRectification and SpectralExtraction to indicate the
 extension for flat data in master flat 2D.

---
 .../src/order_rectification.py | 27 +++++++++++--------
 .../src/spectral_extraction.py | 11 +++++---
 recipes/kpf_drp.recipe         |  5 +++-
 3 files changed, 27 insertions(+), 16 deletions(-)

diff --git a/modules/spectral_extraction/src/order_rectification.py b/modules/spectral_extraction/src/order_rectification.py
index dbfc28e3c..91ad4051b 100644
--- a/modules/spectral_extraction/src/order_rectification.py
+++ b/modules/spectral_extraction/src/order_rectification.py
@@ -23,6 +23,8 @@
                 - `action.args['rectification_method'] (str, optional)`: Rectification method, '`norect`',
                   '`vertical`', or '`normal`', to rectify the curved order trace. Defaults to '`norect`',
                   meaning no rectification.
+                - `action.args['data_extension']: (str, optional)`: the name of the extension in spectrum containing data.
+                - `action.args['flat_extension']: (str, optional)`: the name of the extension in flat containing data.
- `action.args['clip_file'] (str, optional)`: Prefix of clip file path. Defaults to None. Clip file is used to store the polygon clip data for the rectification method which is not NoRECT. @@ -96,6 +98,7 @@ class OrderRectification(KPF0_Primitive): 'rectification_method': 'norect', # 'norect', 'normal', 'vertical', 'clip_file': None, 'data_extension': 'DATA', + 'flat_extension': 'DATA', 'trace_extension': None, 'trace_file': None, 'poly_degree': 3, @@ -131,6 +134,8 @@ def __init__(self, order_trace_ext = self.get_args_value('trace_extension', action.args, args_keys) order_trace_file = self.get_args_value('trace_file', action.args, args_keys) + self.flat_ext = self.get_args_value('flat_extension', action.args, args_keys) + # input configuration self.config = configparser.ConfigParser() try: @@ -151,11 +156,10 @@ def __init__(self, spec_header = self.input_spectrum.header[self.data_ext] \ if (self.input_spectrum is not None and hasattr(self.input_spectrum, self.data_ext)) else None - flat_data = self.input_flat[self.data_ext] \ - if self.input_flat is not None and hasattr(self.input_flat, self.data_ext) else None - flat_header = self.input_flat.header[self.data_ext] \ - if (self.input_flat is not None and hasattr(self.input_flat, self.data_ext)) else None - + flat_data = self.input_flat[self.flat_ext] \ + if self.input_flat is not None and hasattr(self.input_flat, self.flat_ext) else None + flat_header = self.input_flat.header[self.flat_ext] \ + if (self.input_flat is not None and hasattr(self.input_flat, self.flat_ext)) else None self.order_trace_data = None if order_trace_file: @@ -218,11 +222,11 @@ def _perform(self): self.logger.info("OrderRectification: the order of the spectrum is rectified already") return Arguments(self.input_spectrum) else: - if SpectralExtractionAlg.RECTIFYKEY in self.input_flat.header[self.data_ext]: + if SpectralExtractionAlg.RECTIFYKEY in self.input_flat.header[self.flat_ext]: self.logger.info("OrderRectification: the order of the flat is rectified already") return Arguments(self.input_flat) # no spectrum data case - if self.input_flat is not None and self.input_flat[self.data_ext].size == 0: + if self.input_flat is not None and self.input_flat[self.flat_ext].size == 0: if self.logger: self.logger.info("OrderRectification: no spectrum data to rectify") return Arguments(None) @@ -264,7 +268,7 @@ def _perform(self): level0_obj = self.input_spectrum if rect_on == 'spectrum' else self.input_flat - self.update_level0_data(data_df, level0_obj) + self.update_level0_data(data_df, level0_obj, rect_on) level0_obj.receipt_add_entry('OrderRectification', self.__module__, f'order trace is rectified', 'PASS') if self.logger: @@ -275,11 +279,12 @@ def _perform(self): return Arguments(level0_obj) - def update_level0_data(self, data_result, lev0_obj): + def update_level0_data(self, data_result, lev0_obj, rect_on): # img_d = np.where(np.isnan(data_result.values), 0.0, data_result.values) - lev0_obj[self.data_ext] = data_result.values + ext = self.data_ext if rect_on == 'spectrum' else self.flat_ext + lev0_obj[ext] = data_result.values for att in data_result.attrs: - lev0_obj.header[self.data_ext][att] = data_result.attrs[att] + lev0_obj.header[ext][att] = data_result.attrs[att] def get_args_value(self, key: str, args: Arguments, args_keys: list): diff --git a/modules/spectral_extraction/src/spectral_extraction.py b/modules/spectral_extraction/src/spectral_extraction.py index 8c00f3372..0314b6aee 100644 --- a/modules/spectral_extraction/src/spectral_extraction.py +++ 
b/modules/spectral_extraction/src/spectral_extraction.py @@ -40,7 +40,8 @@ Clip file is used to store the polygon clip data for the rectification method which is not NoRECT. - `action.args['total_order_per_ccd']: (int, optional)`: total order per ccd. Defaults to False. - - `action.args['data_extension']: (str, optional)`: the name of the extension containing data. + - `action.args['data_extension']: (str, optional)`: the name of the extension in spectrum containing data. + - `action.args['flat_extension']: (str, optional)`: the name of the extension in flat containing data. - `action.args['trace_extension']: (str, optional)`: the name of the extension containing order trace results. - `action.args['trace_file']: (str, optional)`: the name file containing order trace results. @@ -142,6 +143,7 @@ class SpectralExtraction(KPF0_Primitive): 'to_set_wavelength_cal': False, 'clip_file': None, 'data_extension': 'DATA', + 'flat_extension': 'DATA', 'poly_degree': 3, 'origin': [0, 0], 'trace_extension': None, @@ -184,6 +186,7 @@ def __init__(self, self.total_order_per_ccd = self.get_args_value('total_order_per_ccd', action.args, args_keys) data_ext = self.get_args_value('data_extension', action.args, args_keys) + flat_ext = self.get_args_value('flat_extension', action.args, args_keys) self.data_ext = data_ext order_trace_ext = self.get_args_value('trace_extension', action.args, args_keys) order_trace_file = self.get_args_value('trace_file', action.args, args_keys) @@ -237,8 +240,8 @@ def __init__(self, if self.outlier_lev0 is not None and hasattr(self.outlier_lev0, data_ext) else None try: - self.alg = SpectralExtractionAlg(self.input_flat[data_ext] if hasattr(self.input_flat, data_ext) else None, - self.input_flat.header[data_ext] if hasattr(self.input_flat, data_ext) else None, + self.alg = SpectralExtractionAlg(self.input_flat[flat_ext] if hasattr(self.input_flat, flat_ext) else None, + self.input_flat.header[flat_ext] if hasattr(self.input_flat, flat_ext) else None, self.spec_flux, self.spec_header, self.order_trace_data, @@ -342,7 +345,7 @@ def _perform(self): SpectralExtractionAlg.extracting_method[self.extraction_method] + " extraction on " + order_name + " of " + str(o_set.size) + " orders") - opt_ext_result = self.alg.extract_spectrum(order_set=o_set, first_index=first_index) + opt_ext_result = self.alg.extract_spectrum(order_set=o_set, first_index=first_index, order_name = order_name) assert('spectral_extraction_result' in opt_ext_result and isinstance(opt_ext_result['spectral_extraction_result'], pd.DataFrame)) diff --git a/recipes/kpf_drp.recipe b/recipes/kpf_drp.recipe index 36ebcf47e..e8eecdbcd 100644 --- a/recipes/kpf_drp.recipe +++ b/recipes/kpf_drp.recipe @@ -327,6 +327,7 @@ flat_stem, flat_ext = splitext(short_flat_file) trace_list = [] b_all_traces = False lev0_flat_rect = None +ccd_stack = '_STACK' if do_order_trace: trace_list = [] @@ -346,7 +347,7 @@ if do_order_trace: output_lev0_trace_csv = str_replace(input_flat_file, fits_ext, '_' + ccd + csv_ext); trace_list = trace_list + [output_lev0_trace_csv] if not exists(output_lev0_trace_csv): - order_result_data = OrderTrace(flat_data, data_extension=ccd + '_STACK', + order_result_data = OrderTrace(flat_data, data_extension=ccd+ccd_stack, result_path=output_lev0_trace_csv, is_output_file=True, data_col_range=data_col_range, data_row_range=data_row_range, fitting_poly_degree=poly_degree) @@ -375,6 +376,7 @@ if do_order_trace: lev0_flat_rect = OrderRectification(None, lev0_flat_rect, orderlet_names=orderlet_names[idx], 
trace_file=trace_file, data_extension=ccd, + flat_extension = ccd+ccd_stack, rectification_method=rect_method, clip_file=clip_file, origin=origin, poly_degree=poly_degree) @@ -529,6 +531,7 @@ if do_spectral_extraction or do_hk or do_bc: ccd_index = idx, rectification_method=rect_method, extraction_method=extract_method, clip_file=clip_file, data_extension=ccd, trace_file=trace_file, + flat_extension=ccd+ccd_stack, do_outlier_rejection=do_outlier, outlier_file=outlier_rejection_file) From 6f8660445d4295de61c788be2364a9e9f302b255 Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Thu, 14 Sep 2023 13:10:16 -0700 Subject: [PATCH 33/86] check for existence of corr columns --- modules/radial_velocity/src/midpoint_photon_arrival.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/modules/radial_velocity/src/midpoint_photon_arrival.py b/modules/radial_velocity/src/midpoint_photon_arrival.py index 795ad3d92..ca9698e89 100644 --- a/modules/radial_velocity/src/midpoint_photon_arrival.py +++ b/modules/radial_velocity/src/midpoint_photon_arrival.py @@ -377,8 +377,13 @@ def orderedMidPoint(self): df_EM_norm = self.df_EM[wav_str] * EM_gain /disp # define time arrays - date_beg = np.array(self.df_EM["Date-Beg-Corr"], dtype=np.datetime64) - date_end = np.array(self.df_EM["Date-End-Corr"], dtype=np.datetime64) + startkey = "Date-Beg" + endkey = "Date-End" + if "Date-Beg-Corr" in self.df_EM.columns: + startkey += "-Corr" + endkey += "-Corr" + date_beg = np.array(self.df_EM[startkey], dtype=np.datetime64) + date_end = np.array(self.df_EM[endkey], dtype=np.datetime64) # Bin wavelengths into orders df_EM_bin=self.binOrder(df_EM_norm,self.segmentMin,self.segmentMax) From 2e7224d867a8d7fb17ff63a5ca7611b22785ee65 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Thu, 14 Sep 2023 16:06:42 -0700 Subject: [PATCH 34/86] added L1b --- modules/Utils/analyze_wls.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/Utils/analyze_wls.py b/modules/Utils/analyze_wls.py index a2c5c616b..685ab6624 100644 --- a/modules/Utils/analyze_wls.py +++ b/modules/Utils/analyze_wls.py @@ -12,18 +12,20 @@ class AnalyzeWLS: Arguments: L1 - an L1 object + L1b (optiona) - a second L1 object to compare to L1 Attributes: None so far """ - def __init__(self, L1, logger=None): + def __init__(self, L1, L1b=None, logger=None): if self.logger: self.logger = logger self.logger.debug('Initializing AnalyzeWLS object.') else: self.logger = None self.L1 = L1 + self.L1b = L1b primary_header = HeaderParse(L1, 'PRIMARY') self.header = primary_header.header self.name = primary_header.get_name() From 492a126e0c3c30e9095f89f56b8713ab0b1a08a9 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Thu, 14 Sep 2023 17:03:01 -0700 Subject: [PATCH 35/86] added plot_WLS_orderlet_diff --- configs/quicklook_match.cfg | 4 +- modules/Utils/analyze_wls.py | 108 +++++++++++++++++++++++++++++++++-- modules/quicklook/src/alg.py | 18 ++++++ 3 files changed, 124 insertions(+), 6 deletions(-) diff --git a/configs/quicklook_match.cfg b/configs/quicklook_match.cfg index 1272c607c..6a7415009 100644 --- a/configs/quicklook_match.cfg +++ b/configs/quicklook_match.cfg @@ -7,8 +7,8 @@ log_directory = /data/logs_QLP/ [ARGUMENT] # see quicklook_match.recipe for a description of how to set fullpath -#fullpath = '/data/2D/202?????/KP.20230826.[3-5]????.??*.fits' -fullpath = '/data/masters/20230419/*.fits' +fullpath = '/data/L1/202?????/KP.20230701.36172.51*.fits' +#fullpath = '/data/masters/20230419/*.fits' [MODULE_CONFIGS] quicklook = 
modules/quicklook/configs/default.cfg

diff --git a/modules/Utils/analyze_wls.py b/modules/Utils/analyze_wls.py
index 685ab6624..59f62ef45 100644
--- a/modules/Utils/analyze_wls.py
+++ b/modules/Utils/analyze_wls.py
@@ -1,3 +1,4 @@
+import time
 import numpy as np
 import matplotlib.pyplot as plt
 from modules.Utils.kpf_parse import HeaderParse
@@ -18,17 +19,116 @@ class AnalyzeWLS:
         None so far
     """
 
-    def __init__(self, L1, L1b=None, logger=None):
-        if self.logger:
+    def __init__(self, L1, logger=None):
+        if logger:
             self.logger = logger
-            self.logger.debug('Initializing AnalyzeWLS object.')
+            self.logger.debug('Initializing AnalyzeWLS object')
         else:
             self.logger = None
         self.L1 = L1
-        self.L1b = L1b
+        #self.header = L1['PRIMARY'].header
         primary_header = HeaderParse(L1, 'PRIMARY')
         self.header = primary_header.header
         self.name = primary_header.get_name()
 
         #self.ObsID = primary_header.get_obsid()
         # need a filename instead or in addition
+
+
+# def __init__(self, L1, logger=None):
+#     if self.logger:
+#         self.logger = logger
+#         self.logger.debug('Initializing AnalyzeWLS object.')
+#     else:
+#         self.logger = None
+#     self.L1 = L1
+#     #self.L1b = L1b
+#     primary_header = HeaderParse(L1, 'PRIMARY')
+#     self.header = primary_header.header
+#     self.name = primary_header.get_name()
+#
+#     #self.ObsID = primary_header.get_obsid()
+#     # need a filename instead or in addition
+
+
+    def plot_WLS_orderlet_diff(self, chip=None, fig_path=None, show_plot=False):
+        """
+        Make a plot of differences between the wavelength solutions for the orders.
+
+        Args:
+            chip (string) - "green" or "red"
+            fig_path (string) - set to the path for the plot file
+                to be generated.
+            show_plot (boolean) - show the plot in the current environment
+
+        Returns:
+            PNG plot in fig_path or shows the plot in the current environment
+            (e.g., in a Jupyter Notebook).
+        """
+        # Set parameters based on the chip selected
+        if chip == 'green' or chip == 'red':
+            if chip == 'green':
+                CHIP = 'GREEN'
+                chip_title = 'Green'
+            if chip == 'red':
+                CHIP = 'RED'
+                chip_title = 'Red'
+        else:
+            self.logger.debug('chip not supplied. Exiting plot_WLS_orderlet_diff')
+            print('chip not supplied. Exiting plot_WLS_orderlet_diff')
+            return
+
+# placeholder line
+        fig = plt.plot(np.arange(10))
+
+# Substitute good plotting stuff here.
+#        # Make 3-panel plot.
First, create the figure and subplots +# fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, figsize=(10,10), tight_layout=True) +# +# # Plot the data on each subplot +# ax1.scatter(self.GREEN_SNR_WAV, self.GREEN_SNR[:,5], marker="8", color='darkgreen', label='SCI1+SCI2+SCI3') +# ax1.scatter(self.GREEN_SNR_WAV, self.GREEN_SNR[:,1], marker=">", color='darkgreen', label='SCI1') +# ax1.scatter(self.GREEN_SNR_WAV, self.GREEN_SNR[:,2], marker="s", color='darkgreen', label='SCI2') +# ax1.scatter(self.GREEN_SNR_WAV, self.GREEN_SNR[:,3], marker="<", color='darkgreen', label='SCI3') +# ax1.scatter(self.RED_SNR_WAV, self.RED_SNR[:,5], marker="8", color='r', label='SCI1+SCI2+SCI3') +# ax1.scatter(self.RED_SNR_WAV, self.RED_SNR[:,1], marker=">", color='r', label='SCI1') +# ax1.scatter(self.RED_SNR_WAV, self.RED_SNR[:,2], marker="s", color='r', label='SCI2') +# ax1.scatter(self.RED_SNR_WAV, self.RED_SNR[:,3], marker="<", color='r', label='SCI3') +# ax1.yaxis.set_major_locator(MaxNLocator(nbins=12)) +# ax1.grid() +# ax2.scatter(self.GREEN_SNR_WAV, self.GREEN_SNR[:,4], marker="D", color='darkgreen', label='SKY') +# ax2.scatter(self.RED_SNR_WAV, self.RED_SNR[:,4], marker="D", color='r', label='SKY') +# ax2.yaxis.set_major_locator(MaxNLocator(nbins=12)) +# ax2.grid() +# ax3.scatter(self.GREEN_SNR_WAV, self.GREEN_SNR[:,0], marker="D", color='darkgreen', label='CAL') +# ax3.scatter(self.RED_SNR_WAV, self.RED_SNR[:,0], marker="D", color='r', label='CAL') +# ax3.yaxis.set_major_locator(MaxNLocator(nbins=12)) +# ax3.grid() +# ax3.set_xlim(4450,8700) +# +# # Add legend +# ax1.legend(["SCI1+SCI2+SCI3","SCI1","SCI2","SCI3"], ncol=4) +# +# # Set titles and labels for each subplot +# ax1.set_title(self.ObsID + ' - ' + self.name + ': ' + r'$\mathrm{SNR}_{'+str(self.snr_percentile)+'}$ = '+str(self.snr_percentile)+'th percentile (Signal / $\sqrt{\mathrm{Variance}}$)', fontsize=16) +# ax3.set_xlabel('Wavelength (Ang)', fontsize=14) +# ax1.set_ylabel('SNR - SCI', fontsize=14) +# ax2.set_ylabel('SNR - SKY', fontsize=14) +# ax3.set_ylabel('SNR - CAL', fontsize=14) +# ax3.xaxis.set_tick_params(labelsize=14) +# ax1.yaxis.set_tick_params(labelsize=14) +# ax2.yaxis.set_tick_params(labelsize=14) +# ax3.yaxis.set_tick_params(labelsize=14) +# +# # Adjust spacing between subplots +# plt.subplots_adjust(hspace=0) +# plt.tight_layout() + + # Display the plot + if fig_path != None: + t0 = time.process_time() + plt.savefig(fig_path, dpi=300, facecolor='w') + self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}') + if show_plot == True: + plt.show() + plt.close('all') diff --git a/modules/quicklook/src/alg.py b/modules/quicklook/src/alg.py index 9e8f9aeac..8c8327872 100644 --- a/modules/quicklook/src/alg.py +++ b/modules/quicklook/src/alg.py @@ -21,6 +21,7 @@ from modules.Utils.analyze_hk import AnalyzeHK from modules.Utils.analyze_2d import Analyze2D from modules.Utils.analyze_l1 import AnalyzeL1 +from modules.Utils.analyze_wls import AnalyzeWLS from modules.Utils.analyze_l2 import AnalyzeL2 from modules.Utils.kpf_parse import HeaderParse #import kpfpipe.pipelines.fits_primitives as fits_primitives @@ -339,6 +340,23 @@ def qlp_L1(self, kpf1, output_dir): except Exception as e: self.logger.error(f"Failure creating base output diretory in Exposure Meter quicklook pipeline: {e}\n{traceback.format_exc()}") + # Make WLS plots + try: + savedir = L1_QLP_file_base +'WLS/' + os.makedirs(savedir, exist_ok=True) # make directories if needed + if chips != []: + try: + for chip in chips: + filename = savedir + 
self.ObsID + '_WLS_orderlet_diff_' + chip + '_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + myWLS = AnalyzeWLS(kpf1, logger=self.logger) + myWLS.plot_WLS_orderlet_diff(chip=chip, fig_path=filename, show_plot=False) + except Exception as e: + self.logger.error(f"Failure in L1 quicklook pipeline: {e}\n{traceback.format_exc()}") + + except Exception as e: + self.logger.error(f"Failure in L1 quicklook pipeline: {e}\n{traceback.format_exc()}") + # Make L1 SNR plot try: savedir = L1_QLP_file_base +'L1/' From 66a623d04b3b017e35a46502fc6809f7a907c857 Mon Sep 17 00:00:00 2001 From: Greg Gilbert Date: Thu, 14 Sep 2023 17:18:58 -0700 Subject: [PATCH 36/86] added L1 interorderlet wls plot --- modules/Utils/analyze_wls.py | 86 +++++++++++++++++------------------- 1 file changed, 41 insertions(+), 45 deletions(-) diff --git a/modules/Utils/analyze_wls.py b/modules/Utils/analyze_wls.py index 59f62ef45..5e590daa1 100644 --- a/modules/Utils/analyze_wls.py +++ b/modules/Utils/analyze_wls.py @@ -1,6 +1,7 @@ import time import numpy as np import matplotlib.pyplot as plt +import matplotlib.cm as cm from modules.Utils.kpf_parse import HeaderParse from datetime import datetime @@ -78,52 +79,47 @@ def plot_WLS_orderlet_diff(self, chip=None, fig_path=None, show_plot=False): print('chip not supplied. Exiting plot_WLS_orderlet_diff') return -# placeholder line - fig = plt.plot(np.arange(10)) - -# Substitute good plotting stuff here. -# # Make 3-panel plot. First, create the figure and subplots -# fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, figsize=(10,10), tight_layout=True) -# -# # Plot the data on each subplot -# ax1.scatter(self.GREEN_SNR_WAV, self.GREEN_SNR[:,5], marker="8", color='darkgreen', label='SCI1+SCI2+SCI3') -# ax1.scatter(self.GREEN_SNR_WAV, self.GREEN_SNR[:,1], marker=">", color='darkgreen', label='SCI1') -# ax1.scatter(self.GREEN_SNR_WAV, self.GREEN_SNR[:,2], marker="s", color='darkgreen', label='SCI2') -# ax1.scatter(self.GREEN_SNR_WAV, self.GREEN_SNR[:,3], marker="<", color='darkgreen', label='SCI3') -# ax1.scatter(self.RED_SNR_WAV, self.RED_SNR[:,5], marker="8", color='r', label='SCI1+SCI2+SCI3') -# ax1.scatter(self.RED_SNR_WAV, self.RED_SNR[:,1], marker=">", color='r', label='SCI1') -# ax1.scatter(self.RED_SNR_WAV, self.RED_SNR[:,2], marker="s", color='r', label='SCI2') -# ax1.scatter(self.RED_SNR_WAV, self.RED_SNR[:,3], marker="<", color='r', label='SCI3') -# ax1.yaxis.set_major_locator(MaxNLocator(nbins=12)) -# ax1.grid() -# ax2.scatter(self.GREEN_SNR_WAV, self.GREEN_SNR[:,4], marker="D", color='darkgreen', label='SKY') -# ax2.scatter(self.RED_SNR_WAV, self.RED_SNR[:,4], marker="D", color='r', label='SKY') -# ax2.yaxis.set_major_locator(MaxNLocator(nbins=12)) -# ax2.grid() -# ax3.scatter(self.GREEN_SNR_WAV, self.GREEN_SNR[:,0], marker="D", color='darkgreen', label='CAL') -# ax3.scatter(self.RED_SNR_WAV, self.RED_SNR[:,0], marker="D", color='r', label='CAL') -# ax3.yaxis.set_major_locator(MaxNLocator(nbins=12)) -# ax3.grid() -# ax3.set_xlim(4450,8700) -# -# # Add legend -# ax1.legend(["SCI1+SCI2+SCI3","SCI1","SCI2","SCI3"], ncol=4) -# -# # Set titles and labels for each subplot -# ax1.set_title(self.ObsID + ' - ' + self.name + ': ' + r'$\mathrm{SNR}_{'+str(self.snr_percentile)+'}$ = '+str(self.snr_percentile)+'th percentile (Signal / $\sqrt{\mathrm{Variance}}$)', fontsize=16) -# ax3.set_xlabel('Wavelength (Ang)', fontsize=14) -# ax1.set_ylabel('SNR - SCI', fontsize=14) -# ax2.set_ylabel('SNR - SKY', fontsize=14) -# ax3.set_ylabel('SNR - CAL', fontsize=14) -# 
ax3.xaxis.set_tick_params(labelsize=14) -# ax1.yaxis.set_tick_params(labelsize=14) -# ax2.yaxis.set_tick_params(labelsize=14) -# ax3.yaxis.set_tick_params(labelsize=14) -# -# # Adjust spacing between subplots -# plt.subplots_adjust(hspace=0) -# plt.tight_layout() + if chip == 'green': + wav1 = self.L1.GREEN_SCI_WAVE1 + wav2 = self.L1.GREEN_SCI_WAVE2 + wav3 = self.L1.GREEN_SCI_WAVE3 + cal = self.L1.GREEN_CAL_WAVE + sky = self.L1.GREEN_SKY_WAVE + elif chip == 'red': + wav1 = self.L1.RED_SCI_WAVE1 + wav2 = self.L1.RED_SCI_WAVE2 + wav3 = self.L1.RED_SCI_WAVE3 + cal = self.L1.RED_CAL_WAVE + sky = self.L1.RED_SKY_WAVE + + + wls = [wav1, wav3, cal, sky] + labels = ['WLS1', 'WL3', 'CAL', 'SKY'] + num_orders = len(wav2) + + fig, ax = plt.subplots(4, 1, sharex='col', sharey='row', figsize=(18, 12)) + + for i, w in enumerate(wls): + # plot the data + for order in range(num_orders): + ax[i].plot(wav2[order,:], w[order,:]-wav2[order,:], c=cm.gist_rainbow(0.9*(1-order/num_orders)), lw=3) + + # make the axes pretty + y02 = np.percentile(w-wav2, 2.5) + y50 = np.percentile(w-wav2, 50) + y98 = np.percentile(w-wav2, 97.5) + dy = 0.5*((y98-y50)+(y50-y02)) + + ax[i].tick_params(axis='both', which='major', labelsize=14) + ax[i].set_xlim(wav2.min()-25,wav2.max()+25) + ax[i].set_ylim(y50-dy, y50+dy) + ax[i].set_ylabel('{0}-WLS2'.format(labels[i]), fontsize=18) + + title = "{0} Chip: {1}".format(CHIP, self.L1.header['PRIMARY']['OFNAME']) + ax[0].set_title(title, fontsize=22) + plt.xlabel('Wavelength (Ang)', fontsize=18) + # Display the plot if fig_path != None: t0 = time.process_time() From 0857b5ed5b6b0edbe0176720d8a528da15cde229 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Thu, 14 Sep 2023 17:28:14 -0700 Subject: [PATCH 37/86] trivial change --- recipes/quicklook_match.recipe | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipes/quicklook_match.recipe b/recipes/quicklook_match.recipe index aeea2eef1..fbd769920 100644 --- a/recipes/quicklook_match.recipe +++ b/recipes/quicklook_match.recipe @@ -1,7 +1,7 @@ # This recipe is used to manually produce a set of QLP outputs that match the # fullpath config variable. It can be used to produce QLP for a single data # level (L0, 2D, L1, L2, masters) for a single datecode (YYYYDDMM) or combinations. -# All of the examples below are executed using the command +# All of the examples below are executed using the command # > kpf -c configs/quicklook_match.cfg -r recipes/quicklook_match.recipe # but with different values for the config variable 'fullpath'. 
# From 8a3118a091c11f7ecfb00851e8fea499c934ddb9 Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Fri, 15 Sep 2023 09:42:59 -0700 Subject: [PATCH 38/86] pay attention to legendre/spline fit type in config --- modules/wavelength_cal/configs/KPF.cfg | 3 ++- modules/wavelength_cal/src/alg.py | 20 +++++++++++--------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/modules/wavelength_cal/configs/KPF.cfg b/modules/wavelength_cal/configs/KPF.cfg index b62bffe2e..c000ca2dc 100644 --- a/modules/wavelength_cal/configs/KPF.cfg +++ b/modules/wavelength_cal/configs/KPF.cfg @@ -9,7 +9,8 @@ quicklook_steps = 1 min_wave = 3800 max_wave = 9300 fit_order = 5 -fit_type = Legendre +# "Legendre" and "spline" fit types are currently implemented +fit_type = spline n_sections = 20 clip_peaks = 1 clip_below_median = 1 diff --git a/modules/wavelength_cal/src/alg.py b/modules/wavelength_cal/src/alg.py index 2882f8b27..885055c6f 100644 --- a/modules/wavelength_cal/src/alg.py +++ b/modules/wavelength_cal/src/alg.py @@ -1213,7 +1213,10 @@ def fit_polynomial(self, wls, n_pixels, fitted_peak_pixels, fit_iterations=5, si returns the Legendre polynomial wavelength solutions """ weights = 1 / np.sqrt(peak_heights) - if self.fit_type == 'Legendre': + if self.fit_type.lower() not in ['legendre', 'spline']: + raise NotImplementedError("Fit type must be either legendre or spline") + + if self.fit_type.lower() == 'legendre' or self.fit_type.lower() == 'spline': _, unique_idx, count = np.unique(fitted_peak_pixels, return_index=True, return_counts=True) unclipped_idx = np.where( @@ -1224,14 +1227,13 @@ def fit_polynomial(self, wls, n_pixels, fitted_peak_pixels, fit_iterations=5, si sorted_idx = np.argsort(fitted_peak_pixels[unclipped_idx]) x, y, w = fitted_peak_pixels[unclipped_idx][sorted_idx], wls[unclipped_idx][sorted_idx], weights[unclipped_idx][sorted_idx] for i in range(fit_iterations): - # leg_out = Legendre.fit(x, y, self.fit_order, w=w) - # our_wavelength_solution_for_order = leg_out(np.arange(n_pixels)) - - leg_out = UnivariateSpline(x, y, w, k=5) - our_wavelength_solution_for_order = leg_out(np.arange(n_pixels)) - - # leg_out = UnivariateSpline(x, y, w, k=5) - # our_wavelength_solution_for_order = leg_out(np.arange(n_pixels)) + if self.fit_type.lower() == 'legendre': + leg_out = Legendre.fit(x, y, self.fit_order, w=w) + our_wavelength_solution_for_order = leg_out(np.arange(n_pixels)) + + if self.fit_type == 'spline': + leg_out = UnivariateSpline(x, y, w, k=5) + our_wavelength_solution_for_order = leg_out(np.arange(n_pixels)) res = y - leg_out(x) good = np.where(np.abs(res) <= sigma_clip*np.std(res)) From f8674605725ba430a9117ff5344ba4b4db18d1fc Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Fri, 15 Sep 2023 09:48:49 -0700 Subject: [PATCH 39/86] update gitignore --- .gitignore | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index f2d5b75ea..728a3e78b 100644 --- a/.gitignore +++ b/.gitignore @@ -5,11 +5,14 @@ examples/V1/recipe_results/ *~ *.DS_Store .idea/ -#logs/ +logs/ receipts/ __pycache__/ *.py[cod] *$py.class +*.ipynb_checkpoints +*.log +*.txt # C extensions *.so From ff0511d3e6b02371095cd6a2d12ef27a3f1edd40 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Fri, 15 Sep 2023 16:44:28 -0700 Subject: [PATCH 40/86] cosmetic changes --- modules/Utils/analyze_wls.py | 36 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/modules/Utils/analyze_wls.py b/modules/Utils/analyze_wls.py index 5e590daa1..41bde7899 100644 
--- a/modules/Utils/analyze_wls.py +++ b/modules/Utils/analyze_wls.py @@ -14,7 +14,7 @@ class AnalyzeWLS: Arguments: L1 - an L1 object - L1b (optiona) - a second L1 object to compare to L1 + L1b (optional) - a second L1 object to compare to L1 Attributes: None so far @@ -36,22 +36,6 @@ def __init__(self, L1, logger=None): # need a filename instead or in addition -# def __init__(self, L1, logger=None): -# if self.logger: -# self.logger = logger -# self.logger.debug('Initializing AnalyzeWLS object.') -# else: -# self.logger = None -# self.L1 = L1 -# #self.L1b = L1b -# primary_header = HeaderParse(L1, 'PRIMARY') -# self.header = primary_header.header -# self.name = primary_header.get_name() -# -# #self.ObsID = primary_header.get_obsid() -# # need a filename instead or in addition - - def plot_WLS_orderlet_diff(self, chip=None, fig_path=None, show_plot=False): """ Make a plot of differences between the wavelength solutions for the orders. @@ -79,7 +63,6 @@ def plot_WLS_orderlet_diff(self, chip=None, fig_path=None, show_plot=False): print('chip not supplied. Exiting plot_WLS_orderlet_diff') return - if chip == 'green': wav1 = self.L1.GREEN_SCI_WAVE1 wav2 = self.L1.GREEN_SCI_WAVE2 @@ -93,13 +76,12 @@ def plot_WLS_orderlet_diff(self, chip=None, fig_path=None, show_plot=False): cal = self.L1.RED_CAL_WAVE sky = self.L1.RED_SKY_WAVE - wls = [wav1, wav3, cal, sky] - labels = ['WLS1', 'WL3', 'CAL', 'SKY'] + labels = ['SCI1', 'SCI3', 'CAL', 'SKY'] num_orders = len(wav2) fig, ax = plt.subplots(4, 1, sharex='col', sharey='row', figsize=(18, 12)) - + for i, w in enumerate(wls): # plot the data for order in range(num_orders): @@ -114,12 +96,18 @@ def plot_WLS_orderlet_diff(self, chip=None, fig_path=None, show_plot=False): ax[i].tick_params(axis='both', which='major', labelsize=14) ax[i].set_xlim(wav2.min()-25,wav2.max()+25) ax[i].set_ylim(y50-dy, y50+dy) - ax[i].set_ylabel('{0}-WLS2'.format(labels[i]), fontsize=18) + ax[i].set_ylabel('{0} - SCI2'.format(labels[i]), fontsize=18) + ax[i].grid(True) - title = "{0} Chip: {1}".format(CHIP, self.L1.header['PRIMARY']['OFNAME']) + title = "{0} Chip: {1}".format(chip_title, self.L1.header['PRIMARY']['OFNAME']) ax[0].set_title(title, fontsize=22) + ax[0].axhline(y=0, color='k', linestyle='-') + ax[1].axhline(y=0, color='k', linestyle='-') + ax[2].axhline(y=0, color='k', linestyle='-') + ax[3].axhline(y=0, color='k', linestyle='-') plt.xlabel('Wavelength (Ang)', fontsize=18) - + plt.tight_layout() + # Display the plot if fig_path != None: t0 = time.process_time() From ef4c8e7d3369443a31c7dff11cb9681f67c1d529 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Mon, 18 Sep 2023 09:59:38 -0700 Subject: [PATCH 41/86] add file execution permission --- scripts/kpf_slowtouch.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 scripts/kpf_slowtouch.sh diff --git a/scripts/kpf_slowtouch.sh b/scripts/kpf_slowtouch.sh old mode 100644 new mode 100755 From f4c56863078e21fbb722d676f7d58fc6ab13273a Mon Sep 17 00:00:00 2001 From: Russ Date: Mon, 18 Sep 2023 10:50:43 -0700 Subject: [PATCH 42/86] Added parameter lev0_ffi_ext_cahk. 
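
The new entry follows the same list-in-a-string convention as lev0_ffi_ext_red
and lev0_ffi_ext_green, so consumers are expected to parse it with
ast.literal_eval, the pattern already used elsewhere in this repository for
list-valued config parameters. A hedged sketch (the section name below is
illustrative, not necessarily the one used in kpf_drp.cfg):

    import ast
    import configparser as cp

    config = cp.ConfigParser()
    config.read('configs/kpf_drp.cfg')

    # The raw value is the string "['CA_HK']"; literal_eval turns it into a list.
    lev0_ffi_ext_cahk = ast.literal_eval(config['CCD']['lev0_ffi_ext_cahk'])
    assert lev0_ffi_ext_cahk == ['CA_HK']
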
--- configs/kpf_drp.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/configs/kpf_drp.cfg b/configs/kpf_drp.cfg index 4243009e2..0c1288caa 100644 --- a/configs/kpf_drp.cfg +++ b/configs/kpf_drp.cfg @@ -153,6 +153,7 @@ overscan_clip = 5 overscan_order = 1 lev0_ffi_ext_red = ['RED_CCD'] lev0_ffi_ext_green = ['GREEN_CCD'] +lev0_ffi_ext_cahk = ['CA_HK'] quicklook = False gain_keyword = 'CCDGAIN' input_dir = /data/L0/ From 95269990b9d842749671c21b94bce4e46c0f3c46 Mon Sep 17 00:00:00 2001 From: Russ Date: Mon, 18 Sep 2023 10:50:44 -0700 Subject: [PATCH 43/86] Set explicit bias_objects and dark_objects (best policy for automated daily runs). --- configs/kpf_masters_drp.cfg | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/configs/kpf_masters_drp.cfg b/configs/kpf_masters_drp.cfg index c22fa3554..55d138ba2 100644 --- a/configs/kpf_masters_drp.cfg +++ b/configs/kpf_masters_drp.cfg @@ -39,7 +39,10 @@ output_dir = /data/2D n_sigma = 2.1 lev0_ffi_exts = ['GREEN_CCD','RED_CCD','CA_HK'] # Leave empty list for bias_objects for recipe to automatically fill in. -bias_objects = [] +#bias_objects = [] +# 20230619 had OBJECT=test with just one frame, so we handle it specially as follows: +bias_objects = ['autocal-bias'] +# The above with explicit bias_objects = ['autocal-bias'] is the best policy for automated daily runs. masterbias_path = /testdata/kpf_master_bias.fits [DARK] exptime_minimum = 300.0 n_sigma = 2.2 lev0_ffi_exts = ['GREEN_CCD','RED_CCD','CA_HK'] # Leave empty list for dark_objects for recipe to automatically fill in. -dark_objects = [] +#dark_objects = [] +dark_objects = ['autocal-dark'] +# The above with explicit dark_objects = ['autocal-dark'] is the best policy for automated daily runs. masterdark_path = /testdata/kpf_master_dark.fits [FLAT] From f90b8111a344ebfbcb38dfcbe18c66be33f8e71f Mon Sep 17 00:00:00 2001 From: Russ Date: Mon, 18 Sep 2023 10:50:44 -0700 Subject: [PATCH 44/86] Added new optional environment variable KPFCRONJOB_CONFIG_L0. --- cronjobs/kpfmastersruncmd_l0.pl | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cronjobs/kpfmastersruncmd_l0.pl b/cronjobs/kpfmastersruncmd_l0.pl index 3dbdec781..f2eec7667 100755 --- a/cronjobs/kpfmastersruncmd_l0.pl +++ b/cronjobs/kpfmastersruncmd_l0.pl @@ -99,7 +99,7 @@ # Initialize fixed parameters and read command-line parameter. my $iam = 'kpfmastersruncmd_l0.pl'; -my $version = '1.5'; +my $version = '1.6'; my $procdate = shift @ARGV; # YYYYMMDD command-line parameter. @@ -114,6 +114,12 @@ my $recipe = '/code/KPF-Pipeline/recipes/kpf_masters_drp.recipe'; my $config = '/code/KPF-Pipeline/configs/kpf_masters_drp.cfg'; +my $configenvar = $ENV{KPFCRONJOB_CONFIG_L0}; + +if (defined $configenvar) { + $config = $configenvar; +} + # Print environment. From f375a9e998449fa2c999ebdd36f7059867ee3abe Mon Sep 17 00:00:00 2001 From: Russ Date: Mon, 18 Sep 2023 10:50:44 -0700 Subject: [PATCH 45/86] Do not put output log files in jobs directory (script expects them in the main KPF-Pipeline directory, in order to move them to /data/kpf/masters/yyyymmdd directory later). --- cronjobs/runDailyPipelines.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cronjobs/runDailyPipelines.sh b/cronjobs/runDailyPipelines.sh index 014ede478..f0e90e054 100755 --- a/cronjobs/runDailyPipelines.sh +++ b/cronjobs/runDailyPipelines.sh @@ -1,15 +1,15 @@ #!
/bin/bash -l -printenv >& $KPFCRONJOB_LOGS/jobs/runDailyPipelines.env - procdate=$(date +\%Y\%m\%d) +printenv >& $KPFCRONJOB_LOGS/runDailyPipelines_$procdate.env + echo "Processing date: $procdate" -$KPFCRONJOB_CODE/cronjobs/kpfmastersruncmd_l0.pl $procdate >& $KPFCRONJOB_LOGS/jobs/kpfmastersruncmd_l0_$procdate.out +$KPFCRONJOB_CODE/cronjobs/kpfmastersruncmd_l0.pl $procdate >& $KPFCRONJOB_LOGS/kpfmastersruncmd_l0_$procdate.out echo Return value from kpfmastersruncmd_l0.pl = $? -$KPFCRONJOB_CODE/cronjobs/kpfmastersruncmd_l1.pl $procdate >& $KPFCRONJOB_LOGS/jobs/kpfmastersruncmd_l1_$procdate.out +$KPFCRONJOB_CODE/cronjobs/kpfmastersruncmd_l1.pl $procdate >& $KPFCRONJOB_LOGS/kpfmastersruncmd_l1_$procdate.out echo Return value from kpfmastersruncmd_l1.pl = $? -$KPFCRONJOB_CODE/cronjobs/kpfmasters_wls_auto.pl $procdate >& $KPFCRONJOB_LOGS/jobs/kpfmasters_wls_auto_$procdate.out +$KPFCRONJOB_CODE/cronjobs/kpfmasters_wls_auto.pl $procdate >& $KPFCRONJOB_LOGS/kpfmasters_wls_auto_$procdate.out echo Return value from kpfmasters_wls_auto.pl = $? -$KPFCRONJOB_CODE/database/cronjobs/kpfmasters_register_in_db.pl $procdate >& $KPFCRONJOB_LOGS/jobs/kpfmasters_register_in_db_$procdate.out +$KPFCRONJOB_CODE/database/cronjobs/kpfmasters_register_in_db.pl $procdate >& $KPFCRONJOB_LOGS/kpfmasters_register_in_db_$procdate.out echo Return value from kpfmasters_register_in_db.pl = $? From f6ad42dd92cab0717ccf80e2e02209d548d73988 Mon Sep 17 00:00:00 2001 From: Russ Date: Mon, 18 Sep 2023 10:50:44 -0700 Subject: [PATCH 46/86] Optionally override self.smoothlamppattern_path from input argument with environment variable SMOOTH_LAMP_PATTERN. Also, reset flat to unity if flat < 0.1 or flat > 2.5. --- modules/master_flat/src/master_flat_framework.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/modules/master_flat/src/master_flat_framework.py b/modules/master_flat/src/master_flat_framework.py index 397601953..ed5ca9d35 100644 --- a/modules/master_flat/src/master_flat_framework.py +++ b/modules/master_flat/src/master_flat_framework.py @@ -170,6 +170,11 @@ def _perform(self): """ + # Optionally override self.smoothlamppattern_path from input argument. + smoothlamppattern_envar = getenv('SMOOTH_LAMP_PATTERN') + if smoothlamppattern_envar is not None: + self.smoothlamppattern_path = smoothlamppattern_envar + smoothlamppattern_path_exists = exists(self.smoothlamppattern_path) if not smoothlamppattern_path_exists: raise FileNotFoundError('File does not exist: {}'.format(self.smoothlamppattern_path)) @@ -403,6 +408,9 @@ def _perform(self): # Less than low-light pixels cannot be reliably adjusted. Reset below-threshold pixels to have unity flat values. flat = np.where(stack_avg < self.low_light_limit, 1.0, flat) + # Reset flat to unity if flat < 0.1 or flat > 2.5. + flat = np.where(flat < 0.1, 1.0, flat) + flat = np.where(flat > 2.5, 1.0, flat) ### kpf master file creation ### master_holder[ffi] = flat From 2f7f3cd0faf7859d937babe23784960e0c43510a Mon Sep 17 00:00:00 2001 From: Russ Date: Mon, 18 Sep 2023 10:50:44 -0700 Subject: [PATCH 47/86] Modified to enable CA_HK master bias and master dark subtraction.
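The flat sanity clamp that patch 46 above adds is three successive np.where passes. A minimal self-contained sketch with made-up arrays follows; the 100 ADU low-light limit here is an assumed value for illustration (the pipeline reads self.low_light_limit from its config), while the 0.1/2.5 bounds mirror the patch.

    import numpy as np

    low_light_limit = 100.0                                  # assumed for illustration
    stack_avg = np.array([[5000., 50.], [8000., 120.]])      # stacked flat signal
    flat = np.array([[1.02, 0.4], [2.8, 0.05]])              # candidate flat field

    flat = np.where(stack_avg < low_light_limit, 1.0, flat)  # too faint to calibrate
    flat = np.where(flat < 0.1, 1.0, flat)                   # implausibly low response
    flat = np.where(flat > 2.5, 1.0, flat)                   # implausibly high response
    print(flat)                                              # [[1.02 1.  ] [1.   1.  ]]

Resetting suspect pixels to exactly 1.0 means they pass through the later flat division unchanged instead of injecting large spurious corrections.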
--- recipes/watchfor_kpf_l0.recipe | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/recipes/watchfor_kpf_l0.recipe b/recipes/watchfor_kpf_l0.recipe index eae1001d0..6fba648fc 100644 --- a/recipes/watchfor_kpf_l0.recipe +++ b/recipes/watchfor_kpf_l0.recipe @@ -11,6 +11,7 @@ order = config.WATCHFOR_L0.overscan_order oscan_clip = config.WATCHFOR_L0.overscan_clip lev0_ffi_ext_red = config.WATCHFOR_L0.lev0_ffi_ext_red lev0_ffi_ext_green = config.WATCHFOR_L0.lev0_ffi_ext_green +lev0_ffi_ext_cahk = config.WATCHFOR_L0.lev0_ffi_ext_cahk prescan_reg = config.WATCHFOR_L0.prescan_reg gain_key = config.WATCHFOR_L0.gain_keyword do_db_query_for_master_files = config.WATCHFOR_L0.do_db_query_for_master_files @@ -67,7 +68,7 @@ else: file_path = context.file_path lev0_ffi_exts = lev0_ffi_ext_red + lev0_ffi_ext_green - +lev0_ffi_exts_subbed = lev0_ffi_ext_red + lev0_ffi_ext_green + lev0_ffi_ext_cahk # Register L0 file in L0Files database table. Perform quality-control tasks. @@ -133,9 +134,9 @@ for raw_file_path in file_list: channel_datasec_nrows_green, n_sigma) - bias_subbed = ImageProcessing(L0_with_ffi, masterbias_path, lev0_ffi_exts, data_type, quicklook) + bias_subbed = ImageProcessing(L0_with_ffi, masterbias_path, lev0_ffi_exts_subbed, data_type, quicklook) - dark_subbed = ImageProcessing(bias_subbed, masterdark_path, lev0_ffi_exts, data_type, quicklook) + dark_subbed = ImageProcessing(bias_subbed, masterdark_path, lev0_ffi_exts_subbed, data_type, quicklook) flat_applied = ImageProcessing(dark_subbed, masterflat_path, lev0_ffi_exts, data_type, quicklook) From c04e671feb10569e64c0055e8989ec347cf86141 Mon Sep 17 00:00:00 2001 From: Russ Date: Mon, 18 Sep 2023 10:50:44 -0700 Subject: [PATCH 48/86] Made changes to make smooth lamp pattern for 20230622 and before. --- scripts/make_smooth_lamp_pattern.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/scripts/make_smooth_lamp_pattern.py b/scripts/make_smooth_lamp_pattern.py index 47c485823..257d3e306 100644 --- a/scripts/make_smooth_lamp_pattern.py +++ b/scripts/make_smooth_lamp_pattern.py @@ -17,8 +17,18 @@ from astropy.io import fits #fname_order_mask = "kpf_20230716_order_mask_untrimmed_made20230719.fits" -fname_stack_average = "kpf_20230804_master_flat.fits" -fname_smooth_lamp = "kpf_20230804_smooth_lamp_made20230808_float32.fits" + +# Used to make smooth lamp pattern for 20230624 through 20230730. +#fname_stack_average = "kpf_20230628_master_flat.fits" +#fname_smooth_lamp = kpf_20230628_smooth_lamp_made20230803_float32.fits + +# Used to make smooth lamp pattern for 20230801 and onward. +#fname_stack_average = "kpf_20230804_master_flat.fits" +#fname_smooth_lamp = "kpf_20230804_smooth_lamp_made20230808_float32.fits" + +# Used to make smooth lamp pattern for 20230622 and before. 
+fname_stack_average = "kpf_20230619_master_flat.fits" +fname_smooth_lamp = "kpf_20230619_smooth_lamp_made20230817_float32.fits" #hdul_order_mask = fits.open(fname_order_mask) hdul_stack_average = fits.open(fname_stack_average) From c1ecd43eb729d7e1640e1cc2ce92b389f7dd308d Mon Sep 17 00:00:00 2001 From: Aaron Householder Date: Tue, 19 Sep 2023 09:38:42 -0400 Subject: [PATCH 49/86] Chi^2 check for individual lines --- modules/wavelength_cal/configs/KPF.cfg | 1 + modules/wavelength_cal/src/alg.py | 67 +++++++++++++++++++++++--- 2 files changed, 60 insertions(+), 8 deletions(-) diff --git a/modules/wavelength_cal/configs/KPF.cfg b/modules/wavelength_cal/configs/KPF.cfg index c3459390e..4803b5336 100644 --- a/modules/wavelength_cal/configs/KPF.cfg +++ b/modules/wavelength_cal/configs/KPF.cfg @@ -16,6 +16,7 @@ clip_below_median = 1 peak_height_threshold = 0.3 sigma_clip = 2.5 fit_iterations = 5 +chi_2_threshold = 1 # skip these orders when solving for the LFC WLS and instead use a ThAr solution green_skip_orders = 0,1,2,3,4,5,6,7,8,9,10 diff --git a/modules/wavelength_cal/src/alg.py b/modules/wavelength_cal/src/alg.py index b2d7a3737..141506e46 100644 --- a/modules/wavelength_cal/src/alg.py +++ b/modules/wavelength_cal/src/alg.py @@ -49,6 +49,7 @@ def __init__( ) self.red_skip_orders = configpull.get_config_value('red_skip_orders') self.green_skip_orders = configpull.get_config_value('green_skip_orders') + self.chi_2_threshold = configpull.get_config_value('chi_2_threshold') self.skip_orders = configpull.get_config_value('skip_orders',None) self.quicklook_steps = configpull.get_config_value('quicklook_steps',10) self.min_wave = configpull.get_config_value('min_wave',3800) @@ -192,7 +193,7 @@ def run_wavelength_cal( def fit_many_orders( self, cal_flux, order_list, rough_wls=None, comb_lines_angstrom=None, - expected_peak_locs=None, plt_path='/data/wls/', print_update=False): + expected_peak_locs=None, plt_path=None, print_update=False): """ Iteratively performs wavelength calibration for all orders. Args: @@ -557,7 +558,7 @@ def find_peaks_in_order(self, order_flux, plot_path=None): plt.close() return fitted_peak_pixels, detected_peak_pixels, detected_peak_heights, gauss_coeffs - + def find_peaks(self, order_flux, peak_height_threshold=1.5): """ Finds all order_flux peaks in an array. This runs scipy.signal.find_peaks
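The find_peaks changes in patch 49 (above and continuing below) follow a detect-then-refine pattern: scipy.signal.find_peaks proposes integer peak locations, a height cut drops weak detections, and a per-peak Gaussian fit refines each position to sub-pixel precision. A hedged sketch on a synthetic comb spectrum; all names and numbers here are illustrative, not the pipeline's.

    import numpy as np
    from scipy import signal
    from scipy.optimize import curve_fit

    def gaussian(x, a, mu, sig, const):
        return a * np.exp(-0.5 * ((x - mu) / sig) ** 2) + const

    x = np.arange(600, dtype=float)
    flux = np.zeros_like(x)
    for mu in np.arange(30.3, 600, 40):              # evenly spaced comb modes
        flux += gaussian(x, 100.0, mu, 2.0, 0.0)

    peaks, props = signal.find_peaks(flux, distance=20, height=10)
    peaks = peaks[props['peak_heights'] > 5]         # height cut, as in the patch

    width = int(np.mean(np.diff(peaks)) // 2)
    refined = []
    for p in peaks:
        idx = np.clip(p + np.arange(-width, width + 1), 0, len(flux) - 1).astype(int)
        coef, _ = curve_fit(gaussian, np.arange(len(idx)), flux[idx],
                            p0=[flux[p], float(width), 2.0, 0.0], maxfev=100000)
        refined.append(coef[1] + p - width)          # back to absolute pixel units
    print(round(refined[0], 2))                      # ~30.3: the sub-pixel center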
This runs scipy.signal.find_peaks @@ -592,17 +593,37 @@ def find_peaks(self, order_flux, peak_height_threshold=1.5): detected_peaks, properties = signal.find_peaks(c, distance=distance, height=height) peak_heights = np.array(properties['peak_heights']) + + # Only consider peaks with height greater than 5 + valid_peak_indices = np.where(peak_heights > 5)[0] + detected_peaks = detected_peaks[valid_peak_indices] + peak_heights = peak_heights[valid_peak_indices] + # fit peaks with Gaussian to get accurate position fitted_peaks = detected_peaks.astype(float) gauss_coeffs = np.empty((4, len(detected_peaks))) width = np.mean(np.diff(detected_peaks)) // 2 + # Create mask initially set to True for all detected peaks + mask = np.ones(len(detected_peaks), dtype=bool) + for j, p in enumerate(detected_peaks): idx = p + np.arange(-width, width + 1, 1) idx = np.clip(idx, 0, len(c) - 1).astype(int) coef = self.fit_gaussian(np.arange(len(idx)), c[idx]) - gauss_coeffs[:,j] = coef - fitted_peaks[j] = coef[1] + p - width + + if coef is None: + mask[j] = False # mask out bad fits + + else:# Only update the coefficients and peaks if fit_gaussian did not return None + gauss_coeffs[:, j] = coef + fitted_peaks[j] = coef[1] + p - width + + # Remove the peaks where fit_gaussian returned None + fitted_peaks = fitted_peaks[mask] + detected_peaks = detected_peaks[mask] + peak_heights = peak_heights[mask] + gauss_coeffs = gauss_coeffs[:, mask] return fitted_peaks, detected_peaks, peak_heights, gauss_coeffs @@ -925,7 +946,7 @@ def line_match( def mode_match( self, order_flux, fitted_peak_pixels, good_peak_idx, rough_wls_order, - comb_lines_angstrom, print_update=False, plot_path='/data/wls/', start_check=True, + comb_lines_angstrom, print_update=False, plot_path=None, start_check=True, ): """ Matches detected order_flux peaks to the theoretical locations of LFC wavelengths @@ -1162,7 +1183,7 @@ def integrate_gaussian(self, x, a, mu, sig, const, int_width=0.5): ) + (const * 2 * int_width) return integrated_gaussian_val - + def fit_gaussian(self, x, y): """ Fits a continuous Gaussian to a discrete set of x and y datapoints @@ -1174,6 +1195,7 @@ def fit_gaussian(self, x, y): Returns: list: best-fit parameters [a, mu, sigma**2, const] """ + x = np.ma.compressed(x) y = np.ma.compressed(y) @@ -1184,8 +1206,37 @@ def fit_gaussian(self, x, y): np.warnings.simplefilter("ignore") popt, _ = curve_fit(self.integrate_gaussian, x, y, p0=p0, maxfev=1000000) - return popt - + if self.cal_type == 'LFC': + + # Quality Checks for Gaussian Fits + chi_squared_threshold = int(self.chi_2_threshold) + + # Calculate chi^2 + predicted_y = self.integrate_gaussian(x, *popt) + chi_squared = np.sum(((y - predicted_y) ** 2) / np.var(y)) + ''' + # Calculate RMS of residuals for Gaussian fit + rms_residual = np.sqrt(np.mean(np.square(y - predicted_y))) + + rms_threshold = 1000 # RMS Quality threshold + disagreement_threshold = 1000 # Disagreement with initial guess threshold + asymmetry_threshold = 1000 # Asymmetry in residuals threshold + # Calculate disagreement between Gaussian fit and initial guesss + disagreement = np.abs(popt[1] - p0[1]) + + # Check for asymmetry in residuals + residuals = y - predicted_y + left_residuals = residuals[:len(residuals)//2] + right_residuals = residuals[len(residuals)//2:] + asymmetry = np.abs(np.mean(left_residuals) - np.mean(right_residuals)) + ''' + # Run checks against defined quality thresholds + if (chi_squared > chi_squared_threshold): + print("Chi squared exceeded the threshold for this line. 
Line skipped") + return None + + return popt + def fit_polynomial(self, wls, n_pixels, fitted_peak_pixels, fit_iterations=5, sigma_clip=2.1, peak_heights=None, plot_path=None): """ Given precise wavelengths of detected LFC order_flux lines, fits a From 5fa121c95b7299a77579d8fa7e13faad657f0cdf Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Tue, 19 Sep 2023 08:53:31 -0700 Subject: [PATCH 50/86] mc --- modules/wavelength_cal/configs/KPF.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/wavelength_cal/configs/KPF.cfg b/modules/wavelength_cal/configs/KPF.cfg index c000ca2dc..8c43d122f 100644 --- a/modules/wavelength_cal/configs/KPF.cfg +++ b/modules/wavelength_cal/configs/KPF.cfg @@ -8,9 +8,9 @@ log_verbose = True quicklook_steps = 1 min_wave = 3800 max_wave = 9300 -fit_order = 5 +fit_order = 4 # "Legendre" and "spline" fit types are currently implemented -fit_type = spline +fit_type = legendre n_sections = 20 clip_peaks = 1 clip_below_median = 1 From c4c006fc2066d5056f77ac6f268aa56d871542d7 Mon Sep 17 00:00:00 2001 From: cwang2016 Date: Mon, 28 Aug 2023 17:17:18 -0700 Subject: [PATCH 51/86] feature/outlier_update make outlier_rejection iteration find outlier by looking at one fix profile or local profile at each x position for each order set and test threshold for finding the outlier --- configs/kpf_drp.cfg | 5 +- .../src/midpoint_photon_arrival.py | 4 +- .../configs/default_recipe_kpf_20220505.cfg | 2 +- modules/spectral_extraction/src/alg.py | 288 ++++++++++-------- .../src/spectral_extraction.py | 4 +- recipes/kpf_drp.recipe | 11 +- 6 files changed, 171 insertions(+), 143 deletions(-) diff --git a/configs/kpf_drp.cfg b/configs/kpf_drp.cfg index 0c1288caa..2a721b561 100644 --- a/configs/kpf_drp.cfg +++ b/configs/kpf_drp.cfg @@ -116,7 +116,7 @@ hk_trace_path = masters/kpfMaster_HKOrderBounds20220909.csv hk_wavelength_path = ["masters/kpfMaster_HKwave20220909_sci.csv", "masters/kpfMaster_HKwave20220909_sky.csv"] # for module process: -do_l0_to_2d = True +do_l0_to_2d = False do_order_trace = False do_spectral_extraction = True do_rv = True @@ -126,7 +126,8 @@ do_wavecopy_in_sp = True #do_qlp = True do_bk_subtraction = True do_bc = True -do_outlier_rejection = False +do_outlier_rejection = True +outlier_mask_path = None # for L0->2D process [WATCHFOR_L0] diff --git a/modules/radial_velocity/src/midpoint_photon_arrival.py b/modules/radial_velocity/src/midpoint_photon_arrival.py index ca9698e89..25842a821 100644 --- a/modules/radial_velocity/src/midpoint_photon_arrival.py +++ b/modules/radial_velocity/src/midpoint_photon_arrival.py @@ -192,13 +192,11 @@ def CalcCDF(self,x,exposures,expBeg,expEnd): cdfTot=0 i=0 - while x > expEnd[i]: - cdfTot+=exposures[i] - ####Interpolation between exposures + ####Interpolation between exposures if i>1: rateRight=exposures[i]/((expEnd[i]-expBeg[i]).astype(float)+EMReadCorrection) rateLeft=exposures[i-1]/((expEnd[i-1]-expBeg[i-1]).astype(float)+EMReadCorrection) diff --git a/modules/spectral_extraction/configs/default_recipe_kpf_20220505.cfg b/modules/spectral_extraction/configs/default_recipe_kpf_20220505.cfg index ecb56b39a..23550f8cd 100644 --- a/modules/spectral_extraction/configs/default_recipe_kpf_20220505.cfg +++ b/modules/spectral_extraction/configs/default_recipe_kpf_20220505.cfg @@ -32,7 +32,7 @@ total_image_orderlets = 5 orderlet_names = ['GREEN_SKY_FLUX', 'GREEN_SCI_FLUX1', 'GREEN_SCI_FLUX2', 'GREEN_SCI_FLUX3', 'GREEN_CAL_FLUX', 'RED_SKY_FLUX', 'RED_SCI_FLUX1', 'RED_SCI_FLUX2', 'RED_SCI_FLUX3', 'RED_CAL_FLUX'] 
start_order = 0 total_order_per_ccd = [35, 32] -outlier_sigma = 5.0 +outlier_sigma = 10 starname = TARGNAME ra = TARGRA diff --git a/modules/spectral_extraction/src/alg.py b/modules/spectral_extraction/src/alg.py index 20b359201..497363cc9 100644 --- a/modules/spectral_extraction/src/alg.py +++ b/modules/spectral_extraction/src/alg.py @@ -217,6 +217,7 @@ def __init__(self, flat_data, flat_header, spectrum_data, spectrum_header, orde self.ccd_index = ccd_index self.do_outlier_rejection = do_outlier_rejection self.outlier_flux = outlier_flux + self.order_name = None def get_config_value(self, prop, default=''): """ Get defined value from the config file. @@ -379,7 +380,7 @@ def allocate_memory_flux(self, order_set, total_data_group, s_rate_y=1): return self.extracted_flux_pixels - def extraction_handler(self, out_data, height, data_group): + def extraction_handler(self, out_data, height, data_group, t_mask=None): """Perform the spectral extraction on one column of rectification data based on the extraction method. Args: @@ -392,7 +393,7 @@ """ if self.extraction_method == SpectralExtractionAlg.OPTIMAL: - return self.optimal_extraction(out_data[self.SDATA][0:height], out_data[self.FDATA][0:height], height, 1) + return self.optimal_extraction(out_data[self.SDATA][0:height], out_data[self.FDATA][0:height], height, 1, t_mask) elif self.extraction_method == SpectralExtractionAlg.SUM: return self.summation_extraction(out_data[self.SDATA][0:height]) @@ -499,7 +500,6 @@ def data_extraction_for_optimal_extraction(self, data_group, input_x (numpy.ndarray): x positions covering trace in input domain x_output_step (numpy.ndarray): x positions covering trace in output domain mask_height (int): height of the returned mask denoting the outlier pixels of the trace - is_debug (bool): output debug message Returns: numpy.ndarray: a 2D mask denoting the rejected outlier with the same size of height and width as """ # collect all data - # is_debug = False input_x_dim, input_y_dim = self.get_spectrum_size() p_height = mask_height p_width = input_x.size @@ -572,66 +570,54 @@ def outlier_rejection_for_optimal_extraction(self, s_data, f_data, input_x, x_ou outlier_sigma = self.get_config_value("outlier_sigma", 5.0) p_height = input_widths.size p_width = input_x.size - a_w = 3 - t_mask, scaled_profile = self.outlier_rejection(s_data, f_data, input_x, do_column, + t_mask, scaled_profile = self.outlier_rejection_local_profile(s_data, f_data, input_x, outlier_sigma, y_mid, input_widths, input_y_dim, is_debug) s_data_outlier = np.copy(s_data) if t_mask is None: return t_mask, s_data_outlier + + if t_mask is not None and is_debug: + total_outlier = np.where(t_mask == 0)[0].size + self.d_print('total outlier :', total_outlier, ' out of ', p_height*p_width, ' pixels: ', + total_outlier/(p_height*p_width)) + for x in range(p_width): - for y in range(p_height): - y_input = np.floor(input_widths[y] + y_mid[x]).astype(int) - if y_input < 0 or y_input >= input_y_dim: - continue - - if t_mask[y, x] != 0: - continue - collect_pixels = [] - - for dh in range(0, a_w): # neighbor position y_nb, x_nb in [p_height, p_width] domain - y_nb = y + dh - 1 - if y_nb < 0 or y_nb >= p_height or dh == 1: - continue - for dw in range(0, 1): - x_nb = x + dw - if x_nb < 0 or x_nb >= p_width or t_mask[y_nb, x_nb] == 0: - continue - - collect_pixels.append(s_data[y_nb, x_nb]) - p_avg =
np.mean(np.array(collect_pixels)) if len(collect_pixels) > 0 else scaled_profile[y, x] - s_data_outlier[y, x] = p_avg - - if self.outlier_flux is not None: - if is_sdata_raw: - original_x = input_x[x] - original_y = y_input - else: - original_x = x_output_step[x] - original_y = (output_widths[y] + y_output_mid) - if self.outlier_flux[original_y, original_x] != s_data[y, x]: - self.d_print("Error:outlier flux is not the same as the original flux at position", - str(original_y), str(original_x)) + y_inputs = np.floor(input_widths + y_mid[x]).astype(int) + y_sel_idx = np.where((y_inputs >= 0) & (y_inputs < input_y_dim))[0] + y_out_idx = np.where(t_mask[y_sel_idx, x] == 0)[0] # check any outlier t_mask = 0 + + if y_out_idx.size == 0: # no mask found + continue + else: + y_out_idx = y_sel_idx[y_out_idx] + + s_data_outlier[y_out_idx, x] = 0.0 + + if self.outlier_flux is not None: + if is_sdata_raw: + original_x = input_x[x] # a number + original_y = y_inputs[y_out_idx] # an array + else: + original_x = x_output_step[x] + original_y = (output_widths[y_out_idx] + y_output_mid) - self.outlier_flux[original_y, original_x] = p_avg + self.outlier_flux[original_y, original_x] = 1 return t_mask, s_data_outlier @staticmethod - def outlier_rejection(s_data, f_data, input_x, is_column_analysis, outlier_sigma, y_mid, input_widths, input_y_dim, is_debug=False): + def outlier_rejection_fix_profile(s_data, f_data, input_x, outlier_sigma, y_mid, input_widths, input_y_dim, is_debug=False): """ Args: s_data (numpy.ndarray): spectrum pixels of one order trace (straightened trace) f_data (numpy.ndarray): flat pixels of one order trace (straightened trace) input_x (numpy.ndarray): x positions that cover the trace in input domain - is_column_analysis (bool): do the analysis column by column per orderlet or do the analysis on entire - orderlet outlier_sigma (float): threshold for the deviation to find the outlier. y_mid (numpy.ndarray): y location of the trace in input domain - y_output_mid (int): y location of the trace in output domain input_widths (numpy.ndarray): from lower to upper width in input domain input_y_dim (int): height of original spectrum is_debug (bool): prints out the analysis result if it is True.
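For orientation amid these hunks: the extraction itself is a Horne (1986) style weighted sum, and the t_mask threaded through the new code simply zeroes rejected pixels out of both numerator and denominator. A toy, self-contained version of that masked sum (array values invented; as the later optimal_extraction hunk shows, the pipeline works column by column with unit variance):

    import numpy as np

    f_col = np.array([0.5, 2.0, 4.0, 2.0, 0.5])      # flat (profile) column
    s_col = np.array([55., 210., 400., 9999., 52.])  # spectrum column with one bad pixel
    mask = np.array([1, 1, 1, 0, 1])                 # t_mask: 0 = rejected outlier
    var = 1.0                                        # unit variance, as in the code

    p = f_col / f_col.sum()                          # normalized profile
    flux = (mask * p * s_col / var).sum() / (mask * p ** 2 / var).sum()
    print(round(flux))                               # ~910: profile-summed flux

Without the mask, the 9999 pixel would dominate the estimate; with it, the weighted sum recovers roughly the true flux from the surviving pixels.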
@@ -641,75 +627,131 @@ def outlier_rejection(s_data, f_data, input_x, is_column_analysis, outlier_sigma """ - scaled_profile = None - t_mask = np.ones_like(s_data, dtype=int) - if is_column_analysis: - profile_order = np.nanmedian(f_data, axis=1) - profile_median = np.nanmedian(profile_order) - scaled_profile = np.zeros_like(f_data) - for i_x, p_x in enumerate(input_x): - y_input = np.floor(input_widths + y_mid[i_x]).astype(int) - y_valid_idx = np.where((y_input <= (input_y_dim - 1)) & (y_input >= 0))[0] - # if y_valid_idx.size != y_input.size: - # print(' loc ', i_x, p_x, ' has less pixels extracted', y_valid_idx.size, ' vs ', y_input.size) - - c_flux = s_data[y_valid_idx, i_x] + p_h, p_w = np.shape(s_data) + + profile_order = np.nanmedian(f_data, axis=1) + profile_median = np.nanmedian(profile_order) + + if profile_median == 0: + profile_median = 1 + scaled_profile = np.zeros_like(f_data) + + for i_x, p_x in enumerate(input_x): + + y_input = np.floor(input_widths + y_mid[i_x]).astype(int) + y_valid_idx = np.where((y_input <= (input_y_dim - 1)) & (y_input >= 0))[0] + + p_median = np.nanmedian(profile_order[y_valid_idx]) if y_valid_idx.size < y_input.size else profile_median + + total_rejection = p_h//2 + for i in range(total_rejection): + p_non_outlier = np.where(t_mask[y_valid_idx, i_x] != 0)[0] + + if p_non_outlier.size == 0: + break + + eff_y = y_valid_idx[p_non_outlier] + c_flux = s_data[eff_y, i_x] c_median = np.nanmedian(c_flux) - if profile_median == 0: - profile_median = 1 - scaled_profile[y_valid_idx, i_x] = profile_order[y_valid_idx] * (c_median/profile_median) - c_residuals = c_flux - scaled_profile[y_valid_idx, i_x] + scaled_profile[:, i_x] = profile_order * (c_median/p_median) + c_residuals = c_flux - scaled_profile[eff_y, i_x] c_std = mad_std(c_residuals) if c_std == 0.0: c_std = mad_std(np.unique(c_residuals)) - c_deviation = np.absolute(c_residuals/c_std) if c_std != 0.0 else np.ones_like(c_residuals) - t_mask[y_valid_idx, i_x] = np.where((c_deviation < outlier_sigma), 1, 0) + if c_std == 0.0: + break - # for testing purpose - if is_debug: - print('i_x: ', i_x, ' c_median: ', c_median, ' c_deviation max: ', np.max(c_deviation), - ' reject no:', np.where(t_mask[:, i_x] == 0)[0].size) + c_deviation = np.absolute(c_residuals/c_std) + outside_p = (np.min(scaled_profile) > s_data[eff_y, i_x]) | (s_data[eff_y, i_x] > np.max(scaled_profile)) + outlier_idx = np.where((c_deviation >= outlier_sigma) & outside_p)[0] + if outlier_idx.size > 0: + max_idx = outlier_idx[np.argmax(c_deviation[outlier_idx])] + t_mask[eff_y[max_idx], i_x] = 0 + # t_mask[eff_y[outlier_idx[0]], i_x] = 0 + else: + break # converge + #if is_debug: + # print("p_x: ", p_x, "total iterate: ", i, ' out of ', p_h) + if np.where(t_mask == 0)[0].size == 0: + t_mask = None else: - with np.errstate(divide='ignore', invalid='ignore'): - relative_values = np.true_divide(s_data, f_data) - to_zero = (relative_values == np.inf) - relative_values[to_zero] = 0.0 - t_mask[to_zero] = 0 - to_nan_zero = np.where(np.isnan(relative_values)) - t_mask[to_nan_zero] = 0 - relative_values[to_nan_zero] = 0.0 - - total_iterate = 3 - max_rejected = 10 # max outlier allowed each iteration - - for t in range(total_iterate): - valid_pixel = np.where(t_mask != 0) # doing analysis on non-outlier pixels - c_median = np.nanmedian(relative_values[valid_pixel]) - c_std = mad_std(relative_values[valid_pixel], ignore_nan=True) - p_threshold = [c_median + outlier_sigma * c_std, c_median - outlier_sigma * c_std] - to_zero_idx = 
np.where(((relative_values > p_threshold[0]) | (relative_values < p_threshold[1])) & - (t_mask != 0)) - - # no more outlier, stop the iteration - if to_zero_idx[0].size <= 0: + if is_debug: + print("outlier at ", np.where(t_mask == 0)[1]+input_x[0]) + + return t_mask, scaled_profile + + @staticmethod + def outlier_rejection_local_profile(s_data, f_data, input_x, outlier_sigma, y_mid, input_widths, input_y_dim, is_debug=False): + """ + + Args: + s_data (numpy.ndarray): spectrum pixels of one order trace (straightened trace) + f_data (numpy.ndarray): flat pixels of one order trace (straightened trace) + input_x (numpy.ndarray): x positions that cover the trace in input domain + outlier_sigma (float): threshold for the deviation to find the outlier. + y_mid (numpy.ndarray): y location of the trace in input domain + input_widths (numpy.ndarray): from lower to upper width in input domain + input_y_dim (int): height of original spectrum + is_debug (bool): prints out the analysis result if it is True. + + Returns: + np.ndarray: t_mask in which each pixel indicates if the trace pixel is an outlier(0) or not (1). + + """ + + t_mask = np.ones_like(s_data, dtype=int) + p_h, p_w = np.shape(s_data) + + scaled_profile = np.zeros_like(f_data) + + for i_x, p_x in enumerate(input_x): + y_input = np.floor(input_widths + y_mid[i_x]).astype(int) + y_valid_idx = np.where((y_input <= (input_y_dim - 1)) & (y_input >= 0))[0] + + profile_order = f_data[:, i_x] + profile_median = np.nanmedian(profile_order[y_valid_idx]) + total_rejection = y_valid_idx.size//2 + i = 0 + while i < total_rejection: + p_non_outlier = np.where(t_mask[y_valid_idx, i_x] != 0)[0] + + if p_non_outlier.size == 0: break - to_reject = max_rejected if to_zero_idx[0].size > max_rejected else to_zero_idx[0].size + eff_y = y_valid_idx[p_non_outlier] + c_flux = s_data[eff_y, i_x] + c_median = np.nanmedian(c_flux) - if c_median == 0.0 and c_std == 0.0: # for some reason, we got zero median and std on the pixels ?? + scaled_profile[:, i_x] = profile_order * (c_median/profile_median) + c_residuals = c_flux - scaled_profile[eff_y, i_x] + c_std = mad_std(c_residuals) + if c_std == 0.0: + c_std = mad_std(np.unique(c_residuals)) + if c_std == 0.0: break - t_mask[to_zero_idx[0][0:to_reject], to_zero_idx[1][0:to_reject]] = 0 - relative_values[to_zero_idx[0][0:to_reject], to_zero_idx[1][0:to_reject]] = 0 - scaled_profile = relative_values + c_deviation = np.absolute(c_residuals/c_std) + outside_p = (np.min(scaled_profile) > s_data[eff_y, i_x]) | (s_data[eff_y, i_x] > np.max(scaled_profile)) + outlier_idx = np.where((c_deviation >= outlier_sigma) & outside_p)[0] + if outlier_idx.size > 0: + max_idx = outlier_idx[np.argmax(c_deviation[outlier_idx])] # max_idx: within area of eff_y + t_mask[eff_y[max_idx], i_x] = 0 + i += 1 + else: + break # converge + if is_debug: + print("p_x: ", p_x, "total iterate: ", i, ' out of ', p_h) if np.where(t_mask == 0)[0].size == 0: t_mask = None - + else: + if is_debug: + print("outlier at ", np.where(t_mask == 0)[1] + input_x[0]) return t_mask, scaled_profile + def collect_and_extract_spectrum_curve(self, data_group, order_idx, s_rate=1): """ Collect and extract the spectral data along the order per polynomial fit data and no rectification.
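The iterative rejection loop just added (outlier_rejection_local_profile) can be condensed to its essentials: scale the flat profile to the column's median flux, form residuals, estimate a robust scatter with mad_std, and knock out the single most deviant pixel until nothing exceeds outlier_sigma. A self-contained sketch with synthetic data and one injected cosmic-ray hit; illustrative only, not pipeline code.

    import numpy as np
    from astropy.stats import mad_std

    rng = np.random.default_rng(1)
    profile = np.exp(-0.5 * ((np.arange(11) - 5) / 2.0) ** 2)  # flat-derived profile
    column = 1000.0 * profile + rng.normal(0, 5, 11)           # observed column
    column[3] += 400.0                                         # inject a cosmic-ray hit

    mask = np.ones(11, dtype=int)
    outlier_sigma = 10.0
    for _ in range(11 // 2):                                   # reject at most half the pixels
        good = np.where(mask != 0)[0]
        scaled = profile * (np.nanmedian(column[good]) / np.nanmedian(profile))
        res = column[good] - scaled[good]
        dev = np.abs(res / mad_std(res))
        if dev.max() < outlier_sigma:
            break                                              # converged
        mask[good[np.argmax(dev)]] = 0                         # mask the worst pixel
    print(np.where(mask == 0)[0])                              # [3]

Rejecting one pixel per iteration and re-scaling the profile each time keeps a single large outlier from inflating the scatter estimate and hiding smaller ones.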
@@ -741,6 +783,7 @@ def collect_and_extract_spectrum_curve(self, data_group, order_idx, s_rate=1): raw_group = list() rectified_group = list() + for dt in data_group: if dt['is_raw_data']: raw_group.append(dt) @@ -770,8 +813,9 @@ def collect_and_extract_spectrum_curve(self, data_group, order_idx, s_rate=1): is_debug = False mask_height = np.shape(out_data)[1] - #if order_idx == 0: - # is_debug = True + if self.order_name == 'GREEN_SKY_FLUX': + if order_idx == 134: + is_debug = True s_data, f_data, is_sdata_raw = self.data_extraction_for_optimal_extraction(data_group, y_mid, y_output_mid, input_widths, y_output_widths, input_x, x_output_step, mask_height, is_debug) @@ -780,29 +824,16 @@ def collect_and_extract_spectrum_curve(self, data_group, order_idx, s_rate=1): if self.do_outlier_rejection and self.extraction_method == SpectralExtractionAlg.OPTIMAL and \ self.rectification_method == SpectralExtractionAlg.NoRECT and \ s_data is not None and f_data is not None: - trace_mask, s_data_outlier = self.outlier_rejection_for_optimal_extraction(s_data, f_data, + t_mask, s_data_outlier = self.outlier_rejection_for_optimal_extraction(s_data, f_data, input_x, x_output_step, is_sdata_raw, input_widths, y_output_widths, y_mid, y_output_mid, do_column=True, is_debug=is_debug) else: - trace_mask = None s_data_outlier = None + t_mask = None - - total_outlier = 0 for s_x, o_x in enumerate(x_output_step): # ox: 0...x_dim-1, out_data: 0...x_dim-1, corners: 0... - """ - x_i = input_x[s_x] - y_input = np.floor(input_widths + y_mid[s_x]).astype(int) - y_input_idx = np.where((y_input <= (input_y_dim - 1)) & (y_input >= 0))[0] - - out_data.fill(0.0) - for dt in raw_group: - out_data[dt['idx']][y_input_idx, 0] = dt['data'][y_input[y_input_idx], x_i] - for dt in rectified_group: - out_data[dt['idx']][y_input_idx, 0] = dt['data'][y_output_widths[y_input_idx] + y_output_mid, o_x] - """ if f_data is not None: out_data[self.FDATA][:] = f_data[:, s_x][:, np.newaxis] @@ -810,13 +841,11 @@ def collect_and_extract_spectrum_curve(self, data_group, order_idx, s_rate=1): out_data[self.SDATA][:] = s_data[:, s_x][:, np.newaxis] \ if s_data_outlier is None else s_data_outlier[:, s_x][:, np.newaxis] - extracted_result = self.extraction_handler(out_data, y_size, data_group) + extracted_result = self.extraction_handler(out_data, y_size, data_group, + t_mask[:, s_x][:, np.newaxis] if t_mask is not None else None) extracted_data[:, o_x:o_x+1] = extracted_result['extraction'] - if trace_mask is not None and is_debug: - total_outlier = np.where(trace_mask[0:y_size, :] == 0)[0].size - self.d_print('total outlier :', total_outlier, ' out of ', x_output_step.size*input_widths.size, ' pixels: ', - total_outlier/(x_output_step.size*input_widths.size)) + # out data starting from origin [0, 0] contains the reduced flux associated with the data range result_data = {'y_center': y_output_mid, 'widths': [lower_width, upper_width], @@ -1288,7 +1317,7 @@ def get_flux_from_order(self, data_group, order_idx, s_rate=1): 'out_y_center': flux_results.get('y_center')} @staticmethod - def optimal_extraction(s_data, f_data, data_height, data_width): + def optimal_extraction(s_data, f_data, data_height, data_width, t_mask=None): """ Do optimal extraction on collected pixels along the order. 
This optimal extraction method does the calculation based on the variance of the spectrum data and the @@ -1325,15 +1354,19 @@ def optimal_extraction(s_data, f_data, data_height, data_width): # taking weighted summation on spectral data of each column, # the weight is based on flat data. # formula: sum((f/sum(f)) * s/variance)/sum((f/sum(f))^2)/variance) ref. Horne 1986 + if t_mask is not None: + pixel_mask = t_mask + else: + pixel_mask = np.ones_like(f_data) d_var = np.full((data_height, 1), 1.0) # set the variance to be 1.0 - w_sum = np.sum(f_data, axis=0) + w_sum = np.sum(f_data[0:data_height, :], axis=0) nz_idx = np.where(w_sum != 0.0)[0] if nz_idx.size > 0: - p_data = f_data[:, nz_idx]/w_sum[nz_idx] - num = p_data * s_data[:, nz_idx]/d_var - dem = np.power(p_data, 2)/d_var - w_data[0, nz_idx] = np.sum(num, axis=0)/np.sum(dem, axis=0) + p_data = f_data[0:data_height, nz_idx]/w_sum[nz_idx] + num = pixel_mask[0:data_height, nz_idx] * p_data * s_data[0:data_height, nz_idx]/d_var + dem = pixel_mask[0:data_height, nz_idx] * np.power(p_data, 2)/d_var + w_data[0, nz_idx] = (np.sum(num, axis=0)/np.sum(dem, axis=0)) # for x in range(0, data_width): # w_sum = sum(f_data[:, x]) @@ -1570,7 +1603,7 @@ def polygon_clipping2(self, borders, clipper_points, clipper_size): # print(b) new_corners = self.remove_duplicate_point2(new_poly_borders) - # print('\n new corners: ', new_corners) + return np.array(new_corners) @staticmethod @@ -2094,7 +2127,7 @@ def extract_spectrum(self, } """ - + self.order_name = order_name self.add_file_logger(print_debug) self.enable_time_profile(show_time) if self.spectrum_flux is not None: @@ -2165,11 +2198,8 @@ def extract_spectrum(self, ' xrange: ', self.get_order_xrange(c_order)) order_flux = self.get_flux_from_order(data_group, c_order) - # order_flux = self.get_flux_from_order(self.order_coeffs[c_order], self.get_order_edges(c_order), - # self.get_order_xrange(c_order), data_group, - # c_order) - # prepare out_data to contain rectification result + # prepare out_data to contain rectification result order_result[c_order] = order_flux t_start = self.time_check(t_start, '**** time [' + str(c_order) + ']: ') diff --git a/modules/spectral_extraction/src/spectral_extraction.py b/modules/spectral_extraction/src/spectral_extraction.py index 0314b6aee..f380e6e8c 100644 --- a/modules/spectral_extraction/src/spectral_extraction.py +++ b/modules/spectral_extraction/src/spectral_extraction.py @@ -232,9 +232,9 @@ def __init__(self, else: self.outlier_lev0 = KPF0() if self.outlier_lev0[self.data_ext].size == 0: - self.outlier_lev0[self.data_ext] = np.empty_like(self.spec_flux) + self.outlier_lev0[self.data_ext] = np.zeros_like(self.spec_flux) - self.outlier_lev0[self.data_ext][:] = self.spec_flux + # self.outlier_lev0[self.data_ext][:] = self.spec_flux outlier_flux = self.outlier_lev0[self.data_ext] \ if self.outlier_lev0 is not None and hasattr(self.outlier_lev0, data_ext) else None diff --git a/recipes/kpf_drp.recipe b/recipes/kpf_drp.recipe index e8eecdbcd..1a33e8e81 100644 --- a/recipes/kpf_drp.recipe +++ b/recipes/kpf_drp.recipe @@ -422,8 +422,7 @@ if do_order_trace: # lev1_list = [] -outlier_path = None - +outlier_mask_path = config.ARGUMENT.outlier_mask_path if do_spectral_extraction or do_hk or do_bc: lev0_flat_rect = None trace_list = [] @@ -511,10 +510,10 @@ if do_spectral_extraction or do_hk or do_bc: lev0_data = ImageProcessing(lev0_data, order_mask, ccd_list, data_type, None) op_data = None - if outlier_path == None: - outlier_rejection_file = '' + if outlier_mask_path 
== None: + outlier_mask_file = '' else: - outlier_rejection_file = outlier_path + short_lev0_file + outlier_mask_file = outlier_mask_path + short_lev0_file for idx in ccd_idx: ccd = ccd_list[idx] @@ -532,7 +531,7 @@ if do_spectral_extraction or do_hk or do_bc: rectification_method=rect_method, extraction_method=extract_method, clip_file=clip_file, data_extension=ccd, trace_file=trace_file, flat_extension=ccd+ccd_stack, - do_outlier_rejection=do_outlier, outlier_file=outlier_rejection_file) + do_outlier_rejection=do_outlier, outlier_file=outlier_mask_file) if op_data != None: From 49c11fbe418aa7fbdc35f86820af50e32d0d0e57 Mon Sep 17 00:00:00 2001 From: cwang2016 Date: Tue, 19 Sep 2023 11:15:50 -0700 Subject: [PATCH 52/86] revert the setting of do_l0_to_2d in kpf_drp.cfg --- configs/kpf_drp.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configs/kpf_drp.cfg b/configs/kpf_drp.cfg index 2a721b561..f916faa2d 100644 --- a/configs/kpf_drp.cfg +++ b/configs/kpf_drp.cfg @@ -116,7 +116,7 @@ hk_trace_path = masters/kpfMaster_HKOrderBounds20220909.csv hk_wavelength_path = ["masters/kpfMaster_HKwave20220909_sci.csv", "masters/kpfMaster_HKwave20220909_sky.csv"] # for module process: -do_l0_to_2d = False +do_l0_to_2d = True do_order_trace = False do_spectral_extraction = True do_rv = True From 9026302987b210ed31492e209d9151f8c92b78db Mon Sep 17 00:00:00 2001 From: Aaron Householder Date: Tue, 19 Sep 2023 16:21:22 -0400 Subject: [PATCH 53/86] Update to include ThAr --- modules/wavelength_cal/configs/KPF.cfg | 2 +- modules/wavelength_cal/src/alg.py | 18 ++++++++++++------ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/modules/wavelength_cal/configs/KPF.cfg b/modules/wavelength_cal/configs/KPF.cfg index 633726de3..fe33d801d 100644 --- a/modules/wavelength_cal/configs/KPF.cfg +++ b/modules/wavelength_cal/configs/KPF.cfg @@ -16,7 +16,7 @@ clip_below_median = 1 peak_height_threshold = 0.3 sigma_clip = 2.5 fit_iterations = 5 -chi_2_threshold = 1 +chi_2_threshold = 2 # width in pixels of the fitting window for Gauss fits to emission lines fit_width = 25 diff --git a/modules/wavelength_cal/src/alg.py b/modules/wavelength_cal/src/alg.py index d64a9df3d..f1fbe3ddd 100644 --- a/modules/wavelength_cal/src/alg.py +++ b/modules/wavelength_cal/src/alg.py @@ -65,7 +65,6 @@ def __init__( self.peak_height_threshold = configpull.get_config_value('peak_height_threshold',1.5) self.sigma_clip = configpull.get_config_value('sigma_clip',2.1) self.fit_iterations = configpull.get_config_value('fit_iterations',5) - self.fit_width = configpull.get_config_value('fit_width', 10) self.logger = logger def run_wavelength_cal( @@ -603,8 +602,7 @@ def find_peaks(self, order_flux, peak_height_threshold=1.5): # fit peaks with Gaussian to get accurate position fitted_peaks = detected_peaks.astype(float) gauss_coeffs = np.empty((4, len(detected_peaks))) - # width = np.mean(np.diff(detected_peaks)) // 2 - width = self.fit_width // 2 + width = np.mean(np.diff(detected_peaks)) // 2 # Create mask initially set to True for all detected peaks mask = np.ones(len(detected_peaks), dtype=bool) @@ -848,6 +846,7 @@ def line_match( """ num_input_lines = len(linelist) num_pixels = len(flux) + successful_fits = [] missed_lines = 0 coefs = np.zeros((4,num_input_lines)) @@ -868,11 +867,17 @@ def line_match( last_fit_pixel = peak_pixel + gaussian_fit_width # fit gaussian to matched peak location - coefs[:,i] = self.fit_gaussian( + result= self.fit_gaussian( np.arange(first_fit_pixel,last_fit_pixel), 
flux[first_fit_pixel:last_fit_pixel] ) + if result is not None: + coefs[:, i] = result + successful_fits.append(i) # Append index of successful fit + else: + missed_lines += 1 + amp = coefs[0,i] if amp < 0: missed_lines += 1 @@ -881,6 +886,8 @@ coefs[:,i] = np.nan missed_lines += 1 + linelist = linelist[successful_fits] + coefs = coefs[:, successful_fits] linelist = linelist[np.isfinite(coefs[0,:])] coefs = coefs[:, np.isfinite(coefs[0,:])] @@ -1208,8 +1215,7 @@ def fit_gaussian(self, x, y): np.warnings.simplefilter("ignore") popt, _ = curve_fit(self.integrate_gaussian, x, y, p0=p0, maxfev=1000000) - if self.cal_type == 'LFC': - + if self.cal_type in ['LFC', 'ThAr']: # Quality Checks for Gaussian Fits chi_squared_threshold = int(self.chi_2_threshold) From 1c36700222ab07b0018354d28d6868738896f980 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Wed, 20 Sep 2023 08:46:13 -0700 Subject: [PATCH 54/86] added HK image plot for 2D --- configs/quicklook_match.cfg | 2 +- modules/Utils/analyze_hk.py | 8 +++++--- modules/quicklook/src/alg.py | 23 +++++++++++++++++++++-- 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/configs/quicklook_match.cfg b/configs/quicklook_match.cfg index 6a7415009..1aaec1a66 100644 --- a/configs/quicklook_match.cfg +++ b/configs/quicklook_match.cfg @@ -7,7 +7,7 @@ log_directory = /data/logs_QLP/ [ARGUMENT] # see quicklook_match.recipe for a description of how to set fullpath -fullpath = '/data/L1/202?????/KP.20230701.36172.51*.fits' +fullpath = '/data/??/202?????/KP.20230917.31745.41*.fits' #fullpath = '/data/masters/20230419/*.fits' [MODULE_CONFIGS] diff --git a/modules/Utils/analyze_hk.py b/modules/Utils/analyze_hk.py index ae99b79c3..fa6732429 100644 --- a/modules/Utils/analyze_hk.py +++ b/modules/Utils/analyze_hk.py @@ -64,7 +64,7 @@ def __init__(self, L0, trace_file=None, offset=-1, wavesoln_file=None, logger=No self.wave_lib *= 1 - self.rv_shift/3e5 # Doppler shift wavelength solution - def plot_HK_image_2D(self, fig_path=None, show_plot=False): + def plot_HK_image_2D(self, fig_path=None, kpftype='L0', show_plot=False): """ Generate a 2D image of the Ca H&K spectrum.
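Patches 49 and 53 together give fit_gaussian a chi-squared quality gate: after the curve fit, a chi-squared statistic is compared against chi_2_threshold, and the function returns None so callers can count the line as missed and skip it. A sketch of the same gate using a plain Gaussian model and synthetic data; gaussian and fit_line are illustrative names, not the pipeline's integrate_gaussian machinery.

    import numpy as np
    from scipy.optimize import curve_fit

    def gaussian(x, a, mu, sig, const):
        return a * np.exp(-0.5 * ((x - mu) / sig) ** 2) + const

    def fit_line(x, y, chi_2_threshold=2.0):
        p0 = [y.max() - y.min(), x.mean(), 1.0, y.min()]
        popt, _ = curve_fit(gaussian, x, y, p0=p0, maxfev=100000)
        chi_squared = np.sum((y - gaussian(x, *popt)) ** 2) / np.var(y)  # as in the patch
        return None if chi_squared > chi_2_threshold else popt           # None = skip line

    x = np.arange(25, dtype=float)
    y = gaussian(x, 100.0, 12.0, 2.0, 5.0) + np.random.default_rng(0).normal(0, 1, 25)
    print(fit_line(x, y) is not None)   # True for a clean line

Normalizing by np.var(y) makes the statistic dimensionless but data-dependent, which is presumably why the threshold is kept configurable (chi_2_threshold moves from 1 to 2 across these patches).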
@@ -84,7 +84,9 @@ def plot_HK_image_2D(self, fig_path=None, show_plot=False): vmax = np.nanpercentile(self.image,99.5), interpolation = 'None', origin = 'lower', + cmap='viridis', aspect='auto') + ax.grid(False) if self.trace_file != None: for i in self.trace_location_sci.keys(): @@ -121,12 +123,12 @@ facecolor='none', label = 'Sky')) fig.colorbar(im, orientation='vertical',label = 'Counts (ADU) - Saturation at '+str(64232)) - ax.set_title('Ca H&K CCD: ' + str(self.ObsID) + ' - ' + self.name, fontsize=18) + ax.set_title('Ca H&K CCD: ' + str(self.ObsID) + ' (' + str(kpftype)+ ') - ' + self.name, fontsize=18) ax.set_xlabel('Column (pixel number)', fontsize=18, labelpad=10) ax.set_ylabel('Row (pixel number)', fontsize=18, labelpad=10) ax.xaxis.set_tick_params(labelsize=14) ax.yaxis.set_tick_params(labelsize=14) - plt.legend() + plt.legend(facecolor='lightgray') # Display the plot if fig_path != None: diff --git a/modules/quicklook/src/alg.py b/modules/quicklook/src/alg.py index 8c8327872..46bf5c9e2 100644 --- a/modules/quicklook/src/alg.py +++ b/modules/quicklook/src/alg.py @@ -150,13 +150,13 @@ def qlp_L0(self, kpf0, output_dir): savedir = L0_QLP_file_base +'HK/' os.makedirs(savedir, exist_ok=True) # make directories if needed - # Exposure Meter spectrum plot + # Ca H&K image plot trace_file = self.config['CaHK']['trace_file'] wavesoln_file = self.config['CaHK']['cahk_wav'] myHK = AnalyzeHK(kpf0, trace_file = trace_file, wavesoln_file = wavesoln_file, logger=self.logger) - filename = savedir + self.ObsID + '_HK_image_zoomable.png' + filename = savedir + self.ObsID + '_HK_image_L0_zoomable.png' self.logger.info('Generating QLP image ' + filename) myHK.plot_HK_image_2D(fig_path=filename, show_plot=False) @@ -242,6 +242,25 @@ def qlp_2D(self, kpf2d, output_dir): except Exception as e: self.logger.error(f"Failure in Guider quicklook pipeline: {e}\n{traceback.format_exc()}") + # Make CaHK plots + if 'HK' in self.data_products: + try: + savedir = D2_QLP_file_base +'HK/' + os.makedirs(savedir, exist_ok=True) # make directories if needed + + # Ca H&K image plot + trace_file = self.config['CaHK']['trace_file'] + wavesoln_file = self.config['CaHK']['cahk_wav'] + myHK = AnalyzeHK(kpf2d, trace_file = trace_file, + wavesoln_file = wavesoln_file, + logger=self.logger) + filename = savedir + self.ObsID + '_HK_image_2D_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + myHK.plot_HK_image_2D(fig_path=filename, kpftype='2D', show_plot=False) + + except Exception as e: + self.logger.error(f"Failure in CaHK quicklook pipeline: {e}\n{traceback.format_exc()}") + # Make 2D images # to-do: process bias and dark differently if chips != []: From 0f641b8a4100c49336384dff07ef60f1a3f67026 Mon Sep 17 00:00:00 2001 From: cwang2016 Date: Wed, 20 Sep 2023 16:05:35 -0700 Subject: [PATCH 55/86] check if date-beg-corr time is less than date-end-corr time in expmeter_sci table prior to the computation of midpoint photon arrival.
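The guard in the diff that follows is a one-line validity filter: exposure-meter rows whose corrected begin time is not strictly earlier than their end time are dropped before the midpoint-photon computation. A self-contained sketch of the same numpy datetime64 filtering; the column names follow the patch's intent, and the rows here are made up.

    import numpy as np
    import pandas as pd

    df_EM = pd.DataFrame({
        'DATE-BEG-CORR': ['2023-09-20T05:00:00.0', '2023-09-20T05:00:10.0'],
        'DATE-END-CORR': ['2023-09-20T05:00:05.0', '2023-09-20T05:00:10.0'],  # 2nd row degenerate
    })
    date_beg = np.array(df_EM['DATE-BEG-CORR'], dtype=np.datetime64)
    date_end = np.array(df_EM['DATE-END-CORR'], dtype=np.datetime64)

    valid_idx = np.where(date_beg < date_end)[0]   # keep well-ordered intervals only
    date_beg, date_end = date_beg[valid_idx], date_end[valid_idx]
    print(len(date_beg))                           # 1

Rows with zeroed or equal timestamps would otherwise produce zero-length exposures that corrupt the rate interpolation in CalcCDF.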
--- configs/kpf_drp_local.cfg | 2 ++ configs/kpf_masters_l1.cfg | 1 + modules/radial_velocity/src/midpoint_photon_arrival.py | 10 +++++++--- modules/spectral_extraction/src/alg.py | 10 +++------- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/configs/kpf_drp_local.cfg b/configs/kpf_drp_local.cfg index 93a3dbb98..14528e429 100644 --- a/configs/kpf_drp_local.cfg +++ b/configs/kpf_drp_local.cfg @@ -122,6 +122,8 @@ do_wavecopy_in_sp = True do_qlp = False do_bk_subtraction = True do_bc = True +do_outlier_rejection = False +outlier_mask_path = None # for L0->2D process [WATCHFOR_L0] diff --git a/configs/kpf_masters_l1.cfg b/configs/kpf_masters_l1.cfg index 98e236f07..c521ef058 100644 --- a/configs/kpf_masters_l1.cfg +++ b/configs/kpf_masters_l1.cfg @@ -30,6 +30,7 @@ do_wavecopy_in_sp = True do_qlp = False do_bk_subtraction = True do_bc = True +outlier_mask_path = None # the subdirectory containing order trace result, L1 data, L2 data, L2 reweighted data, qlp, bary data output_trace = / diff --git a/modules/radial_velocity/src/midpoint_photon_arrival.py b/modules/radial_velocity/src/midpoint_photon_arrival.py index 25842a821..1e7a2dd4d 100644 --- a/modules/radial_velocity/src/midpoint_photon_arrival.py +++ b/modules/radial_velocity/src/midpoint_photon_arrival.py @@ -320,11 +320,11 @@ def midPoint(self,start,finish,exposures,expBeg,expEnd,clip=True): ###Find the exposure containing the middle number of photons dummyVarBeg=np.ones(len(expBeg)) dummyVarEnd=np.ones(len(expEnd)) - + for i in range(len(expEnd)): dummyVarBeg[i]=self.CalcCDF(expBeg[i],exposures,expBeg,expEnd) dummyVarEnd[i]=self.CalcCDF(expEnd[i],exposures,expBeg,expEnd) - + indxHigh=np.min(np.where(dummyVarEnd>=midVal)) indxLow=np.max(np.where(dummyVarBeg<=midVal)) @@ -383,6 +383,10 @@ def orderedMidPoint(self): date_beg = np.array(self.df_EM[startkey], dtype=np.datetime64) date_end = np.array(self.df_EM[endkey], dtype=np.datetime64) + valid_idx = np.where(date_beg < date_end)[0] + date_beg = date_beg[valid_idx] + date_end = date_end[valid_idx] + # Bin wavelengths into orders df_EM_bin=self.binOrder(df_EM_norm,self.segmentMin,self.segmentMax) wav_str = df_EM_bin.columns @@ -391,7 +395,7 @@ def orderedMidPoint(self): midBand = np.empty(len(wav_str), dtype='datetime64[ms]') for i in range(len(wav_str)): midBand[i]=(self.midPoint(self.start,self.finish,df_EM_bin[wav_str[i]].astype(float),date_beg,date_end,self.clip)) - + self.orderMid=df_EM_bin.columns.astype(float) self.midPhoto=midBand diff --git a/modules/spectral_extraction/src/alg.py b/modules/spectral_extraction/src/alg.py index 497363cc9..b7e2dde3e 100644 --- a/modules/spectral_extraction/src/alg.py +++ b/modules/spectral_extraction/src/alg.py @@ -671,8 +671,6 @@ def outlier_rejection_fix_profile(s_data, f_data, input_x, outlier_sigma, y_mid, # t_mask[eff_y[outlier_idx[0]], i_x] = 0 else: break # converge - #if is_debug: - # print("p_x: ", p_x, "total iterate: ", i, ' out of ', p_h) if np.where(t_mask == 0)[0].size == 0: t_mask = None @@ -741,8 +739,6 @@ def outlier_rejection_local_profile(s_data, f_data, input_x, outlier_sigma, y_mi i += 1 else: break # converge - if is_debug: - print("p_x: ", p_x, "total iterate: ", i, ' out of ', p_h) if np.where(t_mask == 0)[0].size == 0: t_mask = None @@ -813,9 +809,9 @@ def collect_and_extract_spectrum_curve(self, data_group, order_idx, s_rate=1): is_debug = False mask_height = np.shape(out_data)[1] - if self.order_name == 'GREEN_SKY_FLUX': - if order_idx == 134: - is_debug = True + # if self.order_name == 'GREEN_SKY_FLUX': + 
# if order_idx == 134: # is_debug = True s_data, f_data, is_sdata_raw = self.data_extraction_for_optimal_extraction(data_group, y_mid, y_output_mid, input_widths, y_output_widths, input_x, x_output_step, mask_height, is_debug) From aee5ef326f1635b3b0f6a2850dd724fca1d220 Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Thu, 21 Sep 2023 10:05:07 -0700 Subject: [PATCH 56/86] cast amps to int --- modules/Utils/amplifier_mask.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/Utils/amplifier_mask.py b/modules/Utils/amplifier_mask.py index 6515ff93b..3ea781e13 100644 --- a/modules/Utils/amplifier_mask.py +++ b/modules/Utils/amplifier_mask.py @@ -23,8 +23,8 @@ def __init__(self, action, context): #Input arguments self.input_l1 = self.action.args[0] self.header = self.input_l1.header['PRIMARY'] - self.namps = {'GREEN': self.header['GRNAMPS'], - 'RED': self.header['REDAMPS']} + self.namps = {'GREEN': int(self.header['GRNAMPS']), + 'RED': int(self.header['REDAMPS'])} # self.chips = ['RED', 'GREEN'] self.chips = ['GREEN',] From e8ca1ca58b262f7aedcdf0bc83e904f0d2b82727 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Fri, 22 Sep 2023 08:57:42 -0700 Subject: [PATCH 57/86] Extract SCI instead of SKY for HK; plot updates --- modules/Utils/analyze_hk.py | 62 +++++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git a/modules/Utils/analyze_hk.py b/modules/Utils/analyze_hk.py index fa6732429..c5aa59a40 100644 --- a/modules/Utils/analyze_hk.py +++ b/modules/Utils/analyze_hk.py @@ -80,8 +80,8 @@ def plot_HK_image_2D(self, fig_path=None, kpftype='L0', show_plot=False): """ fig, ax = plt.subplots(figsize = (12,5),tight_layout=True) im = ax.imshow(self.image, - vmin = np.nanpercentile(self.image,1), - vmax = np.nanpercentile(self.image,99.5), + vmin = np.nanpercentile(self.image,0.1), + vmax = np.nanpercentile(self.image,99.9), interpolation = 'None', origin = 'lower', cmap='viridis', aspect='auto') ax.grid(False) @@ -142,42 +142,52 @@ def plot_HK_image_2D(self, fig_path=None, kpftype='L0', show_plot=False): def plot_HK_spectrum_1D(self, fig_path=None, show_plot=False): orders = np.array(self.wave_lib.columns) - padding = 200 + padding = 1 plt.figure(figsize=(12,5),tight_layout=True) - color_grid = ['purple','blue','green','yellow','orange','red'] - chk_bandpass = [384.0, 401.7] + fig, ax = plt.subplots(1, 1, figsize=(12,5)) + + color_grid = ['purple','darkblue','darkgreen','gold','darkorange','darkred'] + chk_bandpass = [383.0, 401.0]#[384.0, 401.7] caK = [393.2, 393.5] caH = [396.7, 397.0] Vcont = [389.9, 391.9] Rcont = [397.4, 399.4] - fig, ax = plt.subplots(1, 1, figsize=(12,5)) - ax.fill_between(chk_bandpass, y1=0, y2=1, facecolor='gray', alpha=0.3, zorder=-100) - ax.fill_between(caH, y1=0, y2=1, facecolor='m', alpha=0.3) - ax.fill_between(caK, y1=0, y2=1, facecolor='m', alpha=0.3) - ax.fill_between(Vcont, y1=0, y2=1, facecolor='c', alpha=0.3) - ax.fill_between(Rcont, y1=0, y2=1, facecolor='c', alpha=0.3) - - ax.text(np.mean(Vcont)-0.6,0.08,'V cont.') - ax.text(np.mean(Rcont)-0.6,0.08,'R cont.') - ax.text(np.mean(caK)-0.155,0.08,'K') - ax.text(np.mean(caH)-0.155,0.08,'H') - ax.set_xlim(384,400) - ax.set_ylim(0,1) - ax.plot([396.847,396.847],[0,1],':',color ='black') - ax.plot([393.366,393.366],[0,1],':',color ='black') - - # Add labels + # Compute and Plot spectra + specHK = np.zeros((len(orders), self.image.shape[1]), dtype=np.float64) for i in range(len(orders)): wav = self.wave_lib[i] - flux =
np.sum(self.image[self.trace_location_sci[i]['x1']:self.trace_location_sci[i]['x2'],:],axis=0) - ax.plot(wav[padding:-padding],flux[padding:-padding]/np.nanpercentile(flux[padding:-padding],99.9),color = color_grid[i],linewidth = 0.5) + # This should be correct, but the sky and sci orders are flipped in the definition file + #flux = np.sum(self.image[self.trace_location_sci[i]['x1']:self.trace_location_sci[i]['x2'],:],axis=0) + # so for now use the sky orders + #flux = np.sum(self.image[self.trace_location_sky[i]['x1']:self.trace_location_sky[i]['x2'],:],axis=0) + specHK[i,:] = np.sum(self.image[self.trace_location_sky[i]['x1']:self.trace_location_sky[i]['x2'],:],axis=0) + ax.plot(wav[padding:-padding], + specHK[i,padding:-padding], + color = color_grid[i], + linewidth = 1.0) + ymax = 1.15*np.nanpercentile(specHK[:,padding:-padding],99.9) + ax.set_ylim(0, ymax) + ax.set_xlim(chk_bandpass[0], chk_bandpass[1]) + + # Add labels + plt.title('Ca H&K Spectrum: ' + str(self.ObsID) + ' - ' + self.name, fontsize=18) ax.set_xlabel('Wavelength (nm)',fontsize=18) - ax.set_ylabel('Flux',fontsize=18) + ax.set_ylabel('Flux (ADU)',fontsize=18) ax.xaxis.set_tick_params(labelsize=14) ax.yaxis.set_tick_params(labelsize=14) - plt.title('Ca H&K Spectrum: ' + str(self.ObsID) + ' - ' + self.name, fontsize=18) + ax.fill_between(chk_bandpass, y1=0, y2=ymax, facecolor='gray', alpha=0.3, zorder=-100) + ax.fill_between(caH, y1=0, y2=ymax, facecolor='m', alpha=0.3) + ax.fill_between(caK, y1=0, y2=ymax, facecolor='m', alpha=0.3) + ax.fill_between(Vcont, y1=0, y2=ymax, facecolor='c', alpha=0.3) + ax.fill_between(Rcont, y1=0, y2=ymax, facecolor='c', alpha=0.3) + ax.text(np.mean(Vcont)-0.89, 0.95*ymax, 'V continuum') + ax.text(np.mean(Rcont)-0.89, 0.95*ymax, 'R continuum') + ax.text(np.mean(caK)-0.40, 0.95*ymax, 'K') + ax.text(np.mean(caH)-0.40, 0.95*ymax, 'H') + ax.plot([396.847,396.847],[0,ymax],':',color ='black') + ax.plot([393.366,393.366],[0,ymax],':',color ='black') # Display the plot if fig_path != None: From 7cb7731662857a13673299024fe1209308051c58 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Fri, 22 Sep 2023 08:58:22 -0700 Subject: [PATCH 58/86] Compute HK spectrum from 2D not L0 --- configs/quicklook_match.cfg | 2 +- modules/quicklook/src/alg.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/configs/quicklook_match.cfg b/configs/quicklook_match.cfg index 1aaec1a66..ec0a637b9 100644 --- a/configs/quicklook_match.cfg +++ b/configs/quicklook_match.cfg @@ -7,7 +7,7 @@ log_directory = /data/logs_QLP/ [ARGUMENT] # see quicklook_match.recipe for a description of how to set fullpath -fullpath = '/data/??/202?????/KP.20230917.31745.41*.fits' +fullpath = '/data/2D/202?????/KP.20230914.31916.48*.fits' #fullpath = '/data/masters/20230419/*.fits' [MODULE_CONFIGS] diff --git a/modules/quicklook/src/alg.py b/modules/quicklook/src/alg.py index 46bf5c9e2..fa9a1adbc 100644 --- a/modules/quicklook/src/alg.py +++ b/modules/quicklook/src/alg.py @@ -159,11 +159,6 @@ def qlp_L0(self, kpf0, output_dir): filename = savedir + self.ObsID + '_HK_image_L0_zoomable.png' self.logger.info('Generating QLP image ' + filename) myHK.plot_HK_image_2D(fig_path=filename, show_plot=False) - - # Exposure Meter time series plot - filename = savedir + self.ObsID + '_HK_spectrum_zoomable.png' - self.logger.info('Generating QLP image ' + filename) - myHK.plot_HK_spectrum_1D(fig_path=filename, show_plot=False) except Exception as e: self.logger.error(f"Failure in CaHK quicklook pipeline: {e}\n{traceback.format_exc()}") 
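The 1D Ca H&K spectrum above is produced by straight row sums between each order's trace bounds. The pattern in isolation, with a synthetic image and invented bounds (the real bounds come from the trace definition file):

    import numpy as np

    image = np.random.default_rng(0).poisson(100, size=(255, 1024)).astype(float)
    trace_location = {0: {'x1': 40, 'x2': 55}, 1: {'x1': 90, 'x2': 105}}  # row bounds per order

    spec = np.zeros((len(trace_location), image.shape[1]))
    for i, bounds in trace_location.items():
        spec[i, :] = np.sum(image[bounds['x1']:bounds['x2'], :], axis=0)  # collapse rows
    print(spec.shape)                                                     # (2, 1024)

Plain row sums, rather than optimal extraction, are adequate here because these plots are diagnostic quicklook products.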
@@ -258,6 +253,11 @@ def qlp_2D(self, kpf2d, output_dir):
                 self.logger.info('Generating QLP image ' + filename)
                 myHK.plot_HK_image_2D(fig_path=filename, kpftype='2D', show_plot=False)
 
+                # Ca H&K spectrum plot
+                filename = savedir + self.ObsID + '_HK_spectrum_zoomable.png'
+                self.logger.info('Generating QLP image ' + filename)
+                myHK.plot_HK_spectrum_1D(fig_path=filename, show_plot=False)
+
             except Exception as e:
                 self.logger.error(f"Failure in CaHK quicklook pipeline: {e}\n{traceback.format_exc()}")


From f1b6089493a789cee4fa274d87b82363aaec7432 Mon Sep 17 00:00:00 2001
From: cwang2016
Date: Fri, 22 Sep 2023 16:31:34 -0700
Subject: [PATCH 59/86] use 0920 order trace result for spectral extraction;
 update CA HK boundary file; update OrderMask computation for the case that
 some trace is not fully covered.

---
 configs/kpf_drp.cfg                       | 11 ++++---
 configs/kpf_masters_l1.cfg                |  9 +++---
 modules/order_trace/src/alg_order_mask.py | 38 ++++++++++++++++++++++-
 modules/order_trace/src/order_mask.py     |  7 ++++-
 modules/spectral_extraction/src/alg.py    |  7 ++---
 recipes/kpf_drp.recipe                    |  2 +-
 6 files changed, 57 insertions(+), 17 deletions(-)

diff --git a/configs/kpf_drp.cfg b/configs/kpf_drp.cfg
index f916faa2d..be7afdb3a 100644
--- a/configs/kpf_drp.cfg
+++ b/configs/kpf_drp.cfg
@@ -42,9 +42,10 @@ ccd_list = ['GREEN_CCD', 'RED_CCD']
 # flat_file: flat file for order trace process. updated upon new flat file.
 # ccd_idx: index in ccd_list for the ccd to be processed in DRP recipe
 #flat_file = KP.20221107.04689.77
-flat_file = /data/reference_fits/kpf_20230730_master_flat.fits
-order_trace_flat = /data/reference_fits/kpf_20230730_master_flat.fits
-order_trace_files = [ '/data/reference_fits/kpf_20230730_master_flat_GREEN_CCD.csv', '/data/reference_fits/kpf_20230730_master_flat_RED_CCD.csv']
+#flat_file = /data/reference_fits/kpf_20230730_master_flat.fits
+flat_file = /data/reference_fits/kpf_20230920_master_flat.fits
+order_trace_flat = /data/reference_fits/kpf_20230920_master_flat.fits
+order_trace_files = [ '/data/reference_fits/kpf_20230920_master_flat_GREEN_CCD.csv', '/data/reference_fits/kpf_20230920_master_flat_RED_CCD.csv']
 fitting_poly_degree = 3
 ccd_idx = [0, 1]
 
@@ -60,7 +61,7 @@ ccd_idx = [0, 1]
 # - note: order_per_ccd, start_order, orderlet_names should (the outer list), orderlet_widths_ccds (the outer list) have size as that of ccd_list.
 #   the inner lists of orderlet_names and orderlet_widths_ccds are with the same size.
 orders_per_ccd=[35,32]
-start_order = [-1, -1]
+start_order = [-1, 0]
 orderlet_names = [['GREEN_SKY_FLUX', 'GREEN_SCI_FLUX1', 'GREEN_SCI_FLUX2', 'GREEN_SCI_FLUX3', 'GREEN_CAL_FLUX'], ['RED_SKY_FLUX', 'RED_SCI_FLUX1', 'RED_SCI_FLUX2', 'RED_SCI_FLUX3', 'RED_CAL_FLUX']]
 #orderlet_widths_ccds = [[],[]]
 orderlet_widths_ccds = [[-1, -1, -1, 1, -1],[-1, -1, -1, -1, -1]]
 
@@ -112,7 +113,7 @@ hk_extract_exts = ['CA_HK_SCI', 'CA_HK_SKY']
 hk_wave_exts = ['CA_HK_SCI_WAVE', 'CA_HK_SKY_WAVE']
 hk_dark_fits = masters/KP.20221029.21537.28.fits
 hk_bias_fits = None
-hk_trace_path = masters/kpfMaster_HKOrderBounds20220909.csv
+hk_trace_path = masters/kpfMaster_HKOrderBounds20230818.csv
 hk_wavelength_path = ["masters/kpfMaster_HKwave20220909_sci.csv", "masters/kpfMaster_HKwave20220909_sky.csv"]
 
 # for module process:
diff --git a/configs/kpf_masters_l1.cfg b/configs/kpf_masters_l1.cfg
index c521ef058..a1ba6e2c9 100644
--- a/configs/kpf_masters_l1.cfg
+++ b/configs/kpf_masters_l1.cfg
@@ -59,8 +59,8 @@ ccd_list = ['GREEN_CCD', 'RED_CCD']
 flat_file = *_master_flat.fits
 # order_trace_files = ['', '']
 # flat_file = /data/masters/20230411/kpf_20230411_master_flat.fits
-order_trace_flat = /data/reference_fits/kpf_20230730_master_flat.fits
-order_trace_files = [ '/data/reference_fits/kpf_20230730_master_flat_GREEN_CCD.csv', '/data/reference_fits/kpf_20230730_master_flat_RED_CCD.csv']
+order_trace_flat = /data/reference_fits/kpf_20230920_master_flat.fits
+order_trace_files = [ '/data/reference_fits/kpf_20230920_master_flat_GREEN_CCD.csv', '/data/reference_fits/kpf_20230920_master_flat_RED_CCD.csv']
 fitting_poly_degree = 3
 ccd_idx = [0, 1]
 
@@ -74,7 +74,7 @@ ccd_idx = [0, 1]
 #   ex: start_order = [-1, -1] means the traces of sky fiber of first order for both green and red are missing in the order trace result.
 # - note: order_per_ccd, start_order, orderlet_names should have size as that of ccd_list.
 orders_per_ccd=[35,32]
-start_order = [-1, -1]
+start_order = [-1, 0]
 orderlet_names = [['GREEN_SKY_FLUX', 'GREEN_SCI_FLUX1', 'GREEN_SCI_FLUX2', 'GREEN_SCI_FLUX3', 'GREEN_CAL_FLUX'], ['RED_SKY_FLUX', 'RED_SCI_FLUX1', 'RED_SCI_FLUX2', 'RED_SCI_FLUX3', 'RED_CAL_FLUX']]
 orderlet_widths_ccds = [[-1, -1, -1, 1, -1],[-1, -1, -1, -1, -1]]
 
@@ -124,7 +124,8 @@ hk_extract_exts = ['CA_HK_SCI', 'CA_HK_SKY']
 hk_wave_exts = ['CA_HK_SCI_WAVE', 'CA_HK_SKY_WAVE']
 hk_dark_fits = masters/KP.20221029.21537.28.fits
 hk_bias_fits = None
-hk_trace_path = masters/kpfMaster_HKOrderBounds20220909.csv
+#hk_trace_path = masters/kpfMaster_HKOrderBounds20220909.csv
+hk_trace_path = masters/kpfMaster_HKOrderBounds20230818.csv
 hk_wavelength_path = ["masters/kpfMaster_HKwave20220909_sci.csv", "masters/kpfMaster_HKwave20220909_sky.csv"]
 
 # for L0->2D process
diff --git a/modules/order_trace/src/alg_order_mask.py b/modules/order_trace/src/alg_order_mask.py
index 1618e76c4..00fb9851b 100644
--- a/modules/order_trace/src/alg_order_mask.py
+++ b/modules/order_trace/src/alg_order_mask.py
@@ -25,6 +25,7 @@ class OrderMaskAlg(ModuleAlgBase):
             listed in the order_trace_data may not the first orderlet of the first order. A negative number like
             -n means the first n traces are not included in the order_trace_data.
         order_mask_data (numpy.ndarray): 2D array to contain the order mask data. Defaults to None.
+        full_coverage (int): if set, the order mask covers the full width of each order. Defaults to 0.
         config (configparser.ConfigParser, optional): config context. Defaults to None.
         logger (logging.Logger, optional): Instance of logging.Logger. Defaults to None.
         logger_name (str, optional): Name of the logger defined for the OrderMaskAlg instance.
@@ -62,6 +63,7 @@ def __init__(self, spectrum_data, order_trace_data, order_trace_header,
                  orderlet_names=None, start_order=0,
                  order_mask_data=None,
+                 full_coverage=0,
                  config=None, logger=None,
                  logger_name=None):
@@ -130,6 +132,8 @@ def __init__(self, spectrum_data, order_trace_data, order_trace_header,
         else:
             self.order_mask_data = order_mask_data
 
+        self.full_coverage = full_coverage
+
     def get_config_value(self, prop, default=''):
         """ Get defined value from the config file.
@@ -221,8 +225,40 @@ def set_order_mask(self, order_idx, s_width=None, trace_value=1):
         # y_lower = np.floor(y_mid - y_widths[0]).astype(int)
         # y_upper = np.ceil(y_mid + y_widths[1]).astype(int)
 
+        #y_lower = np.where(y_lower <= 0, 0, y_lower)
+        #y_upper = np.where(y_upper >= self.dim_h, self.dim_h-1, y_upper)
+
+        # xrange correction if full coverage is required.
+        if self.full_coverage:
+            y_mid_original = np.polyval(coeffs, np.arange(0, self.dim_w, dtype=int))
+            y_lower_original = np.ceil(y_mid_original - y_widths[0]).astype(int)
+            y_upper_original = np.floor(y_mid_original + y_widths[1]).astype(int)
+
+            new_x1 = xrange[0]
+            new_x2 = xrange[1]
+            if xrange[0] > 0:
+                for x in range(xrange[0]-1, -1, -1):
+                    if (0 < y_upper_original[x] <= self.dim_h) or (0 <= y_lower_original[x] < self.dim_h):
+                        new_x1 = x
+                    else:
+                        break
+                if new_x1 != xrange[0]:
+                    xrange[0] = new_x1
+            if xrange[1] < self.dim_w-1:
+                for x in range(xrange[1]+1, self.dim_w, 1):
+                    if (0 < y_upper_original[x] <= self.dim_h) or (0 <= y_lower_original[x] < self.dim_h):
+                        new_x2 = x
+                    else:
+                        break
+                if new_x2 != xrange[1]:
+                    xrange[1] = new_x2
+
+            x_step = np.arange(new_x1, new_x2+1).astype(int)
+            y_lower = y_lower_original[new_x1:new_x2+1]
+            y_upper = y_upper_original[new_x1:new_x2+1]
+
         y_lower = np.where(y_lower <= 0, 0, y_lower)
-        y_upper = np.where(y_upper >= self.dim_h, self.dim_h-1, y_upper)
+        y_upper = np.where(y_upper >= self.dim_h, self.dim_h, y_upper)
 
         x_img_step = x_step + x_o
         for idx, x in enumerate(x_img_step):
diff --git a/modules/order_trace/src/order_mask.py b/modules/order_trace/src/order_mask.py
index 2444d2453..a347a1321 100644
--- a/modules/order_trace/src/order_mask.py
+++ b/modules/order_trace/src/order_mask.py
@@ -34,6 +34,8 @@
                   trace pixels.
         - `action.args['non_orderlet_value']: (number, optional)`: Value for non orderlet pixels. Defaults to 0.
+        - `action.args['full_coverage']: (number, optional)`: Make the order mask cover the entire image width
+          regardless of the horizontal coverage provided by the order-trace result. Defaults to 0.
- `context (keckdrpframework.models.processing_context.ProcessingContext)`: `context.config_path` contains the path of the config file defined for the module of order trace in the master @@ -104,7 +106,8 @@ class OrderMask(KPF0_Primitive): 'poly_degree': 3, 'origin': [0, 0], 'orderlets_on_image': None, - 'orderlet_widths': None + 'orderlet_widths': None, + 'full_coverage': 0 } NORMAL = 0 @@ -159,6 +162,7 @@ def __init__(self, self.orderlet_values.append(self.orderlet_values[-1]) non_od_value = action.args['non_orderlet_value'] if 'non_orderlet_value' in args_keys else None + self.full_coverage = self.get_args_value("full_coverage", action.args, args_keys) # input configuration self.config = configparser.ConfigParser() @@ -196,6 +200,7 @@ def __init__(self, order_mask_data=non_od_value, orderlet_names=self.orderlet_names, start_order=start_order, + full_coverage = self.full_coverage, config=self.config, logger=self.logger) except Exception as e: self.alg = None diff --git a/modules/spectral_extraction/src/alg.py b/modules/spectral_extraction/src/alg.py index b7e2dde3e..2100cf345 100644 --- a/modules/spectral_extraction/src/alg.py +++ b/modules/spectral_extraction/src/alg.py @@ -522,7 +522,6 @@ def data_extraction_for_optimal_extraction(self, data_group, s_data = np.zeros((p_height, p_width), dtype=float) if s_dt is not None else None f_data = np.zeros((p_height, p_width), dtype=float) if f_dt is not None else None - for i_x, p_x in enumerate(input_x): y_input = np.floor(input_widths + y_mid[i_x]).astype(int) y_input_idx = np.where((y_input <= (input_y_dim - 1)) & (y_input >= 0))[0] @@ -805,12 +804,11 @@ def collect_and_extract_spectrum_curve(self, data_group, order_idx, s_rate=1): extracted_data = np.zeros((y_size if self.extraction_method == self.NOEXTRACT else 1, output_x_dim)) y_output_widths = np.arange(-lower_width, upper_width) # in parallel to input_widths - is_debug = False mask_height = np.shape(out_data)[1] - # if self.order_name == 'GREEN_SKY_FLUX': - # if order_idx == 134: + #if self.order_name == 'GREEN_SKY_FLUX': + # if order_idx == 4: # is_debug = True s_data, f_data, is_sdata_raw = self.data_extraction_for_optimal_extraction(data_group, y_mid, y_output_mid, input_widths, y_output_widths, @@ -836,7 +834,6 @@ def collect_and_extract_spectrum_curve(self, data_group, order_idx, s_rate=1): if s_data is not None: out_data[self.SDATA][:] = s_data[:, s_x][:, np.newaxis] \ if s_data_outlier is None else s_data_outlier[:, s_x][:, np.newaxis] - extracted_result = self.extraction_handler(out_data, y_size, data_group, t_mask[:, s_x][:, np.newaxis] if t_mask is not None else None) extracted_data[:, o_x:o_x+1] = extracted_result['extraction'] diff --git a/recipes/kpf_drp.recipe b/recipes/kpf_drp.recipe index 1a33e8e81..3ecd52bf7 100644 --- a/recipes/kpf_drp.recipe +++ b/recipes/kpf_drp.recipe @@ -504,7 +504,7 @@ if do_spectral_extraction or do_hk or do_bc: order_mask = OrderMask(lev0_data, order_mask, orderlet_names=orderlet_names[idx], start_order=start_order[idx], trace_file=trace_list[idx], data_extension=ccd_list[idx], - orderlet_widths=orderlet_widths) + orderlet_widths=orderlet_widths, full_coverage=1) # update L0 by background subtraction if order_mask != None: lev0_data = ImageProcessing(lev0_data, order_mask, ccd_list, data_type, None) From 2304c65430f51ef2caabc7427e3f4acf780e7b09 Mon Sep 17 00:00:00 2001 From: Russ Date: Mon, 25 Sep 2023 08:13:06 -0700 Subject: [PATCH 60/86] Ensure root owns product files in /data/kpf/masters/yyyymmdd and recipe log files are stored. 
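
The product-copy and ownership steps below move from the host-side Perl into the
in-container command scripts, so the files landing in /masters/<yyyymmdd> are
created root-owned and the recipe logs are archived next to them. Roughly the same
pattern in Python (a sketch only, not part of this patch; the date stamp and mount
points are illustrative):

    import os
    import shutil

    procdate = "20230927"              # YYYYMMDD stamp, illustrative
    src = f"/data/masters/{procdate}"  # sandbox products inside the container
    dst = f"/masters/{procdate}"       # archive mount
    os.makedirs(dst, exist_ok=True)    # like mkdir -p /masters/<yyyymmdd>
    for name in os.listdir(src):
        # copy2 preserves timestamps, like cp -p; the copies are owned by the
        # in-container user (root), so no separate chown pass is needed
        shutil.copy2(os.path.join(src, name), dst)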
--- cronjobs/kpfmasters_wls_auto.pl | 89 ++----------------- cronjobs/kpfmastersruncmd_l0.pl | 56 ++---------- cronjobs/kpfmastersruncmd_l1.pl | 57 ++---------- cronjobs/runDailyPipelines.sh | 10 +-- .../cronjobs/kpfmasters_register_in_db.pl | 35 +------- 5 files changed, 28 insertions(+), 219 deletions(-) diff --git a/cronjobs/kpfmasters_wls_auto.pl b/cronjobs/kpfmasters_wls_auto.pl index 2682f1392..b8d1ed7c3 100755 --- a/cronjobs/kpfmasters_wls_auto.pl +++ b/cronjobs/kpfmasters_wls_auto.pl @@ -19,7 +19,6 @@ use strict; use warnings; use File::Copy; -use File::Path qw/make_path/; select STDERR; $| = 1; select STDOUT; $| = 1; @@ -93,7 +92,7 @@ # Initialize fixed parameters and read command-line parameter. my $iam = 'kpfmasters_wls_auto.pl'; -my $version = '1.5'; +my $version = '1.6'; my $procdate = shift @ARGV; # YYYYMMDD command-line parameter. @@ -134,17 +133,21 @@ "make init\n" . "export PYTHONUNBUFFERED=1\n" . "git config --global --add safe.directory /code/KPF-Pipeline\n" . + "mkdir -p /data/masters/${procdate}\n" . + "cp -pr /masters/${procdate}/kpf_${procdate}*L1.fits /data/masters/${procdate}\n" . "kpf -r $recipe -c $config --date ${procdate}\n" . + "cp -p /data/masters/${procdate}/*master_WLS* /masters/${procdate}\n" . + "mkdir -p /masters/${procdate}/wlpixelfiles\n" . + "cp -p /data/masters/wlpixelfiles/*kpf_${procdate}* /masters/${procdate}/wlpixelfiles\n" . + "cp -p /code/KPF-Pipeline/pipeline_${procdate}.log /masters/${procdate}/pipeline_wls_auto_${procdate}.log\n" . + "rm /code/KPF-Pipeline/pipeline_${procdate}.log\n" . "exit\n"; my $makescriptcmd = "echo \"$script\" > $dockercmdscript"; `$makescriptcmd`; `chmod +x $dockercmdscript`; -`mkdir -p $sbxdir`; -`cp -pr ${mastersdir}/${procdate}/kpf_${procdate}*L1.fits $sbxdir`; - my $dockerruncmd = "docker run -d --name $containername " . - "-v ${codedir}:/code/KPF-Pipeline -v ${testdatadir}:/testdata -v $sandbox:/data " . + "-v ${codedir}:/code/KPF-Pipeline -v ${testdatadir}:/testdata -v $sandbox:/data -v ${mastersdir}:/masters " . "$containerimage bash ./$dockercmdscript"; print "Executing $dockerruncmd\n"; my $opdockerruncmd = `$dockerruncmd`; @@ -182,63 +185,6 @@ $icheckpoint++; -# Directory to store products should already exist because -# cronjob kpfmastersruncmd_l0.pl ran before. - -my $destdir = "${mastersdir}/$procdate"; - -if (! (-e $destdir)) { - print "*** Error: Product directory does not exist ($destdir): $!\n"; - exit(64); -} - -sleep(30); - -my $globfiles = "$sbxdir/*master_WLS*"; - -my @files = glob("$globfiles"); - -foreach my $file (@files) { - my $destfile = "$destdir/$file"; - if (! (-e $destfile)) { - if (! (copy($file, $destdir))) { - print "*** Warning: couldn't copy $file to $destdir ($!); " . - "skipping...\n"; - } else { - print "Copied $file to $destdir\n"; - } - } -} - - -# Make directory to store wlpixelfiles. - -my $destdir2 = "${mastersdir}/$procdate/wlpixelfiles"; - -if (! (-e $destdir2)) { - if (! make_path($destdir2)) { - die "*** Error: Could not make directory ($destdir2): $!\n"; - } else { - print "Made new directory $destdir2\n"; - } -} - -sleep(30); - -my $globfiles2 = "${sandbox}/masters/wlpixelfiles/*kpf_${procdate}*"; - -my @files2 = glob("$globfiles2"); - -foreach my $file (@files2) { - if (! (copy($file, $destdir2))) { - print "*** Warning: couldn't copy $file to $destdir2 ($!); " . - "skipping...\n"; - } else { - print "Copied $file to $destdir2\n"; - } -} - - # Log end time. 
$endscript = time(); @@ -249,21 +195,4 @@ print "Terminating normally...\n"; -# Copy log file from runtime directory to product directory. - -my ($logfileBase) = $iam =~ /(.+)\.pl/; - -my $logfile = $logdir . '/' . $logfileBase . '_' . $procdate . '.out'; - -if (-e $logfile) { - - if (! (copy($logfile, $destdir))) { - die "*** Warning: couldn't copy $logfile to $destdir ($!); " . - "quitting...\n"; - } else { - print "Copied $logfile to $destdir\n"; - } -} - - exit(0); diff --git a/cronjobs/kpfmastersruncmd_l0.pl b/cronjobs/kpfmastersruncmd_l0.pl index f2eec7667..bf51e96d3 100755 --- a/cronjobs/kpfmastersruncmd_l0.pl +++ b/cronjobs/kpfmastersruncmd_l0.pl @@ -25,7 +25,6 @@ use strict; use warnings; use File::Copy; -use File::Path qw/make_path/; select STDERR; $| = 1; select STDOUT; $| = 1; @@ -99,7 +98,7 @@ # Initialize fixed parameters and read command-line parameter. my $iam = 'kpfmastersruncmd_l0.pl'; -my $version = '1.6'; +my $version = '1.7'; my $procdate = shift @ARGV; # YYYYMMDD command-line parameter. @@ -147,6 +146,10 @@ "export PYTHONUNBUFFERED=1\n" . "git config --global --add safe.directory /code/KPF-Pipeline\n" . "kpf -r $recipe -c $config --date ${procdate}\n" . + "mkdir -p /masters/${procdate}\n" . + "cp -p /testdata/kpf_${procdate}* /masters/${procdate}\n" . + "chown root:root /masters/${procdate}/*\n" . + "cp -p /data/logs/${procdate}/pipeline_${procdate}.log /masters/${procdate}/pipeline_masters_drp_l0_${procdate}.log\n" . "exit\n"; my $makescriptcmd = "echo \"$script\" > $dockercmdscript"; `$makescriptcmd`; @@ -157,7 +160,7 @@ `cp -pr /data/kpf/L0/$procdate/*.fits $sandbox/L0/$procdate`; my $dockerruncmd = "docker run -d --name $containername " . - "-v ${codedir}:/code/KPF-Pipeline -v ${testdatadir}:/testdata -v $sandbox:/data " . + "-v ${codedir}:/code/KPF-Pipeline -v ${testdatadir}:/testdata -v $sandbox:/data -v ${mastersdir}:/masters " . "$containerimage bash ./$dockercmdscript"; print "Executing $dockerruncmd\n"; my $opdockerruncmd = `$dockerruncmd`; @@ -195,34 +198,6 @@ $icheckpoint++; -# Make directory to store products. - -my $destdir = "${mastersdir}/$procdate"; - -if (! (-e $destdir)) { - if (! make_path($destdir)) { - die "*** Error: Could not make directory ($destdir): $!\n"; - } else { - print "Made new directory $destdir\n"; - } -} - -sleep(30); - -my $globfiles = "${testdatadir}/kpf_${procdate}*"; - -my @files = glob("$globfiles"); - -foreach my $file (@files) { - if (! (copy($file, $destdir))) { - print "*** Warning: couldn't copy $file to $destdir ($!); " . - "skipping...\n"; - } else { - print "Copied $file to $destdir\n"; - } -} - - # Log end time. $endscript = time(); @@ -233,23 +208,4 @@ print "Terminating normally...\n"; -# Copy log file from runtime directory to product directory, assuming -# that the following convention for log-file naming is followed. - - -my ($logfileBase) = $iam =~ /(.+)\.pl/; - -my $logfile = $logdir . '/' . $logfileBase . '_' . $procdate . '.out'; - -if (-e $logfile) { - - if (! (copy($logfile, $destdir))) { - die "*** Warning: couldn't copy $logfile to $destdir ($!); " . 
- "quitting...\n"; - } else { - print "Copied $logfile to $destdir\n"; - } -} - - exit(0); diff --git a/cronjobs/kpfmastersruncmd_l1.pl b/cronjobs/kpfmastersruncmd_l1.pl index 5417a929c..a3dae9260 100755 --- a/cronjobs/kpfmastersruncmd_l1.pl +++ b/cronjobs/kpfmastersruncmd_l1.pl @@ -22,7 +22,6 @@ use strict; use warnings; use File::Copy; -use File::Path qw/make_path/; select STDERR; $| = 1; select STDOUT; $| = 1; @@ -113,7 +112,7 @@ # Initialize fixed parameters and read command-line parameter. my $iam = 'kpfmastersruncmd_l1.pl'; -my $version = '1.7'; +my $version = '1.8'; my $procdate = shift @ARGV; # YYYYMMDD command-line parameter. @@ -183,15 +182,16 @@ "export PYTHONUNBUFFERED=1\n" . "pip install psycopg2-binary\n" . "git config --global --add safe.directory /code/KPF-Pipeline\n" . + "mkdir -p /data/masters/${procdate}\n" . + "cp -pr /masters/${procdate}/kpf_${procdate}*.fits /data/masters/${procdate}\n" . "kpf -r $recipe -c $config --date ${procdate}\n" . + "cp -p /data/masters/${procdate}/* /masters/${procdate}\n" . + "cp -p /data/logs/${procdate}/pipeline_${procdate}.log /masters/${procdate}/pipeline_masters_drp_l1_${procdate}.log\n" . "exit\n"; my $makescriptcmd = "echo \"$script\" > $dockercmdscript"; `$makescriptcmd`; `chmod +x $dockercmdscript`; -`mkdir -p $sandbox/masters/$procdate`; -`cp -pr ${mastersdir}/${procdate}/kpf_${procdate}*.fits ${sandbox}/masters/$procdate`; - my $dockerruncmd = "docker run -d --name $containername " . "-v ${codedir}:/code/KPF-Pipeline -v ${testdatadir}:/testdata -v $sandbox:/data -v ${mastersdir}:/masters " . "--network=host -e DBPORT=$dbport -e DBNAME=$dbname -e DBUSER=$dbuser -e DBSERVER=127.0.0.1 " . @@ -232,35 +232,6 @@ $icheckpoint++; -# Directory to store products should already exist because -# cronjob kpfmastersruncmd_l0.pl ran before. - -my $destdir = "${mastersdir}/$procdate"; - -if (! (-e $destdir)) { - print "*** Error: Product directory does not exist ($destdir): $!\n"; - exit(64); -} - -sleep(30); - -my $globfiles = "${sandbox}/masters/$procdate/*"; - -my @files = glob("$globfiles"); - -foreach my $file (@files) { - my $destfile = "$destdir/$file"; - if (! (-e $destfile)) { - if (! (copy($file, $destdir))) { - print "*** Warning: couldn't copy $file to $destdir ($!); " . - "skipping...\n"; - } else { - print "Copied $file to $destdir\n"; - } - } -} - - # Log end time. $endscript = time(); @@ -271,22 +242,4 @@ print "Terminating normally...\n"; -# Copy log file from runtime directory to product directory, assuming -# that the following convention for log-file naming is followed. - -my ($logfileBase) = $iam =~ /(.+)\.pl/; - -my $logfile = $logdir . '/' . $logfileBase . '_' . $procdate . '.out'; - -if (-e $logfile) { - - if (! (copy($logfile, $destdir))) { - die "*** Warning: couldn't copy $logfile to $destdir ($!); " . 
- "quitting...\n"; - } else { - print "Copied $logfile to $destdir\n"; - } -} - - exit(0); diff --git a/cronjobs/runDailyPipelines.sh b/cronjobs/runDailyPipelines.sh index f0e90e054..b0e9fc534 100755 --- a/cronjobs/runDailyPipelines.sh +++ b/cronjobs/runDailyPipelines.sh @@ -2,14 +2,14 @@ procdate=$(date +\%Y\%m\%d) -printenv >& $KPFCRONJOB_LOGS/runDailyPipelines_$procdate.env +printenv >& $KPFCRONJOB_LOGS/jobs/runDailyPipelines_$procdate.env echo "Processing date: $procdate" -$KPFCRONJOB_CODE/cronjobs/kpfmastersruncmd_l0.pl $procdate >& $KPFCRONJOB_LOGS/kpfmastersruncmd_l0_$procdate.out +$KPFCRONJOB_CODE/cronjobs/kpfmastersruncmd_l0.pl $procdate >& $KPFCRONJOB_LOGS/jobs/kpfmastersruncmd_l0_$procdate.out echo Return value from kpfmastersruncmd_l0.pl = $? -$KPFCRONJOB_CODE/cronjobs/kpfmastersruncmd_l1.pl $procdate >& $KPFCRONJOB_LOGS/kpfmastersruncmd_l1_$procdate.out +$KPFCRONJOB_CODE/cronjobs/kpfmastersruncmd_l1.pl $procdate >& $KPFCRONJOB_LOGS/jobs/kpfmastersruncmd_l1_$procdate.out echo Return value from kpfmastersruncmd_l1.pl = $? -$KPFCRONJOB_CODE/cronjobs/kpfmasters_wls_auto.pl $procdate >& $KPFCRONJOB_LOGS/kpfmasters_wls_auto_$procdate.out +$KPFCRONJOB_CODE/cronjobs/kpfmasters_wls_auto.pl $procdate >& $KPFCRONJOB_LOGS/jobs/kpfmasters_wls_auto_$procdate.out echo Return value from kpfmasters_wls_auto.pl = $? -$KPFCRONJOB_CODE/database/cronjobs/kpfmasters_register_in_db.pl $procdate >& $KPFCRONJOB_LOGS/kpfmasters_register_in_db_$procdate.out +$KPFCRONJOB_CODE/database/cronjobs/kpfmasters_register_in_db.pl $procdate >& $KPFCRONJOB_LOGS/jobs/kpfmasters_register_in_db_$procdate.out echo Return value from kpfmasters_register_in_db.pl = $? diff --git a/database/cronjobs/kpfmasters_register_in_db.pl b/database/cronjobs/kpfmasters_register_in_db.pl index f5d10ce64..1f157108a 100755 --- a/database/cronjobs/kpfmasters_register_in_db.pl +++ b/database/cronjobs/kpfmasters_register_in_db.pl @@ -100,7 +100,7 @@ # Initialize fixed parameters and read command-line parameter. my $iam = 'kpfmasters_register_in_db.pl'; -my $version = '1.5'; +my $version = '1.6'; my $procdate = shift @ARGV; # YYYYMMDD command-line parameter. @@ -176,6 +176,8 @@ "pip install psycopg2-binary\n" . "git config --global --add safe.directory /code/KPF-Pipeline\n" . "python $pythonscript $procdate >& ${pylogfile}\n" . + "cp -p /code/KPF-Pipeline/${pylogfile} /masters/${procdate}\n" . + "rm /code/KPF-Pipeline/${pylogfile}\n" . "exit\n"; my $makescriptcmd = "echo \"$script\" > $dockercmdscript"; `$makescriptcmd`; @@ -231,35 +233,4 @@ print "Terminating normally...\n"; -# Copy the Python-script log file from runtime directory to product directory. - -if (-e $pylogfile) { - - if (! (copy($pylogfile, $destdir))) { - print "*** Warning: couldn't copy $pylogfile to $destdir ($!); " . - "skipping...\n"; - } else { - print "Copied $pylogfile to $destdir\n"; - } -} - - -# Copy the log file generated by this Perl script to product directory, assuming -# that the following convention for log-file naming is followed. - -my ($logfileBase) = $iam =~ /(.+)\.pl/; - -my $logfile = $logdir . '/' . $logfileBase . '_' . $procdate . '.out'; - -if (-e $logfile) { - - if (! (copy($logfile, $destdir))) { - die "*** Warning: couldn't copy $logfile to $destdir ($!); " . 
- "quitting...\n"; - } else { - print "Copied $logfile to $destdir\n"; - } -} - - exit(0); From cefda1ad7dc040c6017c0acf1359a52aed1b5ab6 Mon Sep 17 00:00:00 2001 From: DaiFei Date: Mon, 25 Sep 2023 10:46:16 -0700 Subject: [PATCH 61/86] new reference files --- modules/quicklook/configs/default.cfg | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/quicklook/configs/default.cfg b/modules/quicklook/configs/default.cfg index ef4c06fa8..c11ad9149 100644 --- a/modules/quicklook/configs/default.cfg +++ b/modules/quicklook/configs/default.cfg @@ -1,6 +1,6 @@ [2D] master_socal =/data/2D_take1/20220516/KP.20220516.00473.95.fits -master_flat =/data/masters/20230515/kpf_20230515_master_flat.fits +master_flat =/data/reference_fits/kpf_20230920_master_flat.fits master_bias =/data/masters/20230515/kpf_20230515_master_bias.fits master_dark =/data/masters/20230515/kpf_20230515_master_dark.fits master_ThAr =/data/masters/MasterThar_20221109_2D.fits @@ -30,7 +30,7 @@ CCF_RV_2=CCD2RV [L1] wave_soln = /data/masters/MasterLFCWLS.fits -order_trace = /code/KPF-Pipeline/static/kpf_20230418_order_trace_ +order_trace = /data/reference_fits/kpf_20230920_master_flat_ n_per_row = 8 @@ -44,7 +44,7 @@ input_prefix_l2 =/data/L2/ gain = 1.48424 [CaHK] -trace_file =/data/masters/kpfMaster_HKOrderBounds20220909.csv +trace_file =/data/masters/kpfMaster_HKOrderBounds20230818.csv cahk_wav=/data/masters/kpfMaster_HKwave20220909_sci.csv [Nightly] From b544de34b9d175fe150867a7a70c4b312edce3ff Mon Sep 17 00:00:00 2001 From: cwang2016 Date: Tue, 26 Sep 2023 18:03:08 -0700 Subject: [PATCH 62/86] update ca hk wavelength solution files defined in kpf_drp.cfg --- configs/kpf_drp.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configs/kpf_drp.cfg b/configs/kpf_drp.cfg index e557b10e8..1e764765a 100644 --- a/configs/kpf_drp.cfg +++ b/configs/kpf_drp.cfg @@ -113,7 +113,7 @@ hk_wave_exts = ['CA_HK_SCI_WAVE', 'CA_HK_SKY_WAVE'] hk_dark_fits = masters/KP.20221029.21537.28.fits hk_bias_fits = None hk_trace_path = masters/kpfMaster_HKOrderBounds20230818.csv -hk_wavelength_path = ["masters/kpfMaster_HKwave20220909_sci.csv", "masters/kpfMaster_HKwave20220909_sky.csv"] +hk_wavelength_path = ["masters/kpfMaster_HKwave20230818_sci.csv", "masters/kpfMaster_HKwave20230818_sky.csv"] # for module process: do_l0_to_2d = True From 36dd7dbf46228fe9978b0710ea6034f18088cc3d Mon Sep 17 00:00:00 2001 From: cwang2016 Date: Wed, 27 Sep 2023 09:43:06 -0700 Subject: [PATCH 63/86] update hk_wavelength_path in kpf_masters_l1.cfg --- configs/kpf_masters_l1.cfg | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/configs/kpf_masters_l1.cfg b/configs/kpf_masters_l1.cfg index a1ba6e2c9..79b8e1ebd 100644 --- a/configs/kpf_masters_l1.cfg +++ b/configs/kpf_masters_l1.cfg @@ -126,7 +126,8 @@ hk_dark_fits = masters/KP.20221029.21537.28.fits hk_bias_fits = None #hk_trace_path = masters/kpfMaster_HKOrderBounds20220909.csv hk_trace_path = masters/kpfMaster_HKOrderBounds20230818.csv -hk_wavelength_path = ["masters/kpfMaster_HKwave20220909_sci.csv", "masters/kpfMaster_HKwave20220909_sky.csv"] +#hk_wavelength_path = ["masters/kpfMaster_HKwave20220909_sci.csv", "masters/kpfMaster_HKwave20220909_sky.csv"] +hk_wavelength_path = ["masters/kpfMaster_HKwave20230818_sci.csv", "masters/kpfMaster_HKwave20230818_sky.csv"] # for L0->2D process [WATCHFOR_L0] From e2a238aa6d6756115feb46866864afb9424adc0d Mon Sep 17 00:00:00 2001 From: DaiFei Date: Wed, 27 Sep 2023 10:48:31 -0700 Subject: [PATCH 64/86] cahk --- 
modules/quicklook/configs/default.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/quicklook/configs/default.cfg b/modules/quicklook/configs/default.cfg index c11ad9149..3c76d83e0 100644 --- a/modules/quicklook/configs/default.cfg +++ b/modules/quicklook/configs/default.cfg @@ -45,7 +45,7 @@ gain = 1.48424 [CaHK] trace_file =/data/masters/kpfMaster_HKOrderBounds20230818.csv -cahk_wav=/data/masters/kpfMaster_HKwave20220909_sci.csv +cahk_wav=/data/masters/kpfMaster_HKwave20230818_sci.csv [Nightly] exposures_dir=/data/2D/ From 543ef2f031ca8b120ad537fdfa078484c1fd136c Mon Sep 17 00:00:00 2001 From: Russ Date: Mon, 2 Oct 2023 10:44:02 -0700 Subject: [PATCH 65/86] Fixed bugs in and made changes to method monotonic_check necessary for running under the framework. --- configs/quality_control_example.cfg | 9 ++- .../quality_control/src/quality_control.py | 63 ++++++++++++++----- .../src/quality_control_framework.py | 22 ++++--- 3 files changed, 68 insertions(+), 26 deletions(-) diff --git a/configs/quality_control_example.cfg b/configs/quality_control_example.cfg index 151091686..6859095db 100644 --- a/configs/quality_control_example.cfg +++ b/configs/quality_control_example.cfg @@ -9,9 +9,12 @@ log_directory = /data/logs/ [ARGUMENT] data_type = KPF # L0, 2D, L1, L2 are possible choices for data_level_str. -data_level_str = L0 -input_fits_filename = /data/L0/20230430/KP.20230430.05981.93.fits -output_fits_filename = /data/2D/20230430/KP.20230430.05981.93_2D.fits +#data_level_str = L0 +#input_fits_filename = /data/L0/20230927/KP.20230927.82330.61.fits +#output_fits_filename = /data/2D/20230927/KP.20230927.82330.61_2D.fits +data_level_str = L1 +input_fits_filename = /data/masters/20230927/kpf_20230927_master_flat_L1.fits +output_fits_filename = /data/masters/20230927/kpf_20230927_master_flat_L1_QC.fits [MODULE_CONFIGS] quality_control = modules/quality_control/configs/default.cfg diff --git a/modules/quality_control/src/quality_control.py b/modules/quality_control/src/quality_control.py index a0de73ce7..05b19be7f 100644 --- a/modules/quality_control/src/quality_control.py +++ b/modules/quality_control/src/quality_control.py @@ -168,7 +168,7 @@ def __init__(self): self.descriptions[name1] = 'Jarque-Bera test of pixel values for RED AMP-1 detector.' self.data_types[name1] = 'float' self.fits_keywords[name1] = 'JBTRED1' - self.fits_comments[name1] = 'J-B test for RED AMP-1 detector' + self.fits_comments[name1] = 'QC: J-B test for RED AMP-1 detector' self.db_columns[name1] = None self.methods[name1] = ["add_qc_keyword_to_header"] @@ -177,7 +177,7 @@ def __init__(self): self.descriptions[name2] = 'Check if wavelength solution is monotonic.' self.data_types[name2] = 'int' self.fits_keywords[name2] = 'MONOTWLS' - self.fits_comments[name2] = 'Monotonic wavelength-solution check' + self.fits_comments[name2] = 'QC: Monotonic wavelength-solution check' self.db_columns[name2] = None self.methods[name2] = ["add_qc_keyword_to_header","monotonic_check","add_qc_keyword_to_header_for_monotonic_wls"] @@ -186,7 +186,7 @@ def __init__(self): self.descriptions[name3] = 'Check if data in file are not junk.' 
         self.data_types[name3] = 'int'
         self.fits_keywords[name3] = 'JUNKDATA'
-        self.fits_comments[name3] = 'Not-junk check'
+        self.fits_comments[name3] = 'QC: Not-junk check'
         self.db_columns[name3] = None
         self.methods[name3] = ["add_qc_keyword_to_header"]
 
@@ -261,6 +261,8 @@ def add_qc_keyword_to_header(self,qc_name,value):
 
         self.fits_object.header['PRIMARY'][keyword] = (value,comment)
 
+        print('---->add_qc_keyword_to_header: qc_name,keyword,value,comment = {},{},{},{}'.format(qc_name,keyword,value,comment))
+
 
 #####################################################################
 
@@ -352,7 +354,7 @@ class QCL1(QC):
 
         fits_object = from_fits('KPF',l1_file)
         qcl1 = qc.QCL1(fits_object)
-        qcl1.add_qc_keyword_to_header_for_monotonic_wls()
+        qcl1.add_qc_keyword_to_header_for_monotonic_wls(qc_name)
         to_fits(qcl1.fits_object,l1_file)
     """
 
@@ -364,25 +366,22 @@ def __init__(self,fits_object):
 
     def add_qc_keyword_to_header_for_monotonic_wls(self,qc_name):
 
-        keyword = self.qcdefinitions.fits_keywords[qc_name]
-        comment = self.qcdefinitions.fits_comments[qc_name]
-        qc_pass = self.monotonic_check(self.fits_filename)
+        qc_pass, bad_orders = self.monotonic_check()  # unpack (bool, list); a bare tuple would always be truthy
 
         if qc_pass:
             value = 1
         else:
             value = 0
 
-        self.fits_object.header['PRIMARY'][keyword] = (value,comment)
+        self.add_qc_keyword_to_header(qc_name,value)
 
 
-    def monotonic_check(self,L1,debug=False):
+    def monotonic_check(self,debug=False):
         """
         This Quality Control function checks to see if a wavelength solution is monotonic, specifically
         if wavelength decreases (or stays constant) with increasing array index.
 
         Args:
-             L1 - an L1 file that the QC check is to be run on
              debug - an optional flag.  If True, nonmonotonic orders/orderlets will be noted with
                      print statements and plots.
 
@@ -391,7 +390,14 @@ def monotonic_check(self,L1,debug=False):
              bad_orders - an array of strings listing the nonmonotonic orders and orderlets
         """
 
-        #self.L1 = L1
+        L1 = self.fits_object
+
+        if debug:
+            print(L1.info())
+            type_L1 = type(L1)
+            print("type_L1 = ",type_L1)
+            print("L1 = ",L1)
+
         QC_pass = True
         bad_orders = []
 
@@ -405,13 +411,39 @@
 
         # Iterate over extensions (orderlets) and orders to check for monotonicity in each combination.
for ext in extensions: - if ext in L1: # Check if extension exists (e.g., if RED isn't processed) + + if debug: + print("ext = ",ext) + + naxis1 = L1.header[ext]["NAXIS1"] + naxis2 = L1.header[ext]["NAXIS2"] + extname = L1.header[ext]["EXTNAME"] + + if debug: + print("naxis1,naxis2,extname = ",naxis1,naxis2,extname) + + if ext == extname: # Check if extension exists (e.g., if RED isn't processed) + + if debug: + data_shape = np.shape(L1[ext]) + print("data_shape = ", data_shape) + norders = L1[ext].shape[0] for o in range(norders): - WLS = L1[ext].data[o,:] # wavelength solution of the current order/orderlet + + if debug: + print("order = ",o) + + np_obj_ffi = np.array(L1[ext]) + + if debug: + print("wls_shape = ", np.shape(np_obj_ffi)) + + WLS = np_obj_ffi[o,:] # wavelength solution of the current order/orderlet + isMonotonic = np.all(WLS[:-1] >= WLS[1:]) # this expression determines monotonicity for the orderlet/order if not isMonotonic: - QC_pass = False # the QC test fails if one order/orderlet is not monotonic + QC_pass = False # the QC test fails if one order/orderlet is not monotonic bad_orders.append(ext + '(' + str(o)+')') # append the bad order/orderlet to the list if debug: print('L1[' + ext + ']['+ str(o) +']: monotonic = ' + str(isMonotonic)) @@ -423,7 +455,8 @@ def monotonic_check(self,L1,debug=False): print("File: " + L1['PRIMARY'].header['OFNAME']) except: pass - print("Monotonic = " + str(QC_pass)) + + print("QC_pass = ",QC_pass) return QC_pass, bad_orders diff --git a/modules/quality_control/src/quality_control_framework.py b/modules/quality_control/src/quality_control_framework.py index 80af91de4..15622e896 100644 --- a/modules/quality_control/src/quality_control_framework.py +++ b/modules/quality_control/src/quality_control_framework.py @@ -54,7 +54,7 @@ def __init__(self, action, context): self.logger.info('Started {}'.format(self.__class__.__name__)) self.logger.debug('module_config_path = {}'.format(self.module_config_path)) - + self.logger.info('self.data_type = {}'.format(self.data_type)) self.logger.info('self.fits_filename = {}'.format(self.fits_filename)) @@ -70,8 +70,8 @@ def __init__(self, action, context): self.logger.info('self.debug_level_cfg = {}'.format(self.debug_level_cfg)) - self.logger.info('Type of self.debug_level_cfg = {}'.format(type(self.debug_level_cfg))) - + self.logger.info('Type of self.debug_level_cfg = {}'.format(type(self.debug_level_cfg))) + def _perform(self): @@ -82,7 +82,6 @@ def _perform(self): quality_control_exit_code = 0 - # Perform quality control. self.logger.info('Performing quality control on {}'.format(self.fits_filename)) @@ -98,10 +97,17 @@ def _perform(self): value = 3.14159256 qc_obj.add_qc_keyword_to_header(name,value) elif 'L1' in self.data_level_str: + + self.logger.info('self.data_level_str = {}'.format(self.data_level_str)) + qc_obj = qc.QCL1(self.fits_object) - name = 'jarque_bera_test_red_amp1' - value = 3.14159256 - qc_obj.add_qc_keyword_to_header(name,value) + + name = 'monotonic_wavelength_solution_check' + try: + qc_obj.add_qc_keyword_to_header_for_monotonic_wls(name) + except: + pass + elif 'L2' in self.data_level_str: qc_obj = qc.QCL2(self.fits_object) name = 'jarque_bera_test_red_amp1' @@ -114,7 +120,7 @@ def _perform(self): qc_obj.qcdefinitions.list_qc_metrics() # Finish. 
- + self.logger.info('Finished {}'.format(self.__class__.__name__)) return Arguments([quality_control_exit_code, self.fits_object]) From 9fc57022c49c9a33e2805f17bf8b0e4151dc1c03 Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Thu, 5 Oct 2023 10:21:04 -0700 Subject: [PATCH 66/86] undo changes to config --- configs/framework_multi.cfg | 5 ++++- kpfpipe/cli.py | 12 ++++++++++-- scripts/launch_QLP.sh | 2 +- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/configs/framework_multi.cfg b/configs/framework_multi.cfg index 718f64343..59803668f 100644 --- a/configs/framework_multi.cfg +++ b/configs/framework_multi.cfg @@ -23,6 +23,8 @@ monitor_interval = 1 # sec # In second. # event_timeout = 3600 +# event_timeout = 5 + # # Default event to trigger on new files @@ -76,7 +78,8 @@ temp_directory = "temp" # # no_event_event = None # -no_event_event = Event ('no_event', None) +no_event_event = Event('no_event', None) +# no_event_event = Event('exit', None) # # How long to wait before sending no_event_event diff --git a/kpfpipe/cli.py b/kpfpipe/cli.py index a67151aa2..63ea00409 100755 --- a/kpfpipe/cli.py +++ b/kpfpipe/cli.py @@ -12,6 +12,7 @@ import time import threading import logging +import tempfile from multiprocessing import Process, cpu_count from watchdog.observers import Observer @@ -84,7 +85,7 @@ def worker(worker_num, pipeline_config, framework_logcfg_file, framework_config_ # tells this instance to wait for something to happen, forever # qm_only=False, ingest_data_only=False, framework.pipeline.start(pipeline_config) - framework.start(wait_for_event=True, continuous=True) + framework.start(wait_for_event=False, continuous=False) class FileAlarm(PatternMatchingEventHandler): @@ -172,7 +173,13 @@ def main(): if args.reprocess: print(f"Setting queue manager to shutdown after reprocessing.") frame_config.set('DEFAULT', 'event_timeout', '5') - frame_config.set('DEFAULT', 'no_event_event', 'None') + frame_config.set('DEFAULT', 'no_event_event', 'exit') + frame_config.set('DEFAULT', 'no_event_wait_time', '60') + + with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='cfg') as tp: + frame_config.write(tp) + + frame_config = tp.name # Using the multiprocessing library, create the specified number of instances if args.watch and args.ncpus > 1: @@ -183,6 +190,7 @@ def main(): else: frame_config = ConfigClass(framework_config) + # Try to initialize the framework try: framework = Framework(pipe, frame_config) diff --git a/scripts/launch_QLP.sh b/scripts/launch_QLP.sh index cb017fa51..d00c3acf5 100755 --- a/scripts/launch_QLP.sh +++ b/scripts/launch_QLP.sh @@ -2,7 +2,7 @@ # This script is designed to launch the 5 QLP instances with a single command -ncpus=1 +ncpus=4 data_levels=("L0" "2D" "L1" "L2" "masters") for lvl in "${data_levels[@]}"; do From c6379d68031576b4ced2340f9078eac2577f1562 Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Thu, 5 Oct 2023 10:21:39 -0700 Subject: [PATCH 67/86] undo changes to config --- kpfpipe/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kpfpipe/cli.py b/kpfpipe/cli.py index 63ea00409..f9f9c22bd 100755 --- a/kpfpipe/cli.py +++ b/kpfpipe/cli.py @@ -85,7 +85,7 @@ def worker(worker_num, pipeline_config, framework_logcfg_file, framework_config_ # tells this instance to wait for something to happen, forever # qm_only=False, ingest_data_only=False, framework.pipeline.start(pipeline_config) - framework.start(wait_for_event=False, continuous=False) + framework.start(wait_for_event=True, continuous=True) class 
FileAlarm(PatternMatchingEventHandler): From 562fe6b479f273512601c7f23a648a61062a8ad6 Mon Sep 17 00:00:00 2001 From: BJ Fulton Date: Thu, 5 Oct 2023 10:43:35 -0700 Subject: [PATCH 68/86] add garbage collection --- kpfpipe/pipelines/kpfpipeline.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kpfpipe/pipelines/kpfpipeline.py b/kpfpipe/pipelines/kpfpipeline.py index e50ae8c73..0494c8059 100644 --- a/kpfpipe/pipelines/kpfpipeline.py +++ b/kpfpipe/pipelines/kpfpipeline.py @@ -2,6 +2,7 @@ # algorithm module. from asyncio.log import logger import os +import gc import sys from copy import copy import importlib @@ -264,5 +265,7 @@ def next_file(self, action: Action, context: ProcessingContext): file_path)) self.start_recipe(action, context) + + gc.collect() return Arguments(name="next_file") From 716d23d0cde2185917e2b0bdc9d34b1c2b56ff4e Mon Sep 17 00:00:00 2001 From: Russ Date: Sun, 8 Oct 2023 08:39:25 -0700 Subject: [PATCH 69/86] Fixed typo in import statement. Added call to QualityControlFramework. --- recipes/kpf_drp.recipe | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/recipes/kpf_drp.recipe b/recipes/kpf_drp.recipe index a78616eab..35d01e6dc 100644 --- a/recipes/kpf_drp.recipe +++ b/recipes/kpf_drp.recipe @@ -5,7 +5,7 @@ from modules.spectral_extraction.src.spectral_extraction import SpectralExtracti from modules.radial_velocity.src.radial_velocity_init import RadialVelocityInit from modules.radial_velocity.src.radial_velocity import RadialVelocity from modules.radial_velocity.src.radial_velocity_reweighting_ref import RadialVelocityReweightingRef -from modules.radial_velocoty.src.radial_velocity_reweighting import RadialVelocityReweighting +from modules.radial_velocity.src.radial_velocity_reweighting import RadialVelocityReweighting from modules.ca_hk.src.ca_hk_extraction import CaHKExtraction from modules.Utils.string_proc import str_replace from modules.Utils.string_proc import date_from_kpffile @@ -24,6 +24,7 @@ from database.modules.query_db_nearest_master_files.src.query_db_nearest_master_ from database.modules.query_db_one_nearest_master_file.src.query_db_one_nearest_master_file_framework import QueryDBOneNearestMasterFileFramework from modules.quality_control_exposure.src.quality_control_exposure_framework import QualityControlExposureFramework from modules.query_db_l0_file.src.query_db_l0_file_framework import QueryDBL0FileFramework +from modules.quality_control.src.quality_control_framework import QualityControlFramework # set the flags for each process # note: no rv reweighting is made here. @@ -546,6 +547,33 @@ if do_spectral_extraction or do_hk or do_bc: start_bary_index=s_bary_idx[idx], wls_ext=wave_to_ext[idx][0]) result = to_fits(output_data, output_lev1_file) + + + + + + # do quality_control + if True: + quality_control_return_list = QualityControlFramework('KPF', + 'L1', + output_lev1_file, + output_data, + 1) + + exit_code = quality_control_return_list[0] + output_data = quality_control_return_list[1] + + if output_data != None: + result = to_fits(output_data, output_lev1_file) + + + + + + + + + # do ca_hk extraction if do_hk: # add ca_hk extraction result to existing L1 file or create a new L1 file if L1 file doesn't exist From bddb4d4112c8748b09580ff1fb90b8100f84ad48 Mon Sep 17 00:00:00 2001 From: Russ Date: Sun, 8 Oct 2023 08:39:32 -0700 Subject: [PATCH 70/86] Upgrade to robustly select appropriate FITS file for prototype product FITS header. 
Modified data units of stacked-data output from electrons/second to electrons. --- .../src/master_arclamp_framework.py | 48 +++++++++++++++---- 1 file changed, 40 insertions(+), 8 deletions(-) diff --git a/modules/master_arclamp/src/master_arclamp_framework.py b/modules/master_arclamp/src/master_arclamp_framework.py index 9a1a8fe44..cf2cdb0f2 100644 --- a/modules/master_arclamp/src/master_arclamp_framework.py +++ b/modules/master_arclamp/src/master_arclamp_framework.py @@ -26,7 +26,7 @@ class MasterArclampFramework(KPF0_Primitive): by stacking input images for exposures with IMTYPE.lower() == 'arclamp' and specified input arclamp_object. The input FITS files are selected from the given path that can include many kinds of FITS files, not just arclamp files. - Subtract master bias from each input flat 2D raw image. Separately normalize + Subtract master bias from each input flat 2D raw image. Separately normalize debiased images by EXPTIME, and then subtract master dark. Optionally divide by the master flat. Stack all normalized debiased, flattened images. Set appropriate infobit if number of pixels with less than 5 samples @@ -173,7 +173,41 @@ def _perform(self): arclamp_object_list.append(header_object) #self.logger.debug('arclamp_file_path,exp_time,header_object = {},{},{}'.format(arclamp_file_path,exp_time,header_object)) - tester = KPF0.from_fits(all_arclamp_files[0]) + + # Ensure prototype FITS header for product file has matching OBJECT and contains both + # GRNAMPS and REDAMPS keywords (indicating that the data exist). + + for arclamp_file_path in (all_arclamp_files): + + tester = KPF0.from_fits(arclamp_file_path) + tester_object = tester.header['PRIMARY']['OBJECT'] + + if tester_object == self.arclamp_object: + + try: + tester_grnamps = tester.header['PRIMARY']['GRNAMPS'] + except KeyError as err: + continue + + try: + tester_redamps = tester.header['PRIMARY']['REDAMPS'] + except KeyError as err: + continue + + self.logger.info('Prototype FITS header from {}'.format(arclamp_file_path)) + + break + + else: + + tester = None + + if tester is None: + master_arclamp_exit_code = 6 + exit_list = [master_arclamp_exit_code,master_arclamp_infobits] + return Arguments(exit_list) + + del_ext_list = [] for i in tester.extensions.keys(): if i != 'GREEN_CCD' and i != 'RED_CCD' and i != 'PRIMARY' and i != 'RECEIPT' and i != 'CONFIG': @@ -257,11 +291,9 @@ def _perform(self): single_frame_data = frames_data[i] exp_time = frames_data_exptimes[i] - #self.logger.debug('Normalizing arclamp image: i,fitsfile,ffi,exp_time = {},{},{},{}'.format(i,frames_data_path[i],ffi,exp_time)) - - single_normalized_frame_data = single_frame_data / exp_time # Separately normalize by EXPTIME. + # Subtract off exposure time times master-dark-current rate. - single_normalized_frame_data -= np.array(master_dark_data[ffi]) # Subtract master-dark-current rate. 
+            single_normalized_frame_data = single_frame_data - exp_time * np.array(master_dark_data[ffi])
 
             if self.skip_flattening == 0:
                 #self.logger.debug('Flattening arclamp image: i,fitsfile,ffi,exp_time = {},{},{},{}'.format(i,frames_data_path[i],ffi,exp_time))
@@ -317,7 +349,7 @@ def _perform(self):
         for ffi in self.lev0_ffi_exts:
             if ffi in del_ext_list: continue
-            master_holder.header[ffi]['BUNIT'] = ('DN/sec','Units of master arclamp')
+            master_holder.header[ffi]['BUNIT'] = ('electrons','Units of master arclamp')
             master_holder.header[ffi]['NFRAMES'] = (n_frames_kept[ffi],'Number of frames in input stack')
             master_holder.header[ffi]['SKIPFLAT'] = (self.skip_flattening,'Flag to skip flat-field calibration')
             master_holder.header[ffi]['NSIGMA'] = (self.n_sigma,'Number of sigmas for data-clipping')
@@ -337,7 +369,7 @@ def _perform(self):
                     master_holder.header[ffi]['BIT01'] = ('2**1 = 2', 'RED_CCD has gt 1% pixels with lt 5 samples')
 
             ffi_unc_ext_name = ffi + '_UNC'
-            master_holder.header[ffi_unc_ext_name]['BUNIT'] = ('DN/sec','Units of master-arclamp uncertainty')
+            master_holder.header[ffi_unc_ext_name]['BUNIT'] = ('electrons','Units of master-arclamp uncertainty')
 
             ffi_cnt_ext_name = ffi + '_CNT'
             master_holder.header[ffi_cnt_ext_name]['BUNIT'] = ('Count','Number of stack samples')

From dae57a476e1ed42db81642471c43cf8d3611dec2 Mon Sep 17 00:00:00 2001
From: Andrew Howard
Date: Sun, 8 Oct 2023 23:44:08 -0700
Subject: [PATCH 71/86] Added SKY plot and zoom-in on H&K regions

---
 configs/quicklook_match.cfg  |   2 +-
 modules/Utils/analyze_hk.py  | 164 ++++++++++++++++++++++++++++++-----
 modules/quicklook/src/alg.py |  19 ++--
 3 files changed, 159 insertions(+), 26 deletions(-)

diff --git a/configs/quicklook_match.cfg b/configs/quicklook_match.cfg
index ec0a637b9..2f815ee80 100644
--- a/configs/quicklook_match.cfg
+++ b/configs/quicklook_match.cfg
@@ -7,7 +7,7 @@ log_directory = /data/logs_QLP/
 
 [ARGUMENT]
 # see quicklook_match.recipe for a description of how to set fullpath
-fullpath = '/data/2D/202?????/KP.20230914.31916.48*.fits'
+fullpath = '/data/2D/202?????/KP.20230925.55748.52*.fits'
 #fullpath = '/data/masters/20230419/*.fits'
 
 [MODULE_CONFIGS]
diff --git a/modules/Utils/analyze_hk.py b/modules/Utils/analyze_hk.py
index c5aa59a40..fb33ce730 100644
--- a/modules/Utils/analyze_hk.py
+++ b/modules/Utils/analyze_hk.py
@@ -3,6 +3,7 @@
 import pandas as pd
 import matplotlib.pyplot as plt
 import matplotlib.patches as patches
+from matplotlib.ticker import ScalarFormatter
 from modules.Utils.kpf_parse import HeaderParse
 
 class AnalyzeHK:
@@ -80,7 +81,7 @@ def plot_HK_image_2D(self, fig_path=None, kpftype='L0', show_plot=False):
         """
         fig, ax = plt.subplots(figsize = (12,5),tight_layout=True)
         im = ax.imshow(self.image,
-                       vmin = np.nanpercentile(self.image,0.1),
+                       vmin = np.nanpercentile(self.image,0.01),
                        vmax = np.nanpercentile(self.image,99.9),
                        interpolation = 'None',
                        origin = 'lower',
@@ -139,18 +140,32 @@ def plot_HK_image_2D(self, fig_path=None, kpftype='L0', show_plot=False):
             plt.show()
         plt.close('all')
 
-    def plot_HK_spectrum_1D(self, fig_path=None, show_plot=False):
+    def plot_HK_spectrum_1D(self, trace='sci', fig_path=None, show_plot=False):
+
+        """
+        Generate a 1D spectrum plot of the Ca H&K spectrum.
+
+        Args:
+            trace (string) - 'sci' or 'sky' to select the fiber
+            fig_path (string) - set to the path for the file to be generated.
+            show_plot (boolean) - show the plot in the current environment.
+
+        Returns:
+            PNG plot in fig_path or shows the plot in the current environment
+            (e.g., in a Jupyter Notebook).
+ + """ orders = np.array(self.wave_lib.columns) padding = 1 - plt.figure(figsize=(12,5),tight_layout=True) + #plt.figure(figsize=(12,5),tight_layout=True) fig, ax = plt.subplots(1, 1, figsize=(12,5)) color_grid = ['purple','darkblue','darkgreen','gold','darkorange','darkred'] chk_bandpass = [383.0, 401.0]#[384.0, 401.7] - caK = [393.2, 393.5] - caH = [396.7, 397.0] + caK = [393.3663-0.150, 393.3663+0.150] + caH = [396.8469-0.150, 396.8469+0.150] Vcont = [389.9, 391.9] Rcont = [397.4, 399.4] @@ -159,35 +174,144 @@ def plot_HK_spectrum_1D(self, fig_path=None, show_plot=False): for i in range(len(orders)): wav = self.wave_lib[i] # This should be correct, but the sky and sci orders are flipped in the definition file - #flux = np.sum(self.image[self.trace_location_sci[i]['x1']:self.trace_location_sci[i]['x2'],:],axis=0) - # so for now use the sky orders - #flux = np.sum(self.image[self.trace_location_sky[i]['x1']:self.trace_location_sky[i]['x2'],:],axis=0) - specHK[i,:] = np.sum(self.image[self.trace_location_sky[i]['x1']:self.trace_location_sky[i]['x2'],:],axis=0) + if trace == 'sci': + specHK[i,:] = np.sum(self.image[self.trace_location_sky[i]['x1']:self.trace_location_sky[i]['x2'],:],axis=0) + elif trace == 'sky': + specHK[i,:] = np.sum(self.image[self.trace_location_sci[i]['x1']:self.trace_location_sci[i]['x2'],:],axis=0) ax.plot(wav[padding:-padding], specHK[i,padding:-padding], color = color_grid[i], linewidth = 1.0) + ymin = np.nanpercentile(specHK[:,padding:-padding],0.1) ymax = 1.15*np.nanpercentile(specHK[:,padding:-padding],99.9) - ax.set_ylim(0, ymax) + yrange = (ymax-ymin) + ax.set_ylim(ymin, ymax) ax.set_xlim(chk_bandpass[0], chk_bandpass[1]) # Add labels - plt.title('Ca H&K Spectrum: ' + str(self.ObsID) + ' - ' + self.name, fontsize=18) + plt.title('Ca H&K ' + trace.upper() + ' Spectrum: ' + str(self.ObsID) + ' - ' + self.name, fontsize=18) ax.set_xlabel('Wavelength (nm)',fontsize=18) ax.set_ylabel('Flux (ADU)',fontsize=18) ax.xaxis.set_tick_params(labelsize=14) ax.yaxis.set_tick_params(labelsize=14) - ax.fill_between(chk_bandpass, y1=0, y2=ymax, facecolor='gray', alpha=0.3, zorder=-100) - ax.fill_between(caH, y1=0, y2=ymax, facecolor='m', alpha=0.3) - ax.fill_between(caK, y1=0, y2=ymax, facecolor='m', alpha=0.3) - ax.fill_between(Vcont, y1=0, y2=ymax, facecolor='c', alpha=0.3) - ax.fill_between(Rcont, y1=0, y2=ymax, facecolor='c', alpha=0.3) + ax.fill_between(chk_bandpass, y1=ymin, y2=ymin+yrange, facecolor='gray', alpha=0.3, zorder=-100) + ax.fill_between(caH, y1=ymin, y2=ymin+yrange, facecolor='m', alpha=0.3) + ax.fill_between(caK, y1=ymin, y2=ymin+yrange, facecolor='m', alpha=0.3) + ax.fill_between(Vcont, y1=ymin, y2=ymin+yrange, facecolor='c', alpha=0.3) + ax.fill_between(Rcont, y1=ymin, y2=ymin+yrange, facecolor='c', alpha=0.3) ax.text(np.mean(Vcont)-0.89, 0.95*ymax, 'V continuum') ax.text(np.mean(Rcont)-0.89, 0.95*ymax, 'R continuum') - ax.text(np.mean(caK)-0.40, 0.95*ymax, 'K') - ax.text(np.mean(caH)-0.40, 0.95*ymax, 'H') - ax.plot([396.847,396.847],[0,ymax],':',color ='black') - ax.plot([393.366,393.366],[0,ymax],':',color ='black') + ax.text(np.mean(caK) -0.40, 0.95*ymax, 'K') + ax.text(np.mean(caH) -0.40, 0.95*ymax, 'H') + ax.plot([396.847,396.847],[ymin,ymax],':',color ='black') + ax.plot([393.366,393.366],[ymin,ymax],':',color ='black') + ax.plot([chk_bandpass[0], chk_bandpass[1]],[0,0],':',color ='white') + + # Display the plot + if fig_path != None: + t0 = time.process_time() + plt.savefig(fig_path, dpi=200, facecolor='w') + self.logger.info(f'Seconds to execute 
savefig: {(time.process_time()-t0):.1f}')
         if show_plot == True:
             plt.show()
         plt.close('all')
 
+    def plot_HK_spectrum_1D_zoom(self, trace='sci', fig_path=None, show_plot=False):
+
+        """
+        Generate a 1D spectrum plot zoomed in on the Ca H&K lines.
+
+        Args:
+            trace (string) - 'sci' or 'sky' to select the fiber
+            fig_path (string) - set to the path for the file to be generated.
+            show_plot (boolean) - show the plot in the current environment.
+
+        Returns:
+            PNG plot in fig_path or shows the plot in the current environment
+            (e.g., in a Jupyter Notebook).
+
+        """
+
+        orders = np.array(self.wave_lib.columns)
+        padding = 1
+
+        fig, (axk, axh) = plt.subplots(1,2, figsize=(12,5))
+
+        color_grid = ['purple','darkblue','darkgreen','gold','darkorange','darkred']
+        chk_bandpass = [383.0, 401.0]#[384.0, 401.7]
+        caK = [393.3663-0.150, 393.3663+0.150]
+        caH = [396.8469-0.150, 396.8469+0.150]
+        Vcont = [389.9, 391.9]
+        Rcont = [397.4, 399.4]
+
+        # Compute and Plot spectra
+        specHK = np.zeros((len(orders), self.image.shape[1]), dtype=np.float64)
+        for i in range(len(orders)):
+            # This should be correct, but the sky and sci orders are flipped in the definition file
+            if trace == 'sci':
+                specHK[i,:] = np.sum(self.image[self.trace_location_sky[i]['x1']:self.trace_location_sky[i]['x2'],:],axis=0)
+            elif trace == 'sky':
+                specHK[i,:] = np.sum(self.image[self.trace_location_sci[i]['x1']:self.trace_location_sci[i]['x2'],:],axis=0)
+
+        axh.set_xlim(np.mean(caH)-1.25, np.mean(caH)+1.25)
+        for o in [4,5]:
+            wav = self.wave_lib[o]
+            axh.step(wav[padding:-padding],
+                     specHK[o,padding:-padding],
+                     color = color_grid[o],
+                     linewidth = 2.0)
+        axk.set_xlim(np.mean(caK)-1.25, np.mean(caK)+1.25)
+        for o in [3]:
+            wav = self.wave_lib[o]
+            axk.step(wav[padding:-padding],
+                     specHK[o,padding:-padding],
+                     color = color_grid[o],
+                     linewidth = 2.0)
+
+        # Find y ranges
+        indh = (self.wave_lib[5] > (np.mean(caH)-1.25)).values * (self.wave_lib[5] < (np.mean(caH)+1.25)).values
+        indk = (self.wave_lib[3] > (np.mean(caK)-1.25)).values * (self.wave_lib[3] < (np.mean(caK)+1.25)).values
+        yminh = min([0,np.nanpercentile(specHK[5,indh],0.1)])
+        ymaxh = 1.15*np.nanpercentile(specHK[5,indh],99.9)
+        yrangeh = (ymaxh-yminh)
+        ymink = min([0,np.nanpercentile(specHK[3,indk],0.1)])
+        ymaxk = 1.15*np.nanpercentile(specHK[3,indk],99.9)
+        yrangek = (ymaxk-ymink)
+        axh.set_ylim(yminh, ymaxh)
+        axk.set_ylim(ymink, ymaxk)
+
+        # Add labels
+        axk.set_xlabel('Wavelength (nm)',fontsize=18)
+        axk.set_ylabel('Flux (ADU)',fontsize=18)
+        axk.xaxis.set_tick_params(labelsize=14)
+        axk.yaxis.set_tick_params(labelsize=14)
+        axh.set_xlabel('Wavelength (nm)',fontsize=18)
+        axh.set_ylabel('Flux (ADU)',fontsize=18)
+        axh.xaxis.set_tick_params(labelsize=14)
+        axh.yaxis.set_tick_params(labelsize=14)
+        axk.fill_between(chk_bandpass, y1=ymink, y2=ymink+yrangek, facecolor='gray', alpha=0.3, zorder=-100)
+        axh.fill_between(chk_bandpass, y1=yminh, y2=yminh+yrangeh, facecolor='gray', alpha=0.3, zorder=-100)
+        axh.fill_between(caH, y1=yminh, y2=yminh+yrangeh, facecolor='m', alpha=0.3)
+        axk.fill_between(caK, y1=ymink, y2=ymink+yrangek, facecolor='m', alpha=0.3)
+        axk.text(np.mean(caK) -0.10, ymink+0.92*yrangek, 'K', fontsize=14)
+        axh.text(np.mean(caH) -0.10, yminh+0.92*yrangeh, 'H', fontsize=14)
+        axh.plot([396.847,396.847],[yminh,ymaxh],':',color ='black')
+        axk.plot([393.366,393.366],[ymink,ymaxk],':',color ='black')
+        axk.plot([chk_bandpass[0], chk_bandpass[1]],[0,0],':',color ='white')
+        axh.plot([chk_bandpass[0], chk_bandpass[1]],[0,0],':',color ='white')
+
+        # Set y-axis to display in scientific notation
y-axis to display in scientific notation
+        formatter = ScalarFormatter(useMathText=True)
+        formatter.set_scientific(True)
+        formatter.set_powerlimits((-1,1))
+        axh.yaxis.set_major_formatter(formatter)
+        axk.yaxis.set_major_formatter(formatter)
+
+        # Add overall title to array of plots
+        ax = fig.add_subplot(111, frame_on=False)
+        ax.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
+        ax.set_title('Ca H&K ' + trace.upper() + ' Spectrum: ' + str(self.ObsID) + ' - ' + self.name + '\n', fontsize=18)
+        ax.grid(False)
 
         # Display the plot
         if fig_path != None:
diff --git a/modules/quicklook/src/alg.py b/modules/quicklook/src/alg.py
index fa9a1adbc..3d6723a56 100644
--- a/modules/quicklook/src/alg.py
+++ b/modules/quicklook/src/alg.py
@@ -252,12 +252,21 @@ def qlp_2D(self, kpf2d, output_dir):
             filename = savedir + self.ObsID + '_HK_image_2D_zoomable.png'
             self.logger.info('Generating QLP image ' + filename)
             myHK.plot_HK_image_2D(fig_path=filename, kpftype='2D', show_plot=False)
-
-            # Exposure Meter spectrum plot
-            filename = savedir + self.ObsID + '_HK_spectrum_zoomable.png'
+
+            # Ca H&K spectra plots
+            filename = savedir + self.ObsID + '_HK_spectrum_sci_zoomable.png'
             self.logger.info('Generating QLP image ' + filename)
-            myHK.plot_HK_spectrum_1D(fig_path=filename, show_plot=False)
-
+            myHK.plot_HK_spectrum_1D(trace='sci', fig_path=filename, show_plot=False)
+            filename = savedir + self.ObsID + '_HK_spectrum_sky_zoomable.png'
+            self.logger.info('Generating QLP image ' + filename)
+            myHK.plot_HK_spectrum_1D(trace='sky', fig_path=filename, show_plot=False)
+            filename = savedir + self.ObsID + '_HK_spectrum_sci_zoom_zoomable.png'
+            self.logger.info('Generating QLP image ' + filename)
+            myHK.plot_HK_spectrum_1D_zoom(trace='sci', fig_path=filename, show_plot=False)
+            filename = savedir + self.ObsID + '_HK_spectrum_sky_zoom_zoomable.png'
+            self.logger.info('Generating QLP image ' + filename)
+            myHK.plot_HK_spectrum_1D_zoom(trace='sky', fig_path=filename, show_plot=False)
+
         except Exception as e:
             self.logger.error(f"Failure in CaHK quicklook pipeline: {e}\n{traceback.format_exc()}")

From 087f9cb0ca962a5a4cf08184fb14b71c5bb03e10 Mon Sep 17 00:00:00 2001
From: Andrew Howard
Date: Sun, 8 Oct 2023 23:44:25 -0700
Subject: [PATCH 72/86] typo fix

---
 modules/Utils/analyze_l2.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/Utils/analyze_l2.py b/modules/Utils/analyze_l2.py
index 72597a425..901c5657b 100644
--- a/modules/Utils/analyze_l2.py
+++ b/modules/Utils/analyze_l2.py
@@ -194,7 +194,7 @@ def plot_CCF_grid(self, chip=None, annotate=False,
         # Add overall title to array of plots
         ax = fig.add_subplot(111, frame_on=False)
         ax.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
-        ax.set_title('2D - ' + chip_title + ' CCD: ' + str(self.ObsID) + ' - ' + self.name + '\n', fontsize=30)
+        ax.set_title('L2 - ' + chip_title + ' CCD: ' + str(self.ObsID) + ' - ' + self.name + '\n', fontsize=30)
 
         # Display the plot
         if fig_path != None:
From 1999c231fdca7bc28a2a2fbaf91144673451b082 Mon Sep 17 00:00:00 2001
From: Andrew Howard
Date: Mon, 9 Oct 2023 13:28:37 -0700
Subject: [PATCH 73/86] Added column cut plots

---
 modules/Utils/analyze_hk.py | 77 +++++++++++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)

diff --git a/modules/Utils/analyze_hk.py b/modules/Utils/analyze_hk.py
index fb33ce730..6235decd7 100644
--- a/modules/Utils/analyze_hk.py
+++ b/modules/Utils/analyze_hk.py
@@ -63,6 +63,7 @@ def __init__(self, L0,
trace_file=None, offset=-1, wavesoln_file=None, logger=No
 
         if wavesoln_file != None:
             self.wave_lib = pd.read_csv(wavesoln_file, header=None, sep = ' ', comment = '#')
             self.wave_lib *= 1 - self.rv_shift/3e5 # Doppler shift wavelength solution
+        self.color_grid = ['purple','darkblue','darkgreen','gold','darkorange','darkred']
 
     def plot_HK_image_2D(self, fig_path=None, kpftype='L0', show_plot=False):
 
@@ -140,6 +141,82 @@ def plot_HK_image_2D(self, fig_path=None, kpftype='L0', show_plot=False):
             plt.show()
         plt.close('all')
 
+    def plot_HK_2D_column_cut(self, column=512, fig_path=None, kpftype='L0', show_plot=False):
+
+        """
+        Generate a column cut plot of a 2D image of the Ca H&K spectrum.
+
+        Args:
+            fig_path (string) - set to the path for the file to be generated.
+            show_plot (boolean) - show the plot in the current environment.
+
+        Returns:
+            PNG plot in fig_path or shows the plot in the current environment
+            (e.g., in a Jupyter Notebook).
+
+        """
+        lw = 1 # linewidth
+        fig, axs = plt.subplots(2, 1, figsize=(12, 8), sharex=True, tight_layout=True)
+        axs[0].step(np.arange(255), self.image[:,column], color='k', linewidth=lw)
+        axs[1].step(np.arange(255), self.image[:,column], color='k', linewidth=lw)
+
+        ymin = np.percentile(self.image[:,column],1)
+        ymax = np.percentile(self.image[:,column],99)
+        yrange = ymax-ymin
+
+        # to determine ymin/ymax, create subarray with missing pixels for SCI orders
+        remove_indices = []
+        remove_indices.extend(range(0,25)) # hack because there's an unextracted order not in the trace locations
+        remove_indices.extend(range(230,255)) # hack because there's an unextracted order not in the trace locations
+        for key, value in self.trace_location_sky.items():
+            remove_indices.extend(range(value['x1']-2, value['x2']+2))
+        new_cut = np.delete(self.image[:,column], remove_indices)
+        ymin_off_order = np.percentile(new_cut,1)
+        ymax_off_order = np.percentile(new_cut,99)
+        yrange_off_order = ymax_off_order-ymin_off_order
+
+        # Add color highlights
+        for o in range(-1,5): # iterate through the order definitions -- -1 to +5
+            axs[0].step(range(self.trace_location_sky[o]['x1'],self.trace_location_sky[o]['x2']),
+                        self.image[self.trace_location_sky[o]['x1']:self.trace_location_sky[o]['x2'],column],
+                        color= self.color_grid[o+1], linewidth=lw)
+            axs[0].step(range(self.trace_location_sci[o]['x1'],self.trace_location_sci[o]['x2']),
+                        self.image[self.trace_location_sci[o]['x1']:self.trace_location_sci[o]['x2'],column],
+                        color= self.color_grid[o+1], linewidth=lw)
+            axs[1].step(range(self.trace_location_sky[o]['x1'],self.trace_location_sky[o]['x2']),
+                        self.image[self.trace_location_sky[o]['x1']:self.trace_location_sky[o]['x2'],column],
+                        color= self.color_grid[o+1], linewidth=lw)
+            axs[1].step(range(self.trace_location_sci[o]['x1'],self.trace_location_sci[o]['x2']),
+                        self.image[self.trace_location_sci[o]['x1']:self.trace_location_sci[o]['x2'],column],
+                        color= self.color_grid[o+1], linewidth=lw)
+
+        # Set axis parameters
+        axs[0].set_title('Ca H&K - Cut at Column #' + str(column) + ': ' + str(self.ObsID) + ' - ' + self.name, fontsize=18)
+        axs[1].set_xlabel('Row Number',fontsize=18)
+        axs[0].set_ylabel('Flux (ADU)',fontsize=18)
+        axs[1].set_ylabel('Flux (ADU)',fontsize=18)
+        axs[0].plot(0,255,[0,0],':',color ='white')
+        axs[0].xaxis.set_tick_params(labelsize=14)
+        axs[0].yaxis.set_tick_params(labelsize=14)
+        axs[1].xaxis.set_tick_params(labelsize=14)
+        axs[1].yaxis.set_tick_params(labelsize=14)
+        axs[0].fill_between([0,255], y1=-1000000, y2=1000000, facecolor='lightgray',
alpha=0.3, zorder=-100)
+        axs[1].fill_between([0,255], y1=-1000000, y2=1000000, facecolor='lightgray', alpha=0.3, zorder=-100)
+        axs[0].set_xlim(0,255)
+        axs[1].set_xlim(0,255)
+        axs[0].set_ylim(min([0,ymin-0.05*yrange]), ymax+0.05*yrange)
+        axs[1].set_ylim(min([0,ymin_off_order-0.05*yrange_off_order]), ymax_off_order+0.05*yrange_off_order)
+
+        # Display the plot
+        if fig_path != None:
+            t0 = time.process_time()
+            plt.savefig(fig_path, dpi=300, facecolor='w')
+            self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}')
+        if show_plot == True:
+            plt.show()
+        plt.close('all')
+
+
     def plot_HK_spectrum_1D(self, trace='sci', fig_path=None, show_plot=False):
 
         """
From 1771fb8afd9fe59cac1e18fe6acf36107d3d386d Mon Sep 17 00:00:00 2001
From: Andrew Howard
Date: Mon, 9 Oct 2023 17:32:50 -0700
Subject: [PATCH 74/86] polished column cut plot and added it to QLP

---
 configs/quicklook_match.cfg  |   2 +-
 modules/Utils/analyze_hk.py  | 117 +++++++++++++++++++----------------
 modules/quicklook/src/alg.py |   3 +
 3 files changed, 67 insertions(+), 55 deletions(-)

diff --git a/configs/quicklook_match.cfg b/configs/quicklook_match.cfg
index 2f815ee80..d94223087 100644
--- a/configs/quicklook_match.cfg
+++ b/configs/quicklook_match.cfg
@@ -7,7 +7,7 @@ log_directory = /data/logs_QLP/
 
 [ARGUMENT]
 # see quicklook_match.recipe for a description of how to set fullpath
-fullpath = '/data/2D/202?????/KP.20230925.55748.52*.fits'
+fullpath = '/data/2D/202?????/KP.20230923.21775.55*.fits'
 #fullpath = '/data/masters/20230419/*.fits'
 
 [MODULE_CONFIGS]
diff --git a/modules/Utils/analyze_hk.py b/modules/Utils/analyze_hk.py
index 6235decd7..4e8b360f2 100644
--- a/modules/Utils/analyze_hk.py
+++ b/modules/Utils/analyze_hk.py
@@ -141,7 +141,8 @@ def plot_HK_image_2D(self, fig_path=None, kpftype='L0', show_plot=False):
             plt.show()
         plt.close('all')
 
-    def plot_HK_2D_column_cut(self, column=512, fig_path=None, kpftype='L0', show_plot=False):
+
+    def plot_HK_2D_column_cut(self, fig_path=None, kpftype='L0', show_plot=False):
 
         """
         Generate a column cut plot of a 2D image of the Ca H&K spectrum.
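Note: the hunk above removes the `column` argument; from this patch on the two cut
columns (512 and 900) are fixed inside plot_HK_2D_column_cut, so a caller only chooses
the output options. A minimal usage sketch, assuming `myHK` is an AnalyzeHK instance
and `savedir`/`ObsID` follow the QLP naming used later in this patch:

    filename = savedir + ObsID + '_HK_column_cut_zoomable.png'   # QLP naming convention
    myHK.plot_HK_2D_column_cut(fig_path=filename, kpftype='2D', show_plot=False)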
@@ -156,61 +157,69 @@ def plot_HK_2D_column_cut(self, column=512, fig_path=None, kpftype='L0', show_pl """ lw = 1 # linewidth - fig, axs = plt.subplots(2, 1, figsize=(12, 8), sharex=True, tight_layout=True) - axs[0].step(np.arange(255), self.image[:,column], color='k', linewidth=lw) - axs[1].step(np.arange(255), self.image[:,column], color='k', linewidth=lw) + fig, axs = plt.subplots(4,1, figsize=(8, 12), tight_layout=True) + + cols = [512, 900] - ymin = np.percentile(self.image[:,column],1) - ymax = np.percentile(self.image[:,column],99) - yrange = ymax-ymin - - # to determine ymin/ymax, create subarray with missing pixels for SCI orders - remove_indices = [] - remove_indices.extend(range(0,25)) # hack because there's an unextracted order not in the trace locations - remove_indices.extend(range(230,255)) # hack because there's an unextracted order not in the trace locations - for key, value in self.trace_location_sky.items(): - remove_indices.extend(range(value['x1']-2, value['x2']+2)) - new_cut = np.delete(self.image[:,column], remove_indices) - ymin_off_order = np.percentile(new_cut,1) - ymax_off_order = np.percentile(new_cut,99) - yrange_off_order = ymax_off_order-ymin_off_order - - # Add color highlights - for o in range(-1,5): # iterate through the order definitions -- -1 to +5 - axs[0].step(range(self.trace_location_sky[o]['x1'],self.trace_location_sky[o]['x2']), - self.image[self.trace_location_sky[o]['x1']:self.trace_location_sky[o]['x2'],column], - color= self.color_grid[o+1], linewidth=lw) - axs[0].step(range(self.trace_location_sci[o]['x1'],self.trace_location_sci[o]['x2']), - self.image[self.trace_location_sci[o]['x1']:self.trace_location_sci[o]['x2'],column], - color= self.color_grid[o+1], linewidth=lw) - axs[1].step(range(self.trace_location_sky[o]['x1'],self.trace_location_sky[o]['x2']), - self.image[self.trace_location_sky[o]['x1']:self.trace_location_sky[o]['x2'],column], - color= self.color_grid[o+1], linewidth=lw) - axs[1].step(range(self.trace_location_sci[o]['x1'],self.trace_location_sci[o]['x2']), - self.image[self.trace_location_sci[o]['x1']:self.trace_location_sci[o]['x2'],column], - color= self.color_grid[o+1], linewidth=lw) - - # Set axis parameters - axs[0].set_title('Ca H&K - Cut at Column #' + str(column) + ': ' + str(self.ObsID) + ' - ' + self.name, fontsize=18) - axs[1].set_xlabel('Row Number',fontsize=18) - axs[0].set_ylabel('Flux (ADU)',fontsize=18) - axs[1].set_ylabel('Flux (ADU)',fontsize=18) - axs[0].plot(0,255,[0,0],':',color ='white') - axs[0].xaxis.set_tick_params(labelsize=14) - axs[0].yaxis.set_tick_params(labelsize=14) - axs[1].xaxis.set_tick_params(labelsize=14) - axs[1].yaxis.set_tick_params(labelsize=14) - axs[0].fill_between([0,255], y1=-1000000, y2=1000000, facecolor='lightgray', alpha=0.3, zorder=-100) - axs[1].fill_between([0,255], y1=-1000000, y2=1000000, facecolor='lightgray', alpha=0.3, zorder=-100) - axs[0].set_xlim(0,255) - axs[1].set_xlim(0,255) - axs[0].set_ylim(min([0,ymin-0.05*yrange]), ymax+0.05*yrange) - axs[1].set_ylim(min([0,ymin_off_order-0.05*yrange_off_order]), ymax_off_order+0.05*yrange_off_order) + for i, col in enumerate(cols): + axs[0+2*i].step(np.arange(255), self.image[:,col], color='k', linewidth=lw) + axs[1+2*i].step(np.arange(255), self.image[:,col], color='k', linewidth=lw) + + ymin = np.percentile(self.image[:,col],1) + ymax = np.percentile(self.image[:,col],99) + yrange = ymax-ymin + + # to determine ymin/ymax, create subarray with missing pixels for SCI orders + remove_indices = [] + 
remove_indices.extend(range(0,25)) # hack because there's an unextracted order not in the trace locations + remove_indices.extend(range(230,255)) # hack because there's an unextracted order not in the trace locations + for key, value in self.trace_location_sky.items(): + remove_indices.extend(range(value['x1']-2, value['x2']+2)) + new_cut = np.delete(self.image[:,col], remove_indices) + ymin_off_order = np.percentile(new_cut,1) + ymax_off_order = np.percentile(new_cut,99) + yrange_off_order = ymax_off_order-ymin_off_order + + # Add color highlights + for o in range(-1,5): # iterate through the order definitions -- -1 to +5 + axs[0+2*i].step(range(self.trace_location_sky[o]['x1'],self.trace_location_sky[o]['x2']), + self.image[self.trace_location_sky[o]['x1']:self.trace_location_sky[o]['x2'],col], + color= self.color_grid[o+1], linewidth=lw) + axs[0+2*i].step(range(self.trace_location_sci[o]['x1'],self.trace_location_sci[o]['x2']), + self.image[self.trace_location_sci[o]['x1']:self.trace_location_sci[o]['x2'],col], + color= self.color_grid[o+1], linewidth=lw) + axs[1+2*i].step(range(self.trace_location_sky[o]['x1'],self.trace_location_sky[o]['x2']), + self.image[self.trace_location_sky[o]['x1']:self.trace_location_sky[o]['x2'],col], + color= self.color_grid[o+1], linewidth=lw) + axs[1+2*i].step(range(self.trace_location_sci[o]['x1'],self.trace_location_sci[o]['x2']), + self.image[self.trace_location_sci[o]['x1']:self.trace_location_sci[o]['x2'],col], + color= self.color_grid[o+1], linewidth=lw) + axs[0+2*i].plot([0,255],[0,0],':',color ='darkgray', linewidth=lw) + axs[1+2*i].plot([0,255],[0,0],':',color ='darkgray', linewidth=lw) + axs[0+2*i].fill_between([0,255], y1=-1000000, y2=1000000, facecolor='lightgray', alpha=0.3, zorder=-100) + axs[1+2*i].fill_between([0,255], y1=-1000000, y2=1000000, facecolor='lightgray', alpha=0.3, zorder=-100) + + # Set axis parameters + axs[0+2*i].set_xlabel('Row Number in Column #' + str(col),fontsize=14) + axs[1+2*i].set_xlabel('Row Number in Column #' + str(col),fontsize=14) + axs[0+2*i].set_ylabel('Flux (ADU)',fontsize=14) + axs[1+2*i].set_ylabel('Flux (ADU) [zoomed in]',fontsize=14) + axs[0+2*i].xaxis.set_tick_params(labelsize=12) + axs[0+2*i].yaxis.set_tick_params(labelsize=12) + axs[1+2*i].xaxis.set_tick_params(labelsize=12) + axs[1+2*i].yaxis.set_tick_params(labelsize=12) + axs[0+2*i].set_xlim(0,255) + axs[1+2*i].set_xlim(0,255) + axs[0+2*i].set_ylim(min([-1,ymin-0.05*yrange]), ymax+0.05*yrange) + axs[1+2*i].set_ylim(min([-1,ymin_off_order-0.05*yrange_off_order]), ymax_off_order+0.05*yrange_off_order) + + # Add overall title + axs[0].set_title('Ca H&K - Column Cuts through 2D Image: ' + str(self.ObsID) + ' - ' + self.name, fontsize=12) # Display the plot if fig_path != None: t0 = time.process_time() - plt.savefig(fig_path, dpi=300, facecolor='w') + plt.savefig(fig_path, dpi=200, facecolor='w') self.logger.info(f'Seconds to execute savefig: {(time.process_time()-t0):.1f}') if show_plot == True: plt.show() @@ -271,7 +280,7 @@ def plot_HK_spectrum_1D(self, trace='sci', fig_path=None, show_plot=False): ax.set_ylabel('Flux (ADU)',fontsize=18) ax.xaxis.set_tick_params(labelsize=14) ax.yaxis.set_tick_params(labelsize=14) - ax.fill_between(chk_bandpass, y1=ymin, y2=ymin+yrange, facecolor='gray', alpha=0.3, zorder=-100) + ax.fill_between(chk_bandpass, y1=ymin, y2=ymin+yrange, facecolor='lightgray', alpha=0.3, zorder=-100) ax.fill_between(caH, y1=ymin, y2=ymin+yrange, facecolor='m', alpha=0.3) ax.fill_between(caK, y1=ymin, y2=ymin+yrange, facecolor='m', 
alpha=0.3) ax.fill_between(Vcont, y1=ymin, y2=ymin+yrange, facecolor='c', alpha=0.3) @@ -366,8 +375,8 @@ def plot_HK_spectrum_1D_zoom(self, trace='sci', fig_path=None, show_plot=False): axh.set_ylabel('Flux (ADU)',fontsize=18) axh.xaxis.set_tick_params(labelsize=14) axh.yaxis.set_tick_params(labelsize=14) - axk.fill_between(chk_bandpass, y1=ymink, y2=ymink+yrangek, facecolor='gray', alpha=0.3, zorder=-100) - axh.fill_between(chk_bandpass, y1=yminh, y2=yminh+yrangeh, facecolor='gray', alpha=0.3, zorder=-100) + axk.fill_between(chk_bandpass, y1=ymink, y2=ymink+yrangek, facecolor='lightgray', alpha=0.3, zorder=-100) + axh.fill_between(chk_bandpass, y1=yminh, y2=yminh+yrangeh, facecolor='lightgray', alpha=0.3, zorder=-100) axh.fill_between(caH, y1=yminh, y2=yminh+yrangeh, facecolor='m', alpha=0.3) axk.fill_between(caK, y1=ymink, y2=ymink+yrangek, facecolor='m', alpha=0.3) axk.text(np.mean(caK) -0.10, ymink+0.92*yrangek, 'K', fontsize=14) diff --git a/modules/quicklook/src/alg.py b/modules/quicklook/src/alg.py index 3d6723a56..be5390f96 100644 --- a/modules/quicklook/src/alg.py +++ b/modules/quicklook/src/alg.py @@ -266,6 +266,9 @@ def qlp_2D(self, kpf2d, output_dir): filename = savedir + self.ObsID + '_HK_spectrum_sky_zoom_zoomable.png' self.logger.info('Generating QLP image ' + filename) myHK.plot_HK_spectrum_1D_zoom(trace='sky', fig_path=filename, show_plot=False) + filename = savedir + self.ObsID + '_HK_column_cut_zoomable.png' + self.logger.info('Generating QLP image ' + filename) + myHK.plot_HK_2D_column_cut(fig_path=filename, kpftype='2D', show_plot=False) except Exception as e: self.logger.error(f"Failure in CaHK quicklook pipeline: {e}\n{traceback.format_exc()}") From d6f47f62bfc20df1a3a762119ec0a05287e01fff Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Sat, 21 Oct 2023 23:14:20 -0700 Subject: [PATCH 75/86] added read noise estimates from overscan region --- modules/Utils/analyze_l0.py | 243 ++++++++++++++++++++++++++++-------- 1 file changed, 191 insertions(+), 52 deletions(-) diff --git a/modules/Utils/analyze_l0.py b/modules/Utils/analyze_l0.py index 8ebf349ec..23af0aed6 100644 --- a/modules/Utils/analyze_l0.py +++ b/modules/Utils/analyze_l0.py @@ -1,12 +1,12 @@ +import re import time import numpy as np import matplotlib.pyplot as plt -from modules.Utils.kpf_parse import HeaderParse +from modules.Utils.kpf_parse import HeaderParse, get_data_products_L0 from modules.Utils.utils import DummyLogger from datetime import datetime class AnalyzeL0: - """ Description: This class contains functions to analyze L0 images (storing them @@ -16,18 +16,178 @@ class AnalyzeL0: L0 - an L0 object Attributes: - None so far + amp_names (list) - names of the amplifier regions present in the L0 image + gain (dictionary) - gain values for amplifier regions (determined by Ashley Baker) + regions (list of strings) - list of amplifier regions in L0 + nregions_green (int) - number of amplifier regions present in Green CCD image + nregions_red (int) - number of amplifier regions present in Red CCD image + green_present (boolean) - True if the Green CCD image is in the L0 object + red_present (boolean) - True if the Red CCD image is in the L0 object + read_noise_overscan (dictionary) - read noise estimates in e- determined by std of overscan regions. 
+ The dictionary is over the list of regions + """ def __init__(self, L0, logger=None): self.logger = logger if logger is not None else DummyLogger() - self.logger.debug('Initializing AnalyzeL0 object.') self.L0 = L0 + self.data_products = get_data_products_L0(L0) primary_header = HeaderParse(L0, 'PRIMARY') self.header = primary_header.header self.name = primary_header.get_name() self.ObsID = primary_header.get_obsid() - + if ('Green' in self.data_products) or ('Red' in self.data_products): + self.regions = [key for key in self.L0.extensions if 'AMP' in key] + bad_regions = [] # remove regions with no data + for region in self.regions: + if len(self.L0[region].data) == 0: + bad_regions.append(region) # the bad region can't be removed in this loop, so a list is created + if bad_regions != []: + for region in bad_regions: + self.regions.remove(region) + self.determine_regions() # sets self.green_present, self.red_present, nregions_green, nregions_red + self.gain = { + 'GREEN_AMP1': 5.175, + 'GREEN_AMP2': 5.208, + 'GREEN_AMP3': 5.52, + 'GREEN_AMP4': 5.39, + 'RED_AMP1': 5.02, + 'RED_AMP2': 5.27, + 'RED_AMP3': 5.32, + 'RED_AMP4': 5.23, + } + self.read_noise_overscan = {} # initialize dictionary + try: + self.measure_read_noise_overscan() + except Exception as e: + print(e) + print('measure_read_noise_overscan() failed on ' + self.ObsID) + + def reject_outliers(self, data, n=5.0): + """ + This method performs an iterative n-sigma outlier rejection + and is used in measure_read_noise_overscan(). + + Parameters: + data (list or np.array): Input data set + n (float): Number of standard deviations from the mean. + Data points outside this range are considered outliers. + + Returns: + np.array: Data set with outliers removed. + """ + data = np.array(data) + + while True: + mean, std = np.mean(data), np.std(data) + outliers = (data < mean - n * std) | (data > mean + n * std) + if not np.any(outliers): + break + data = data[~outliers] + + return data + + + def determine_regions(self): + """ + This method determines if the Green and Red CCD images are present in the + AnalyzeL0 object and counts the number of amplifier regions in each CCD image. + These results are written to attributes of the AnalyzeL0 object. 
+
+        Parameters:
+            None
+
+        Attributes:
+            self.green_present (boolean) - True if the Green CCD image (of non-zero size) is in the L0 object
+            self.red_present (boolean) - True if the Red CCD image (of non-zero size) is in the L0 object
+            self.nregions_green (int) - number of amplifier regions present in Green CCD image
+            self.nregions_red (int) - number of amplifier regions present in Red CCD image
+
+        Returns:
+            None
+        """
+
+        # Determine if specific amplifier regions are present and count them
+        for CHIP in ['GREEN', 'RED']:
+            nregions = 0
+            amp1_present = False
+            amp2_present = False
+            amp3_present = False
+            amp4_present = False
+            if CHIP + '_AMP1' in self.L0.extensions:
+                if self.L0[CHIP + '_AMP1'].shape[0] > 0: # Sanity check that data is present
+                    amp1_present = True
+            if CHIP + '_AMP2' in self.L0.extensions:
+                if self.L0[CHIP + '_AMP2'].shape[0] > 0:
+                    amp2_present = True
+            if CHIP + '_AMP3' in self.L0.extensions:
+                if self.L0[CHIP + '_AMP3'].shape[0] > 0:
+                    amp3_present = True
+            if CHIP + '_AMP4' in self.L0.extensions:
+                if self.L0[CHIP + '_AMP4'].shape[0] > 0:
+                    amp4_present = True
+            if not amp1_present:
+                nregions = 0
+            if amp1_present:
+                nregions = 1
+            if amp1_present & amp2_present:
+                nregions = 2
+            if amp1_present & amp2_present & amp3_present & amp4_present:
+                nregions = 4
+            if CHIP == 'GREEN':
+                self.nregions_green = nregions
+                if nregions >= 1:
+                    self.green_present = True
+                else:
+                    self.green_present = False
+            if CHIP == 'RED':
+                self.nregions_red = nregions
+                if nregions >= 1:
+                    self.red_present = True
+                else:
+                    self.red_present = False
+
+
+    def measure_read_noise_overscan(self, nparallel=30, nserial=50, nsigma=5.0):
+        """
+        Measure read noise in the overscan region of a KPF CCD image.
+        Read noise is measured as the standard deviation of the pixel values,
+        after applying an n-sigma iterative outlier rejection.
+ + Args: + nparallel (integer) - overscan length in parallel direction (30 pixels was final value in 2023) + nserial (integer) - overscan length in serial direction (30 pixels was final value in 2023) + + Attributes: + TBD + + Returns: + None + """ + + for region in self.regions: + CHIP = region.split('_')[0] + NUM = region.split('AMP')[1] + gain = self.gain[CHIP+'_AMP'+NUM] + data = self.L0[region] + if np.nanmedian(data) > 200*2**16: # divide by 2**16 if needed + data /= 2**16 + if region == 'GREEN_AMP2': + data = data[:,::-1] # flip so that overscan is on the right + if region == 'GREEN_AMP3': + data = data[::-1,:] + if region == 'GREEN_AMP4': + data = data[::-1,::-1] + if region == 'RED_AMP2': + data = data[:,::-1] + if region == 'RED_AMP3': + data = data + if region == 'RED_AMP4': + data = data[:,::-1] + overscan_region = data[5:2040 + nparallel-5,2044 + 10:2044 + nserial-10] + vals = self.reject_outliers(overscan_region.flat, nsigma) + self.read_noise_overscan[region] = gain * np.std(vals) + def plot_L0_stitched_image(self, chip=None, fig_path=None, show_plot=False): """ @@ -46,45 +206,12 @@ def plot_L0_stitched_image(self, chip=None, fig_path=None, show_plot=False): """ L0 = self.L0 + CHIP = chip.upper() if chip == 'green' or chip == 'red': if chip == 'green': - CHIP = 'GREEN' + regions = self.nregions_green if chip == 'red': - CHIP = 'RED' - # Determine the number of regions in image - regions = 0 - amp1_present = False - amp2_present = False - amp3_present = False - amp4_present = False -# this method worked for .fits files, but not for L0 objects -# for hdu in L0: -# if hdu.name == CHIP + '_AMP1': -# amp1_present = True -# if hdu.name == CHIP + '_AMP2': -# amp2_present = True -# if hdu.name == CHIP + '_AMP3': -# amp3_present = True -# if hdu.name == CHIP + '_AMP4': -# amp4_present = True - if CHIP + '_AMP1' in L0.extensions: - if L0[CHIP + '_AMP1'].shape[0] > 0: - amp1_present = True - if CHIP + '_AMP2' in L0.extensions: - if L0[CHIP + '_AMP2'].shape[0] > 0: - amp2_present = True - if CHIP + '_AMP3' in L0.extensions: - if L0[CHIP + '_AMP3'].shape[0] > 0: - amp3_present = True - if CHIP + '_AMP4' in L0.extensions: - if L0[CHIP + '_AMP4'].shape[0] > 0: - amp4_present = True - if amp1_present: - regions = 1 - if amp1_present & amp2_present: - regions = 2 - if amp1_present & amp2_present & amp3_present & amp4_present: - regions = 4 + regions = self.nregions_red # Assemble image if regions == 1: @@ -101,7 +228,7 @@ def plot_L0_stitched_image(self, chip=None, fig_path=None, show_plot=False): image = np.concatenate((image_bot, image_top), axis=0) # Determine if image needs to be divided by 2^16 - if np.nanmedian(image) > 400*2**16: + if np.nanmedian(image) > 200*2**16: twotosixteen = True image /= 2**16 else: @@ -127,19 +254,31 @@ def plot_L0_stitched_image(self, chip=None, fig_path=None, show_plot=False): cbar.ax.yaxis.label.set_size(14) cbar.ax.tick_params(labelsize=12) plt.grid(False) - + + # Create label for read noise + rn_text = '' + if self.read_noise_overscan != {}: + rn_text = 'Read noise = ' + for i, region in enumerate(self.regions): + if CHIP == 'GREEN': + nregions = self.nregions_green + if CHIP == 'RED': + nregions = self.nregions_red + if region.split('_')[0] == CHIP: + rn_text += f"{self.read_noise_overscan[region]:.2f}" + if i < nregions-1: + rn_text += ', ' + rn_text += ' e-' + # Create a timestamp and annotate in the lower right corner current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S') timestamp_label = f"KPF QLP: {current_time}" - 
plt.annotate(timestamp_label, - xy=(1, 0), - xycoords='axes fraction', - fontsize=8, - color="lightgray", - ha="right", - va="bottom", - xytext=(100, -32), # adjust the second parameter to control annotation height - textcoords='offset points') + plt.annotate(timestamp_label, xy=(1, 0), xycoords='axes fraction', + fontsize=8, color="darkgray", ha="right", va="bottom", + xytext=(100, -32), textcoords='offset points') + plt.annotate(rn_text, xy=(0, 0), xycoords='axes fraction', + fontsize=8, color="darkgray", ha="left", va="bottom", + xytext=(-100, -32), textcoords='offset points') plt.subplots_adjust(bottom=0.1) # Display the plot From 92b4c1ca9baf372900ed911a4f1358329225b0a9 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Sun, 22 Oct 2023 08:29:23 -0700 Subject: [PATCH 76/86] started get_read_speed() to determine CCD read speed from headers --- modules/Utils/kpf_parse.py | 42 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/modules/Utils/kpf_parse.py b/modules/Utils/kpf_parse.py index 770df3e18..93a813cdb 100644 --- a/modules/Utils/kpf_parse.py +++ b/modules/Utils/kpf_parse.py @@ -122,6 +122,48 @@ def get_obsid(self): self.ObsID = self.ObsID.replace('_L1', '') self.ObsID = self.ObsID.replace('_L2', '') return self.ObsID + + def get_read_speed(self): + """ + This method determines the read speed of the CCDs. + The two options are 'fast' (~12 sec) and 'normal' (~48 sec) + + Parameters: + None + + Attributes: + self.read_speed (string) - 'fast', 'regular', 'unknown' + self.green_acf (string) - name of ACF file used to read the Green CCD + self.red_acf (string) - name of ACF file used to read Red CCD + + Returns: + None + """ + self.read_speed = 'unknown' + self.green_acf = 'unknown' + self.red_acf = 'unknown' + if hasattr(self, 'header'): + try: + self.green_acf = self.header['GRACFFLN'] + except: + pass + try: + self.red_acf = self.header['RDACFFLN'] + except: + pass + + if ('fast' in self.green_acf) or ('fast' in self.red_acf): + self.read_speed = 'fast' + elif ('regular' in self.green_acf) or ('regular' in self.red_acf): + self.read_speed = 'regular' + #elif + +#GRDATE = '2023-09-10T13:29:09.804475' / FITS file write time Kwd green DATE +#GRDATE-B= '2023-09-10T13:26:56.365516' / Shutter-open time Kwd green DATE-BEG +#GRDATE-E= '2023-09-10T13:28:56.398782' / Shutter-close time Kwd green DATE-END +#RDDATE = '2023-09-10T13:29:09.896907' / FITS file write time Kwd red DATE +#RDDATE-B= '2023-09-10T13:26:56.365516' / Shutter-open time Kwd red DATE-BEG +#RDDATE-E= '2023-09-10T13:28:56.398782' / Shutter-close time Kwd red DATE-END def get_datecode(ObsID): From 49a59bfdc4f1b4c9bfeb65ae66f2e4fa4675ada7 Mon Sep 17 00:00:00 2001 From: Andrew Howard Date: Sun, 22 Oct 2023 14:08:44 -0700 Subject: [PATCH 77/86] read speed and read noise determinations now working --- modules/Utils/analyze_l0.py | 12 ++++-- modules/Utils/kpf_parse.py | 83 +++++++++++++++++++++++++++++-------- 2 files changed, 75 insertions(+), 20 deletions(-) diff --git a/modules/Utils/analyze_l0.py b/modules/Utils/analyze_l0.py index 23af0aed6..660b83aac 100644 --- a/modules/Utils/analyze_l0.py +++ b/modules/Utils/analyze_l0.py @@ -62,6 +62,9 @@ def __init__(self, L0, logger=None): except Exception as e: print(e) print('measure_read_noise_overscan() failed on ' + self.ObsID) + + self.read_speed, self.green_acf, self.red_acf, self.green_read_time, self.red_read_time = \ + primary_header.get_read_speed() def reject_outliers(self, data, n=5.0): """ @@ -155,8 +158,10 @@ def 
measure_read_noise_overscan(self, nparallel=30, nserial=50, nsigma=5.0):
         after applying an n-sigma iterative outlier rejection.
 
         Args:
-            nparallel (integer) - overscan length in parallel direction (30 pixels was final value in 2023)
-            nserial (integer) - overscan length in serial direction (30 pixels was final value in 2023)
+            nparallel (integer) - overscan length in parallel direction
+                                  (30 pixels was final value in 2023)
+            nserial (integer) - overscan length in serial direction
+                                (30 pixels was final value in 2023)
 
         Attributes:
             TBD
 
         Returns:
             None
@@ -259,7 +264,8 @@ def plot_L0_stitched_image(self, chip=None, fig_path=None, show_plot=False):
             rn_text = ''
             if self.read_noise_overscan != {}:
                 rn_text = 'Read noise = '
-                for i, region in enumerate(self.regions):
+                chip_regions = [item for item in self.regions if CHIP in item]
+                for i, region in enumerate(chip_regions):
                     if CHIP == 'GREEN':
                         nregions = self.nregions_green
                     if CHIP == 'RED':
diff --git a/modules/Utils/kpf_parse.py b/modules/Utils/kpf_parse.py
index 93a813cdb..16e1fbb0b 100644
--- a/modules/Utils/kpf_parse.py
+++ b/modules/Utils/kpf_parse.py
@@ -1,4 +1,5 @@
 from astropy.io import fits
+from datetime import datetime
 
 class KPFParse:
 
@@ -126,38 +127,86 @@ def get_obsid(self):
     def get_read_speed(self):
         """
         This method determines the read speed of the CCDs.
-        The two options are 'fast' (~12 sec) and 'normal' (~48 sec)
+        The two options are 'fast' (~12 sec) and 'regular' (~48 sec).
+        This method also reports the ACF files used (CCD waveform files) and
+        the read times for each CCD.
 
         Parameters:
             None
 
         Attributes:
             read_speed (string) - 'fast', 'regular', 'unknown'
             green_acf (string) - name of ACF file used to read the Green CCD
             red_acf (string) - name of ACF file used to read Red CCD
             green_read_time (double) - seconds to read out the Green CCD
             red_read_time (double) - seconds to read out the Red CCD
 
         Returns:
             a tuple of (read_speed, green_acf, red_acf, green_read_time, red_read_time)
         """
         fast_read_time_max = 20 # sec
         datetime_format = "%Y-%m-%dT%H:%M:%S.%f"
         green_acf = 'unknown'
         red_acf = 'unknown'
         read_speed = 'unknown'
         green_read_time = 0.
         red_read_time = 0.
+ if hasattr(self, 'header'): + # Green CCD Read Time + try: + dt1 = datetime.strptime(self.header['GRDATE'], datetime_format) # fits file write time + dt2 = datetime.strptime(self.header['GRDATE-E'], datetime_format) # shutter-close time + deltat = dt1-dt2 + green_read_time = deltat.total_seconds() + except: + pass + # Red CCD Read Time try: - self.green_acf = self.header['GRACFFLN'] + dt1 = datetime.strptime(self.header['RDDATE'], datetime_format) # fits file write time + dt2 = datetime.strptime(self.header['RDDATE-E'], datetime_format) # shutter-close time + deltat = dt1-dt2 + red_read_time = deltat.total_seconds() except: pass + # ACF file for Green CCD try: - self.red_acf = self.header['RDACFFLN'] + green_acf = self.header['GRACFFLN'] except: pass - - if ('fast' in self.green_acf) or ('fast' in self.red_acf): - self.read_speed = 'fast' - elif ('regular' in self.green_acf) or ('regular' in self.red_acf): - self.read_speed = 'regular' - #elif - + # ACF file for Red CCD + try: + red_acf = self.header['RDACFFLN'] + except: + pass + # Determine read speed ('fast' or 'regular') + try: + if ('fast' in green_acf) or ('fast' in red_acf): + read_speed = 'fast' + elif ('regular' in green_acf) or ('regular' in red_acf): + read_speed = 'regular' + else: + a = green_read_time + b = red_read_time + best_read_time = min(x for x in [a, b] if x != 0) if a * b != 0 else (a or b) + if (best_read_time > 0) and (best_read_time < fast_read_time_max): + read_speed = 'fast' + elif best_read_time >= fast_read_time_max: + read_speed = 'regular' + except: + pass + return (read_speed, green_acf, red_acf, green_read_time, red_read_time) + +#datetime_format = "%Y-%m-%dT%H:%M:%S.%f" +#dt1 = datetime.strptime(time1, datetime_format) +#dt2 = datetime.strptime(time2, datetime_format) + +# Compute the difference +#difference = dt2 - dt1 + +# Return the total seconds rounded to 0.1-second precision +#round(difference.total_seconds(), 1) + #GRDATE = '2023-09-10T13:29:09.804475' / FITS file write time Kwd green DATE #GRDATE-B= '2023-09-10T13:26:56.365516' / Shutter-open time Kwd green DATE-BEG #GRDATE-E= '2023-09-10T13:28:56.398782' / Shutter-close time Kwd green DATE-END From 2ef541a7ae7866c1e2f87a2892e4aa5c635eb493 Mon Sep 17 00:00:00 2001 From: root Date: Sun, 22 Oct 2023 21:16:45 +0000 Subject: [PATCH 78/86] changed title --- configs/quicklook_match.cfg | 2 +- modules/Utils/analyze_l0.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/configs/quicklook_match.cfg b/configs/quicklook_match.cfg index d94223087..e7dfbe237 100644 --- a/configs/quicklook_match.cfg +++ b/configs/quicklook_match.cfg @@ -7,7 +7,7 @@ log_directory = /data/logs_QLP/ [ARGUMENT] # see quicklook_match.recipe for a description of how to set fullpath -fullpath = '/data/2D/202?????/KP.20230923.21775.55*.fits' +fullpath = '/data/L0/202?????/KP.20230102.10546.01*.fits' #fullpath = '/data/masters/20230419/*.fits' [MODULE_CONFIGS] diff --git a/modules/Utils/analyze_l0.py b/modules/Utils/analyze_l0.py index 660b83aac..02040819c 100644 --- a/modules/Utils/analyze_l0.py +++ b/modules/Utils/analyze_l0.py @@ -247,9 +247,9 @@ def plot_L0_stitched_image(self, chip=None, fig_path=None, show_plot=False): vmin=np.percentile(image,1), vmax=np.percentile(image,99.5)) if chip == 'green': - plt.title('L0 (no processing) - Green CCD: ' + str(self.ObsID) + ' - ' + self.name, fontsize=14) + plt.title('L0 - Green CCD: ' + str(self.ObsID) + ' - ' + self.name, fontsize=14) if chip == 'red': - plt.title('L0 (no processing) - Red CCD: ' + str(self.ObsID) 
+ ' - ' + self.name, fontsize=14)
+            plt.title('L0 - Red CCD: ' + str(self.ObsID) + ' - ' + self.name, fontsize=14)
         plt.xlabel('Column (pixel number)', fontsize=14)
         plt.ylabel('Row (pixel number)', fontsize=14)
         cbar_label = 'ADU'
From 95304ff937ffa298201ddf3fd1fd5dbf25c1ce5a Mon Sep 17 00:00:00 2001
From: cwang2016
Date: Mon, 23 Oct 2023 16:22:11 -0700
Subject: [PATCH 79/86] feature/order_trace_post_process

add a post process to order trace module to enhance and refine the order
trace upper and lower edges along each trace.

add parameters 'orderlet_gap_pixels' in config file as the number of pixels
to ignore between orderlets during extraction.

---
 configs/kpf_drp.cfg                    |   6 +
 modules/order_trace/src/alg.py         | 235 ++++++++++++++++++++++++-
 modules/order_trace/src/order_trace.py |  45 ++++-
 modules/spectral_extraction/src/alg.py |  15 +-
 recipes/kpf_drp.recipe                 |   8 +-
 5 files changed, 296 insertions(+), 13 deletions(-)

diff --git a/configs/kpf_drp.cfg b/configs/kpf_drp.cfg
index 1e764765a..497bd4ee1 100644
--- a/configs/kpf_drp.cfg
+++ b/configs/kpf_drp.cfg
@@ -47,6 +47,8 @@ order_trace_flat = /data/reference_fits/kpf_20230920_master_flat.fits
 order_trace_files = [ '/data/reference_fits/kpf_20230920_master_flat_GREEN_CCD.csv', '/data/reference_fits/kpf_20230920_master_flat_RED_CCD.csv']
 fitting_poly_degree = 3
 ccd_idx = [0, 1]
+# number of pixels to ignore between orderlets during extraction
+orderlet_gap_pixels = 1.5
 
 # for spectral extraction:
 # - update orders_per_ccd, start_order, orderlet_names per new order trace result
@@ -98,6 +100,10 @@ reweighting_enable_masks = [['espresso'], ['espresso']]
 ccf_ext = ['GREEN_CCF', 'RED_CCF']
 rv_ext = RV
 static_ccf_ratio = ['/code/KPF-Pipeline/static/static_green_ccf_ratio_2.csv', '/code/KPF-Pipeline/static/static_red_ccf_ratio_2.csv']
+# starting and ending location for CCF calculation, >= 0, position relative to left end of the image,
+# < 0, position relative to the right end of the image
+rv_start_x = 500
+rv_end_x = -500
 
 # for ca_hk:
 # hk_fiber_list: []
diff --git a/modules/order_trace/src/alg.py b/modules/order_trace/src/alg.py
index aca375fc5..cfcd93f7c 100644
--- a/modules/order_trace/src/alg.py
+++ b/modules/order_trace/src/alg.py
@@ -53,6 +53,7 @@ class OrderTraceAlg(ModuleAlgBase):
         data_range (list): Range of data to be traced, [<x1>, <x2>, <y1>, <y2>].
         original_size (list): Original size of the flat data, [<height>, <width>].
         poly_degree (int): Order of polynomial for order trace fitting.
+        orders_ccd (number, options)`: Total orders of the ccd. Defaults to -1.
 
     Raises:
         AttributeError: The ``Raises`` section is a list of all exceptions
             that are relevant to the interface.
@@ -65,7 +66,7 @@ class OrderTraceAlg(ModuleAlgBase):
     LOWER = 0
     name = 'OrderTrace'
 
-    def __init__(self, data, poly_degree=None, expected_traces = None, config=None, logger=None):
+    def __init__(self, data, poly_degree=None, expected_traces=None, orders_ccd=-1, config=None, logger=None):
         if not isinstance(data, np.ndarray):
             raise TypeError('image data type error, cannot construct object from OrderTraceAlg')
         if not isinstance(config, ConfigParser):
@@ -87,6 +88,7 @@ def __init__(self, data, poly_degree=None, expected_traces = None, config=None,
         self.poly_degree = poly_degree
         self.trace_ratio = None
         self.expected_traces = expected_traces # this is useful for regression test
+        self.orders_ccd = orders_ccd
 
     def get_config_value(self, param: str, default):
         """Get defined value from the config file.
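Note: the post-processing added below trims half of the configured orderlet gap from
each side of the valley between adjacent orderlet traces. A worked sketch with the
`orderlet_gap_pixels = 1.5` value set in the config above (the row numbers are
hypothetical, chosen only to illustrate the arithmetic used in post_process):

    orderlet_gap_pixels = 1.5                         # from kpf_drp.cfg above
    w_th = orderlet_gap_pixels / 2                    # half-gap trimmed on each side
    y_peak, y_valley_lo, y_valley_hi = 100, 96, 105   # hypothetical peak row and neighboring valley rows
    lower_width = y_peak - y_valley_lo - w_th         # 3.25 pixels below the trace peak
    upper_width = y_valley_hi - y_peak - w_th         # 4.25 pixels above the trace peak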
@@ -132,6 +134,9 @@ def get_data_range(self):
             self.set_data_range()
         return self.data_range
 
+    def get_total_orderlet(self):
+        return self.get_config_value('total_image_orderlets', 1)
+
     def get_poly_degree(self):
         """Order of polynomial for order trace fitting.
 
@@ -214,6 +219,12 @@ def correct_nan_data(self):
         self.flat_data = np.nan_to_num(self.flat_data)
         return self.flat_data
 
+    def get_total_orders(self, traces=0):
+        if self.orders_ccd < 0:
+            if traces != 0:
+                self.orders_ccd = (traces+self.get_total_orderlet())//self.get_total_orderlet() \
+                    if traces%self.get_total_orderlet() else traces//self.get_total_orderlet()
+        return self.orders_ccd
 
     @ staticmethod
     def opt_filter(y_data: np.ndarray, par: int, weight: np.ndarray = None):
@@ -2490,7 +2501,6 @@ def curve_fitting_on_all_clusters(self, index: np.ndarray, x: np.ndarray, y: np.
         for c_idx in range(1, max_index+1):
             idx_at_order = np.where(index == c_idx)[0]
             if x[idx_at_order].size == 0:  # for debugging
-                import pdb;pdb.set_trace()
                 print('c_idx ', c_idx, x, y)
             else:
                 poly, error, area = self.curve_fitting_on_one_cluster(c_idx, index, x, y, power)
@@ -2825,6 +2835,215 @@ def float_to_string(afloat):
             new_str = f"{afloat:.4f}"
         return new_str
 
+    def post_process(self, orig_coeffs, orig_widths, orderlet_gap = 2):
+        """ post process and refine the calculated widths to make the widths located closer to the valley between two
+        consecutive orderlet traces and in the style of being more symmetric to the valley.
+        Args:
+            orig_coeffs: coeffs from high order to low order
+            orig_widths: widths array of lower and upper widths
+        Returns:
+            numpy.array: new_coeffs with one extra row added to orig_coeffs, in the same format as the parameter
+                `cluster_coeffs` of `write_cluster_info_to_dataframe`.
+            list: new_widths containing the width information in the same format as the parameter `cluster_widths`
+                of `write_cluster_info_to_dataframe`.
+        """
+
+        ext_data, nx, ny = self.get_spectral_data()
+        power = self.get_poly_degree()
+        total_trace = np.shape(orig_coeffs)[0]
+        total_orderlets = self.get_total_orderlet()
+
+        # assume the missing traces are located at the bottom if there is any trace missing
+        # i.e. total_orders * total_orderlets < total_trace
+        total_orders = self.get_total_orders(total_trace)
+        start_order = total_trace - total_orders * total_orderlets
+        od_idxs = np.arange(start_order, start_order+total_orderlets, dtype=int)
+        # the trace index in orig_coeffs for each order
+        od_sets = np.array([od_idxs + i * total_orderlets for i in range(total_orders)])
+
+        # setting for post process
+        # skip the trace which vertical position falls outside the range of [y_v_off, ny-1-y_v_off]
+        th_v = 1
+        y_v_off = 3
+        w_th = orderlet_gap/2
+        th = 0.35
+        x_int = 10
+
+
+        xrange = np.arange(0, nx, dtype=int)
+        lower_widths = orig_widths[:, 0]
+        upper_widths = orig_widths[:, 1]
+        xlefts = orig_coeffs[:, power+1].astype(int)
+        xrights = orig_coeffs[:, power+2].astype(int)
+        y_vals = np.array([np.polyval(orig_coeffs[i, 0:power+1], xrange) for i in range(total_trace)])
+        v_limit = [y_v_off + th_v, ny - 1 - y_v_off - th_v]  # range limit for trace top and bottom
+        new_widths = [{}] * total_trace
+
+        for order_idx in range(total_orders):
+            t_idxs = od_sets[order_idx]
+            v_orderlet = np.where(t_idxs >= 0)[0]  # valid orderlet per order to process 0-4 for kpf
+            t_idxs = t_idxs[v_orderlet]  # trace index per order to process.
0-174/0-159 for kpf green/red + #print('order_idx: ', order_idx, 'trace:', t_idxs) + new_order_trace = [] + for x in range(0, nx, x_int): + v_orderlet_x = np.where((xlefts[t_idxs] <= x) & (xrights[t_idxs] >= x))[0] # valid orderlet per x + # if only one trace is included or the included traces are not consecutive + if v_orderlet_x.size <= 1 or np.max(np.ediff1d(v_orderlet_x)) > 1: + continue + t_idx_x = t_idxs[v_orderlet_x].copy() # trace index + t_orderlet_x = v_orderlet[v_orderlet_x].copy() # orderlet index + + y_orig_range = np.array( + [np.floor(y_vals[t, x] + np.array([-lower_widths[t], upper_widths[t]], dtype=int)) for t in t_idx_x]) + + v_y_range = np.where( (v_limit[0] <= y_orig_range[:, 0]) & (y_orig_range[:, 0] <= v_limit[1]) & + (v_limit[0] <= y_orig_range[:, 1]) & (y_orig_range[:, 1] <= v_limit[1]))[0] + + if v_y_range.size <= 1 or np.max(np.ediff1d(v_y_range)) > 1: + continue + + t_idx_x = t_idx_x[v_y_range] # valid traces of one order at x + t_orderlet_x = t_orderlet_x[v_y_range] # valid orderlets of one order at x + + # recalculate y range in case trace exclusion is changed + if v_y_range.size != v_orderlet_x.size: + y_orig_range = np.array( + [np.floor(y_vals[t, x] + np.array([-lower_widths[t], upper_widths[t]], dtype=int)) for t in t_idx_x]) + + # valley (flux) between consecutive traces of each order including the one before the first trace and + # after the last trace + y_valley_centers = np.zeros(t_idx_x.size+1).astype(int) + flux_valley_mins = np.zeros(t_idx_x.size+1) + + # valley between two consecutive orderlets (traces) + for i in range(1, t_idx_x.size): + left_idx = i-1 + right_idx = i + v_left = min(max(y_orig_range[left_idx][1] - y_v_off, 0), ny - 1) + v_right = max(min(y_orig_range[right_idx][0] + y_v_off, ny - 1), 0) + y_valley_list = np.arange(v_left, v_right+1, dtype=int) + y_avgs_list = [] + for y_p in y_valley_list: + y_inc = np.arange(max(y_p-th_v, 0), min(y_p+th_v, ny-1)+1, dtype=int) + y_avgs_list.append(np.average(ext_data[y_inc, x])) + + y_valley_centers[i] = y_valley_list[np.argmin(y_avgs_list)] + flux_valley_mins[i] = ext_data[y_valley_centers[i], x] + y_peaks = y_vals[t_idx_x, x].astype(int) + + # add valley to the left of the first and the right of the last orderlet + for i in [0, t_idx_x.size]: + if i == 0: + delta_y = y_valley_centers[i + 1] - y_peaks[i] + y_valley_centers[i] = max(y_peaks[i] - delta_y, 0) + else: + delta_y = y_peaks[i - 1] - y_valley_centers[i - 1] + y_valley_centers[i] = min(y_peaks[i - 1] + delta_y, ny - 1) + + flux_valley_mins[i] = ext_data[y_valley_centers[i], x] + + # halves of peak to valley around the valley + y_left_halves = np.ones(t_idx_x.size+1, dtype=int) * -1 + y_right_halves = np.ones(t_idx_x.size+1, dtype=int) * -1 + y_center_halves = np.ones(t_idx_x.size+1, dtype=int) + + for i in range(t_idx_x.size+1): + # get right side around the valleys + if i < t_idx_x.size: + y_right_th = (ext_data[y_peaks[i], x] - flux_valley_mins[i]) * th + flux_valley_mins[i] + for y in range(y_valley_centers[i], y_peaks[i]): + if ext_data[y, x] > y_right_th: + y_right_halves[i] = y + break + + if i > 0: + y_left_th = (ext_data[y_peaks[i-1], x] - flux_valley_mins[i]) * th + flux_valley_mins[i] + for y in range(y_valley_centers[i], y_peaks[i-1], -1): + if ext_data[y, x] > y_left_th: + y_left_halves[i] = y + break + mid_to_set = 1 + # symmetric to the valley center at two ends + if i == 0: + y_left_halves[i] = y_valley_centers[i] * 2 - y_right_halves[i] + if y_left_halves[i] < 0: + y_center_halves[i] = y_valley_centers[i] + 
y_left_halves[i] = -1 + mid_to_set = 0 + elif i == t_idx_x.size: + y_right_halves[i] = y_valley_centers[i] * 2 - y_left_halves[i] + if y_right_halves[i] > ny - 1: + y_right_halves[i] = -1 + y_center_halves[i] = y_valley_centers[i] + mid_to_set = 0 + if mid_to_set == 1: + # the center is located at .5 location + if (y_left_halves[i] + y_right_halves[i]) % 2 == 1: + y_op1 = (y_left_halves[i] + y_right_halves[i]) // 2 + y_op2 = (y_left_halves[i] + y_right_halves[i] + 1) // 2 + y_center_halves[i] = y_op1 if ext_data[y_op1, x] < ext_data[y_op2, x] else y_op2 + else: + y_center_halves[i] = (y_left_halves[i] + y_right_halves[i]) // 2 + else: + y_center_halves[i] = -1 + + new_info = {} + new_info['x'] = x + new_info['y_valley_centers'] = y_valley_centers + new_info['y_left_halves'] = y_left_halves + new_info['y_right_halves'] = y_right_halves + new_info['y_peaks'] = y_peaks + new_info['y_middle_halves'] = y_center_halves + new_info['valid_orderlet'] = t_orderlet_x + new_info['valid_trace_index'] = t_idx_x + new_order_trace.append(new_info) + + # loop through the trace included in the same order + for od_t in t_idxs: + new_lower_widths = [] + new_upper_widths = [] + + # loop through new order trace at different x location + for one_info in new_order_trace: + idx_in_valid = np.where(one_info['valid_trace_index'] == od_t)[0] + if idx_in_valid.size == 0: + continue + idx = idx_in_valid[0] + new_lower_widths.append(one_info['y_peaks'][idx]-one_info['y_valley_centers'][idx] - w_th) + new_upper_widths.append(one_info['y_valley_centers'][idx+1] - one_info['y_peaks'][idx] - w_th) + if len(new_lower_widths) == 0: + bot = orig_widths[od_t, 0] # use the original widths + else: + bot = sum(new_lower_widths)/len(new_lower_widths) + if len(new_upper_widths) == 0: # use the original widths + top = orig_widths[od_t, 1] + else: + top = sum(new_upper_widths)/len(new_upper_widths) + + new_widths[od_t] = {'bottom_edge': bot, 'top_edge': top} + + new_coeffs = np.zeros((total_trace+1, power+3)) + new_coeffs[1:, :] = orig_coeffs + return new_coeffs, new_widths + + def convert_for_post_process(self, coeffs, widths = None): + power = self.get_poly_degree() + if widths is None: + poly_coeffs = np.flip(coeffs[:, 0:power+1], axis=1) + t_trace = np.shape(coeffs)[0] + new_coeffs = np.zeros((t_trace, power+3)) + new_coeffs[:, 0:power+1] = poly_coeffs + new_coeffs[:, power+1:power+3] = coeffs[:, power+3:power+5] + new_widths = np.array(coeffs[:, power+1:power+3]) + else: + t_trace = np.shape(coeffs)[0] -1 + new_coeffs = np.zeros((t_trace, power+3)) + new_coeffs[:, 0:power+1] = coeffs[1:, 0:power+1] + new_coeffs[:, power+1:power+3] = coeffs[1:, power+1:power+3] + new_widths = np.array([[widths[i]['bottom_edge'], widths[i]['top_edge']] for i in range(len(widths))]) + + return new_coeffs, new_widths + def extract_order_trace(self, power_for_width_estimation: int = -1, data_range = None, show_time: bool = False, print_debug: str = None, rows_to_reset = None, cols_to_reset = None): """ Order trace extraction. 
@@ -2898,7 +3117,19 @@ def extract_order_trace(self, power_for_width_estimation: int = -1, data_range =
         all_widths, cluster_coeffs = self.find_all_cluster_widths(new_index, new_x, new_y,
                                                                   power_for_width_estimation=power_for_width_estimation)
         self.time_check(t_start, "*** find widths: ")
 
+        post_coeffs, post_widths = self.convert_for_post_process(cluster_coeffs, all_widths)
+        post_coeffs, all_widths = self.post_process(post_coeffs, post_widths)
+
         self.d_print("OrderTraceAlg: write result to Pandas Dataframe", info=True)
         df = self.write_cluster_info_to_dataframe(all_widths, cluster_coeffs)
 
         return {'order_trace_result': df, 'cluster_index': new_index, 'cluster_x': new_x, 'cluster_y': new_y}
+
+    def refine_order_trace(self, order_trace_path, is_output_file, orderlet_gap=2):
+        order_trace_data = pd.read_csv(order_trace_path, header=0, index_col=0)
+        coeffs, widths = self.convert_for_post_process(order_trace_data.values)
+        new_coeffs, new_widths = self.post_process(coeffs, widths, orderlet_gap=orderlet_gap)
+
+        df = self.write_cluster_info_to_dataframe(new_widths, new_coeffs)
+        if is_output_file:
+            df.to_csv(order_trace_path)
+        return df
\ No newline at end of file
diff --git a/modules/order_trace/src/order_trace.py b/modules/order_trace/src/order_trace.py
index 2f80c3f18..ced5c2e2b 100644
--- a/modules/order_trace/src/order_trace.py
+++ b/modules/order_trace/src/order_trace.py
@@ -28,6 +28,10 @@
                 if it is defined.
         - `action.args['is_output_file'] (boolean, optional)`: if the result is output to a file or data model
                 extension. Defaults to True meaning output to a file.
+        - `action.args['orders_ccd'] (int, optional)`: total orders of the ccd. Defaults to -1.
+        - `action.args['do_post'] (boolean, optional)`: do post process only on existing order trace file.
+        - `action.args['orderlet_gap_pixels'] (number, optional)`: orderlet gap pixels between consecutive
+                orderlets, i.e. number of pixels to ignore between orderlets during extraction. Defaults to 2.
         - `context (keckdrpframework.models.processing_context.ProcessingContext)`: `context.config_path`
                 contains the path of the config file defined for the module of order trace in the master
@@ -50,6 +54,8 @@
         - `alg (modules.order_trace.src.alg.OrderTraceAlg)`: Instance of `OrderTraceAlg` which has operation codes
                 for the computation of order trace.
         - `poly_degree (int)`: Order of polynomial for order trace fitting.
+        - `do_post (bool)`: if doing post processing on existing order trace data.
+        - `orderlet_gap_pixels`: number of pixels to ignore between orderlets during extraction.
* Method `__perform`: @@ -120,6 +126,9 @@ def __init__(self, self.result_path = None self.poly_degree = None self.expected_traces = None + self.do_post = False + self.orderlet_gap_pixels = 2 + self.orders_ccd = -1 if 'data_row_range' in args_keys and action.args['data_row_range'] is not None: self.row_range = self.find_range(action.args['data_row_range'], row) @@ -144,6 +153,16 @@ def __init__(self, if 'expected_traces' in args_keys and action.args['expected_traces'] is not None: self.expected_traces = action.args['expected_traces'] + if 'orders_ccd' in args_keys and action.args['orders_ccd'] is not None: + self.orders_ccd = action.args['orders_ccd'] + + if 'do_post' in args_keys and action.args['do_post'] is not None: + self.do_post = action.args['do_post'] + + if 'orderlet_gap_pixels' in args_keys and action.args['orderlet_gap_pixels'] is not None: + self.orderlet_gap_pixels = action.args['orderlet_gap_pixels'] + + # input configuration self.config = configparser.ConfigParser() try: @@ -162,7 +181,8 @@ def __init__(self, # Order trace algorithm setup self.alg = OrderTraceAlg(self.flat_data, poly_degree=self.poly_degree, - expected_traces=self.expected_traces, config=self.config, logger=self.logger) + expected_traces=self.expected_traces, config=self.config, logger=self.logger, + orders_ccd=self.orders_ccd) def _pre_condition(self) -> bool: """ @@ -190,6 +210,19 @@ def _perform(self): """ self.alg.set_data_range([self.col_range[0], self.col_range[1], self.row_range[0], self.row_range[1]]) + + # if order trace result file exists and do_post is True, then process the result only + if self.result_path and os.path.isfile(self.result_path) and os.path.exists(self.result_path) and self.do_post: + df = self.alg.refine_order_trace(self.result_path, self.is_output_file, orderlet_gap = self.orderlet_gap_pixels) + self.input.receipt_add_entry('OrderTrace', self.__module__, f'config_path={self.config_path}', 'PASS') + if self.logger: + self.logger.info("OrderTrace: refine existing order trace result "+self.result_path) + + if self.logger: + # self.logger.info("OrderTrace: Done!") + self.logger.warning("OrderTrace: Refinement is done!") + return Arguments(df) + # 1) Locate cluster if self.logger: #self.logger.info("OrderTrace: locating cluster...") @@ -220,8 +253,16 @@ def _perform(self): self.logger.warning("OrderTrace: finding width...") all_widths, cluster_coeffs = self.alg.find_all_cluster_widths(c_index, c_x, c_y, power_for_width_estimation=3) + # 7) post processing + if self.logger: + self.logger.warning('OrderTrace: post processing...') + + post_coeffs, post_widths = self.alg.convert_for_post_process(cluster_coeffs, all_widths) + + _, all_widths = self.alg.post_process(post_coeffs, post_widths, orderlet_gap=self.orderlet_gap_pixels) + + # 8) convert result to dataframe if self.logger: - #self.logger.info("OrderTrace: writing cluster into dataframe...") self.logger.warning("OrderTrace: writing cluster into dataframe...") df = self.alg.write_cluster_info_to_dataframe(all_widths, cluster_coeffs) diff --git a/modules/spectral_extraction/src/alg.py b/modules/spectral_extraction/src/alg.py index 2100cf345..fcbe28d6d 100644 --- a/modules/spectral_extraction/src/alg.py +++ b/modules/spectral_extraction/src/alg.py @@ -488,7 +488,7 @@ def compute_order_area(self, c_order, rectified_group, output_x_dim, output_y_d def data_extraction_for_optimal_extraction(self, data_group, y_mid, y_output_mid, input_widths, output_widths, - input_x, x_output_step, mask_height, is_debug): + input_x, x_output_step, 
mask_height, is_debug=False):
         """
 
         Args:
@@ -522,6 +522,7 @@ def data_extraction_for_optimal_extraction(self, data_group,
 
         s_data = np.zeros((p_height, p_width), dtype=float) if s_dt is not None else None
         f_data = np.zeros((p_height, p_width), dtype=float) if f_dt is not None else None
 
+
         for i_x, p_x in enumerate(input_x):
             y_input = np.floor(input_widths + y_mid[i_x]).astype(int)
             y_input_idx = np.where((y_input <= (input_y_dim - 1)) & (y_input >= 0))[0]
@@ -542,8 +543,8 @@ def data_extraction_for_optimal_extraction(self, data_group,
         return s_data, f_data, is_sdata_raw
 
     def outlier_rejection_for_optimal_extraction(self, s_data, f_data, input_x, x_output_step, is_sdata_raw,
-                                                 input_widths, output_widths, y_mid, y_output_mid,
-                                                 do_column=True, is_debug=False):
+                                                  input_widths, output_widths, y_mid, y_output_mid,
+                                                  do_column=True, is_debug=False):
         """
 
         Args:
@@ -556,8 +557,8 @@ def outlier_rejection_for_optimal_extraction(self, s_data, f_data, input_x, x_ou
             output_widths (numpy.ndarray): from lower to upper width in output domain
             y_mid (numpy.ndarray): y location of the trace in input domain
             y_output_mid (int): y location of the trace in output domain
-            do_column (bool): do outlier rejection compuatation column by column.
-            is_debug (bool): output debug message
+            do_column (bool, optional): do outlier rejection column by column.
+            is_debug (bool, optional): output debug message
 
         Returns:
             t_mask (numpy.ndarray): a 2D mask denoting the rejected outlier with the same size as that of s_data.
@@ -807,8 +808,8 @@ def collect_and_extract_spectrum_curve(self, data_group, order_idx, s_rate=1):
 
         is_debug = False
         mask_height = np.shape(out_data)[1]
-        #if self.order_name == 'GREEN_SKY_FLUX':
-        #    if order_idx == 4:
+        #if self.order_name == 'GREEN_SCI_FLUX2':
+        #    if order_idx == 1:
         #    is_debug = True
         s_data, f_data, is_sdata_raw = self.data_extraction_for_optimal_extraction(data_group, y_mid, y_output_mid,
                                                                                    input_widths, y_output_widths,
diff --git a/recipes/kpf_drp.recipe b/recipes/kpf_drp.recipe
index 35d01e6dc..fe8804638 100644
--- a/recipes/kpf_drp.recipe
+++ b/recipes/kpf_drp.recipe
@@ -211,6 +211,7 @@ lev0_flat_pattern = config.ARGUMENT.flat_file
 # use the full path of ourder trace files defind in config
 order_trace_flat = config.ARGUMENT.order_trace_flat
 output_order_trace = config.ARGUMENT.order_trace_files
+orderlet_gap_pixels = config.ARGUMENT.orderlet_gap_pixels
 
 ## for spectral extraction: input and output
 # lev0_science_pattern: if watch mode => a single 2D file, /data/2D//
@@ -265,6 +266,8 @@ if 'static' in reweighting_method:
     static_ccf_ratio = config.ARGUMENT.static_ccf_ratio
 else:
     static_ccf_ratio = None
+rv_start_x = config.ARGUMENT.rv_start_x
+rv_end_x = config.ARGUMENT.rv_end_x
 
 ## for ca_hk input 2D and output L1, ca_hk trace and wavelength solution tables
 # input_hk_pattern: for watch mode => a single 2D file,
@@ -339,7 +342,8 @@ if do_order_trace:
             order_result_data = OrderTrace(flat_data, data_extension=ccd+ccd_stack,
                                        result_path=output_lev0_trace_csv, is_output_file=True,
                                        data_col_range=data_col_range, data_row_range=data_row_range,
-                                       fitting_poly_degree=poly_degree)
+                                       fitting_poly_degree=poly_degree, orders_ccd=orders_per_ccd[idx],
+                                       orderlet_gap_pixels=orderlet_gap_pixels)
 
             b_all_traces = b_all_traces and exists(output_lev0_trace_csv)
 
@@ -618,7 +622,7 @@ if do_rv or do_rv_reweighting:
         rv_star_dir = masters_data_dir
 
     # L1 area to be processed
-    area_def = [[0, orders_per_ccd[0]-1, 500, -500], [0, orders_per_ccd[1]-1, 500, -500]]
+    area_def = [[0, orders_per_ccd[0]-1, rv_start_x, rv_end_x], [0,
From b6b4aa0f36a53e973371e6ef0614301bad57e80d Mon Sep 17 00:00:00 2001
From: cwang2016
Date: Tue, 24 Oct 2023 09:05:19 -0700
Subject: [PATCH 80/86] update config file, kpf_masters_l1.cfg, per the new
 parameters defined in kpf_drp.cfg.

---
 configs/kpf_masters_l1.cfg     | 6 ++++++
 modules/order_trace/src/alg.py | 2 +-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/configs/kpf_masters_l1.cfg b/configs/kpf_masters_l1.cfg
index 79b8e1ebd..095790c43 100644
--- a/configs/kpf_masters_l1.cfg
+++ b/configs/kpf_masters_l1.cfg
@@ -63,6 +63,8 @@ order_trace_flat = /data/reference_fits/kpf_20230920_master_flat.fits
 order_trace_files = [ '/data/reference_fits/kpf_20230920_master_flat_GREEN_CCD.csv', '/data/reference_fits/kpf_20230920_master_flat_RED_CCD.csv']
 fitting_poly_degree = 3
 ccd_idx = [0, 1]
+# number of pixels to ignore between orderlets during extraction
+orderlet_gap_pixels = 1.5
 
 # for spectral extraction:
 # - update orders_per_ccd, start_order, orderlet_names per new order trace result
@@ -110,6 +112,10 @@ reweighting_enable_masks = [['espresso'], ['espresso']]
 ccf_ext = ['GREEN_CCF', 'RED_CCF']
 rv_ext = RV
 static_ccf_ratio = ['/code/KPF-Pipeline/static/static_green_ccf_ratio_2.csv', '/code/KPF-Pipeline/static/static_red_ccf_ratio_2.csv']
+# starting and ending location for CCF calculation, >= 0, position relative to left end of the image,
+# < 0, position relative to the right end of the image
+rv_start_x = 500
+rv_end_x = -500
 
 # for ca_hk:
 # hk_fiber_list: []
diff --git a/modules/order_trace/src/alg.py b/modules/order_trace/src/alg.py
index cfcd93f7c..49bc910ab 100644
--- a/modules/order_trace/src/alg.py
+++ b/modules/order_trace/src/alg.py
@@ -3119,7 +3119,7 @@ def extract_order_trace(self, power_for_width_estimation: int = -1, data_range
         data_range = self.time_check(t_start, "*** find widths: ")
         post_coeffs, post_widths = self.convert_for_post_process(cluster_coeffs, all_widths)
-        post_coeffs, all_widths = self.post_process(post_coeffs, post_widths)
+        _, all_widths = self.post_process(post_coeffs, post_widths)
         self.d_print("OrderTraceAlg: write result to Pandas Dataframe", info=True)
         df = self.write_cluster_info_to_dataframe(all_widths, cluster_coeffs)
         return {'order_trace_result': df, 'cluster_index': new_index, 'cluster_x': new_x, 'cluster_y': new_y}
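The rv_start_x / rv_end_x comment above follows a slice-like convention: a value >= 0 counts from the left edge of the image and a value < 0 from the right edge, which is also how the recipe's area_def consumes these numbers. A quick sketch of that mapping (the helper name resolve_rv_column_range is hypothetical, not pipeline API):

    def resolve_rv_column_range(rv_start_x, rv_end_x, n_cols):
        # Negative positions count from the right edge of the image,
        # mirroring the config comment for rv_start_x / rv_end_x.
        start = rv_start_x if rv_start_x >= 0 else n_cols + rv_start_x
        end = rv_end_x if rv_end_x >= 0 else n_cols + rv_end_x
        return start, end

    # e.g. on a hypothetical 4080-column image with the defaults above:
    # resolve_rv_column_range(500, -500, 4080) -> (500, 3580)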
From 17019e21b7e9b083230bb07151fbeb73c3d1dda2 Mon Sep 17 00:00:00 2001
From: cwang2016
Date: Tue, 24 Oct 2023 10:39:29 -0700
Subject: [PATCH 81/86] fix regression test by updating order trace interface.

---
 modules/order_trace/src/alg.py         | 21 ++++++++++++++-------
 modules/order_trace/src/order_trace.py | 21 ++++++++++++++-------
 recipes/kpf_drp.recipe                 |  8 ++++----
 3 files changed, 32 insertions(+), 18 deletions(-)

diff --git a/modules/order_trace/src/alg.py b/modules/order_trace/src/alg.py
index 49bc910ab..382f2d3cf 100644
--- a/modules/order_trace/src/alg.py
+++ b/modules/order_trace/src/alg.py
@@ -53,7 +53,8 @@ class OrderTraceAlg(ModuleAlgBase):
         data_range (list): Range of data to be traced, [, , , ].
         original_size (list): Original size of the flat data, [, ].
         poly_degree (int): Order of polynomial for order trace fitting.
         orders_ccd (number, options)`: Total orders of the ccd. Defaults to -1.
+        do_post (bool, optional): do post process to refine the upper/lower edges. Defaults to False.
 
     Raises:
         AttributeError: The ``Raises`` section is a list of all exceptions that are relevant to the interface.
@@ -66,7 +67,8 @@ class OrderTraceAlg(ModuleAlgBase):
     LOWER = 0
     name = 'OrderTrace'
 
-    def __init__(self, data, poly_degree=None, expected_traces=None, orders_ccd=-1, config=None, logger=None):
+    def __init__(self, data, poly_degree=None, expected_traces=None, orders_ccd=-1, do_post=False,
+                 config=None, logger=None):
         if not isinstance(data, np.ndarray):
             raise TypeError('image data type error, cannot construct object from OrderTraceAlg')
         if not isinstance(config, ConfigParser):
@@ -89,6 +91,7 @@ def __init__(self, data, poly_degree=None, expected_traces=None, orders_ccd=-1,
         self.trace_ratio = None
         self.expected_traces = expected_traces   # this is useful for regression test
         self.orders_ccd = orders_ccd
+        self.do_post = do_post
 
     def get_config_value(self, param: str, default):
         """Get defined value from the config file.
@@ -2835,7 +2838,7 @@ def float_to_string(afloat):
         new_str = f"{afloat:.4f}"
         return new_str
 
-    def post_process(self, orig_coeffs, orig_widths, orderlet_gap = 2):
+    def post_process(self, orig_coeffs, orig_widths, orderlet_gap=2):
         """ post process and refine the calculated widths to make the widths located closer to the valley between
         two consecutive orderlet traces and in the style of being more symmetric to the valley.
 
         Args:
@@ -3045,7 +3048,8 @@ def convert_for_post_process(self, coeffs, widths = None):
         return new_coeffs, new_widths
 
     def extract_order_trace(self, power_for_width_estimation: int = -1, data_range = None, show_time: bool = False,
-                            print_debug: str = None, rows_to_reset = None, cols_to_reset = None):
+                            print_debug: str = None, rows_to_reset = None, cols_to_reset = None,
+                            orderlet_gap_pixels=2):
         """ Order trace extraction.
 
         The order trace extraction includes the steps to smooth the image, locate the clusters, form clusters,
@@ -3065,6 +3069,7 @@ def extract_order_trace(self, power_for_width_estimation: int = -1, data_range
             Defaults to None.
             rows_to_reset (list, optional): Collection of rows to reset. Default to None.
             cols_to_reset (list, optional): Collection of columns to reset. Default to None.
+            orderlet_gap_pixels (number, optional): number of pixels to ignore between orderlets during extraction.
 
         Returns:
             dict: order trace extraction and analysis result, like::
@@ -3116,10 +3121,12 @@ def extract_order_trace(self, power_for_width_estimation: int = -1, data_range
         self.d_print("OrderTraceAlg: find widths", info=True)
         all_widths, cluster_coeffs = self.find_all_cluster_widths(new_index, new_x, new_y,
                                                                   power_for_width_estimation=power_for_width_estimation)
-        self.time_check(t_start, "*** find widths: ")
-        post_coeffs, post_widths = self.convert_for_post_process(cluster_coeffs, all_widths)
+        if self.do_post:
+            self.time_check(t_start, "***OrderTraceAlg: post process: ")
+            post_coeffs, post_widths = self.convert_for_post_process(cluster_coeffs, all_widths)
+
+            _, all_widths = self.post_process(post_coeffs, post_widths, orderlet_gap=orderlet_gap_pixels)
 
-        _, all_widths = self.post_process(post_coeffs, post_widths)
         self.d_print("OrderTraceAlg: write result to Pandas Dataframe", info=True)
         df = self.write_cluster_info_to_dataframe(all_widths, cluster_coeffs)
         return {'order_trace_result': df, 'cluster_index': new_index, 'cluster_x': new_x, 'cluster_y': new_y}
diff --git a/modules/order_trace/src/order_trace.py b/modules/order_trace/src/order_trace.py
index ced5c2e2b..944ccd4a4 100644
--- a/modules/order_trace/src/order_trace.py
+++ b/modules/order_trace/src/order_trace.py
@@ -32,6 +32,8 @@
         - `action.args['do_post'] (boolean, optional)`: do post process only on existing order trace file.
         - `action.args['orderlet_pixel_gaps'] (number, options)`: orderlet gap pixels between consecutive orderlets,
           i.e. number of pixels to ignore between orderlets during extraction. Defaults to 2.
+        - `action.args['overwrite'] (bool, optional)`: overwrite existing order trace file or not.
+          Defaults to False.
         - `context (keckdrpframework.models.processing_context.ProcessingContext)`: `context.config_path`
           contains the path of the config file defined for the module of order trace in the master
@@ -56,6 +58,7 @@
         - `poly_degree (int)`: Order of polynomial for order trace fitting.
         - `do_post (bool)`: if doing post processing on existing order trace data.
         - `orderlet_gap_pixels`: number of pixels to ignore between orderlets during extraction.
+        - `overwrite (bool, optional)`: overwrite existing order trace file or not. Defaults to False.
 
     * Method `__perform`:
@@ -129,6 +132,7 @@ def __init__(self,
         self.do_post = False
         self.orderlet_gap_pixels = 2
         self.orders_ccd = -1
+        self.overwrite = False
 
         if 'data_row_range' in args_keys and action.args['data_row_range'] is not None:
             self.row_range = self.find_range(action.args['data_row_range'], row)
@@ -162,6 +166,8 @@ def __init__(self,
         if 'orderlet_gap_pixels' in args_keys and action.args['orderlet_gap_pixels'] is not None:
             self.orderlet_gap_pixels = action.args['orderlet_gap_pixels']
 
+        if 'overwrite' in args_keys and action.args['overwrite'] is not None:
+            self.overwrite = action.args['overwrite']
 
         # input configuration
         self.config = configparser.ConfigParser()
@@ -182,7 +188,7 @@ def __init__(self,
         # Order trace algorithm setup
         self.alg = OrderTraceAlg(self.flat_data, poly_degree=self.poly_degree,
                                  expected_traces=self.expected_traces, config=self.config, logger=self.logger,
-                                 orders_ccd=self.orders_ccd)
+                                 orders_ccd=self.orders_ccd, do_post=self.do_post)
 
     def _pre_condition(self) -> bool:
         """
@@ -212,7 +218,8 @@ def _perform(self):
                                  self.row_range[0], self.row_range[1]])
 
         # if order trace result file exists and do_post is True, then process the result only
-        if self.result_path and os.path.isfile(self.result_path) and os.path.exists(self.result_path) and self.do_post:
+        if self.result_path and os.path.isfile(self.result_path) and \
+                os.path.exists(self.result_path) and (not self.overwrite) and self.do_post:
             df = self.alg.refine_order_trace(self.result_path, self.is_output_file, orderlet_gap = self.orderlet_gap_pixels)
             self.input.receipt_add_entry('OrderTrace', self.__module__, f'config_path={self.config_path}', 'PASS')
             if self.logger:
@@ -254,12 +261,12 @@ def _perform(self):
         all_widths, cluster_coeffs = self.alg.find_all_cluster_widths(c_index, c_x, c_y, power_for_width_estimation=3)
 
         # 7) post processing
-        if self.logger:
-            self.logger.warning('OrderTrace: post processing...')
-
-        post_coeffs, post_widths = self.alg.convert_for_post_process(cluster_coeffs, all_widths)
+        if self.do_post:
+            if self.logger:
+                self.logger.warning('OrderTrace: post processing...')
 
-        _, all_widths = self.alg.post_process(post_coeffs, post_widths, orderlet_gap=self.orderlet_gap_pixels)
+            post_coeffs, post_widths = self.alg.convert_for_post_process(cluster_coeffs, all_widths)
+            _, all_widths = self.alg.post_process(post_coeffs, post_widths, orderlet_gap=self.orderlet_gap_pixels)
 
         # 8) convert result to dataframe
         if self.logger:
diff --git a/recipes/kpf_drp.recipe b/recipes/kpf_drp.recipe
index fe8804638..3627e7a3d 100644
--- a/recipes/kpf_drp.recipe
+++ b/recipes/kpf_drp.recipe
@@ -320,7 +320,7 @@ trace_list = []
 b_all_traces = False
 lev0_flat_rect = None
 ccd_stack = '_STACK'
-
+do_order_trace=True
 if do_order_trace:
     trace_list = []
     flat_data = None
@@ -338,12 +338,12 @@ if do_order_trace:
         # output_lev0_trace_csv = output_order_trace[idx]
         output_lev0_trace_csv = str_replace(input_flat_file, fits_ext, '_' + ccd + csv_ext);
         trace_list = trace_list + [output_lev0_trace_csv]
-        if not exists(output_lev0_trace_csv):
+        if overwrite or not exists(output_lev0_trace_csv):
             order_result_data = OrderTrace(flat_data, data_extension=ccd+ccd_stack,
                                            result_path=output_lev0_trace_csv, is_output_file=True,
                                            data_col_range=data_col_range, data_row_range=data_row_range,
-                                           fitting_poly_degree=poly_degree, orders_ccd=orders_per_ccd[idx],
-                                           orderlet_gap_pixels=orderlet_gap_pixels)
+                                           fitting_poly_degree=poly_degree, orders_ccd=orders_per_ccd[idx],
+                                           do_post=True, orderlet_gap_pixels=orderlet_gap_pixels, overwrite=overwrite)
 
         b_all_traces = b_all_traces and exists(output_lev0_trace_csv)
 
@@ -352,7 +352,7 @@ if do_order_trace:
 
     # generate rectification result to flat level0 to avoid repeating the same process in spectral extraction
-    if not exists(output_lev0_flat_rect):
+    if overwrite or not exists(output_lev0_flat_rect):
 
         # do rectification on the flat data in case the order trace result files for all ccds exist.
         # b_lev0_rect: a boolean to determine if continuing to do OrderRectification and produce output_lev0_flat_rect
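Taken together, the guard above means a run only refines an existing trace file when that file exists, overwrite is off, and do_post is set; condensed into one predicate (the helper name is illustrative, not pipeline API):

    import os

    def should_refine_existing(result_path, do_post, overwrite):
        # Mirrors the condition in OrderTrace._perform above: refine in place only
        # when a result file exists, overwrite is disabled, and post processing is on.
        return bool(result_path) and os.path.isfile(result_path) and (not overwrite) and do_post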
From 3aaf3cb2365909b357899eb41167174e05c8caa8 Mon Sep 17 00:00:00 2001
From: cwang2016
Date: Tue, 24 Oct 2023 10:45:37 -0700
Subject: [PATCH 82/86] remove unneeded line in the recipe.

---
 recipes/kpf_drp.recipe | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/recipes/kpf_drp.recipe b/recipes/kpf_drp.recipe
index 3627e7a3d..3851a045e 100644
--- a/recipes/kpf_drp.recipe
+++ b/recipes/kpf_drp.recipe
@@ -320,7 +320,7 @@ trace_list = []
 b_all_traces = False
 lev0_flat_rect = None
 ccd_stack = '_STACK'
-do_order_trace=True
+
 if do_order_trace:
     trace_list = []
     flat_data = None

From 5481ce145fb81af6c6f82e574472e497708b66e9 Mon Sep 17 00:00:00 2001
From: cwang2016
Date: Tue, 24 Oct 2023 16:28:50 -0700
Subject: [PATCH 83/86] reset the value of 'orderlet_gap_pixels' in config to
 be 1.

---
 configs/kpf_drp.cfg        | 2 +-
 configs/kpf_masters_l1.cfg | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/configs/kpf_drp.cfg b/configs/kpf_drp.cfg
index 497bd4ee1..2345ed611 100644
--- a/configs/kpf_drp.cfg
+++ b/configs/kpf_drp.cfg
@@ -48,7 +48,7 @@ order_trace_files = [ '/data/reference_fits/kpf_20230920_master_flat_GREEN_CCD.c
 fitting_poly_degree = 3
 ccd_idx = [0, 1]
 # number of pixels to ignore between orderlets during extraction
-orderlet_gap_pixels = 1.5
+orderlet_gap_pixels = 1
 
 # for spectral extraction:
 # - update orders_per_ccd, start_order, orderlet_names per new order trace result
diff --git a/configs/kpf_masters_l1.cfg b/configs/kpf_masters_l1.cfg
index 095790c43..36a5387d6 100644
--- a/configs/kpf_masters_l1.cfg
+++ b/configs/kpf_masters_l1.cfg
@@ -64,7 +64,7 @@ order_trace_files = [ '/data/reference_fits/kpf_20230920_master_flat_GREEN_CCD.c
 fitting_poly_degree = 3
 ccd_idx = [0, 1]
 # number of pixels to ignore between orderlets during extraction
-orderlet_gap_pixels = 1.5
+orderlet_gap_pixels = 1
 
 # for spectral extraction:
 # - update orders_per_ccd, start_order, orderlet_names per new order trace result

From e699689a647315edef35eec469820390f67b049a Mon Sep 17 00:00:00 2001
From: BJ Fulton
Date: Wed, 25 Oct 2023 10:40:47 -0700
Subject: [PATCH 84/86] remove global definition

---
 modules/Utils/overscan_subtract.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/Utils/overscan_subtract.py b/modules/Utils/overscan_subtract.py
index c5c021097..274ed05de 100644
--- a/modules/Utils/overscan_subtract.py
+++ b/modules/Utils/overscan_subtract.py
@@ -517,7 +517,7 @@ def _perform(self):
         frames_data = np.array(frames_data)
 
         if self.mode == 'clippedmean':
-            global overscan_clipped_mean
+            # global overscan_clipped_mean
             overscan_clipped_mean = {}
 
         for frame in range(len(self.ffi_exts)):
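With the global statement commented out in the patch above, overscan_clipped_mean becomes local to _perform while clippedmean_subtraction still resolves the name as a global, so the clipped means are no longer shared between the two methods; the next patch fixes this by storing the dictionary on the instance. A toy sketch of that pattern (class and method names hypothetical):

    class OverscanAnalyzer:
        def __init__(self):
            # Shared state lives on the instance, so no `global` statement is needed.
            self.clipped_means = {}

        def record(self, ext, value):
            # Any method can update the same dictionary through self.
            self.clipped_means[ext] = value

        def header_items(self):
            # Build (keyword, value) pairs, analogous to the OSCANV<i> header cards.
            return [('OSCANV' + str(i + 1), v) for i, v in enumerate(self.clipped_means.values())]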
From 6506a6f894732e7085f4d7d2ea587129693e5ac8 Mon Sep 17 00:00:00 2001
From: Russ
Date: Thu, 26 Oct 2023 08:44:55 -0700
Subject: [PATCH 85/86] Changed overscan_clipped_mean dictionary from a global
 variable to an object instance attribute.

---
 modules/Utils/overscan_subtract.py | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/modules/Utils/overscan_subtract.py b/modules/Utils/overscan_subtract.py
index 274ed05de..2a16c6adb 100644
--- a/modules/Utils/overscan_subtract.py
+++ b/modules/Utils/overscan_subtract.py
@@ -92,13 +92,13 @@ def __init__(self,action,context):
             self.n_sigma = self.action.args[13]
         except:
             self.n_sigma = 2.5
-        
+
         self.module_config_path = DEFAULT_CFG_PATH
         if self.ffi_exts[0] == 'RED_CCD':
             self.module_config_path = DEFAULT_CFG_PATH_RED
         elif self.ffi_exts[0] == 'GREEN_CCD':
             self.module_config_path = DEFAULT_CFG_PATH_GREEN
-        
+
         print("{} class: self.module_config_path = {}".format(self.__class__.__name__,self.module_config_path))
 
         print("Starting logger...")
@@ -112,6 +112,9 @@ def __init__(self,action,context):
         self.logger.info('Started {}'.format(self.__class__.__name__))
         self.logger.debug('module_config_path = {}'.format(self.module_config_path))
 
+        if self.mode == 'clippedmean':
+            self.overscan_clipped_mean = {}
+
 ########################################################################################
 
 """
@@ -253,7 +256,7 @@ def clippedmean_subtraction(self,image,overscan_reg,ext):
             mask = b | c
             mx = ma.masked_array(a, mask)
             avg = ma.getdata(mx.mean())
-            overscan_clipped_mean[ext] = avg.item()
+            self.overscan_clipped_mean[ext] = avg.item()
 
             self.logger.debug('---->{}.clippedmean_subtraction(): ext,overscan_reg,n_sigma,p16,med,p84,sigma,avg = {},{},{},{},{},{},{},{}'.\
                 format(self.__class__.__name__,ext,overscan_reg,n_sigma,p16,med,p84,sigma,avg))
@@ -480,7 +483,7 @@ def _perform(self):
                 channel_exts.pop(3)
                 channel_exts.pop(2)
                 self.channel_datasec_nrows = 4080
-                green2amp = True  
+                green2amp = True
         elif self.ffi_exts[0] == 'RED_CCD':
             naxis2 = l0_obj.header['RED_AMP1']['NAXIS2']
             if naxis2 >= 4000:
@@ -516,10 +519,6 @@ def _perform(self):
             frames_data.append(data_gain_corr)
         frames_data = np.array(frames_data)
 
-        if self.mode == 'clippedmean':
-            # global overscan_clipped_mean
-            overscan_clipped_mean = {}
-
         for frame in range(len(self.ffi_exts)):
 
             self.logger.debug('---->{}._perform(): frame = {}'.\
@@ -534,9 +533,9 @@ def _perform(self):
 
             if self.mode == 'clippedmean':
                 i = 1
-                for key in overscan_clipped_mean:
+                for key in self.overscan_clipped_mean:
                     keywrd = "OSCANV" + str(i)
-                    keyval = overscan_clipped_mean[key]
+                    keyval = self.overscan_clipped_mean[key]
                     keycmt = "Overscan clipped mean (e-), " + key
                     l0_obj.header[self.ffi_exts[frame]][keywrd] = (keyval, keycmt)
                     i = i + 1

From 9154ce502c5447ff6761c1d81e7f9a26030aded5 Mon Sep 17 00:00:00 2001
From: BJ Fulton
Date: Fri, 27 Oct 2023 08:41:58 -0700
Subject: [PATCH 86/86] increment version

---
 kpfpipe/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kpfpipe/__init__.py b/kpfpipe/__init__.py
index eeec21e88..285b86b81 100644
--- a/kpfpipe/__init__.py
+++ b/kpfpipe/__init__.py
@@ -7,4 +7,4 @@
 
 import kpfpipe.cli
 
-__version__ = '2.5.0'
+__version__ = '2.5.1'
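For reference, the clipped-mean statistic that clippedmean_subtraction logs (p16, med, p84, sigma, masked average) can be sketched in isolation as below — a simplified stand-in that assumes sigma is estimated from the 16th-84th percentile spread, not the pipeline's exact code; the default n_sigma matches the 2.5 fallback in __init__ above.

    import numpy as np
    import numpy.ma as ma

    def clipped_mean(a, n_sigma=2.5):
        # Robust sigma from the percentile spread, then mask samples more than
        # n_sigma from the median and average the survivors.
        p16, med, p84 = np.percentile(a, [16, 50, 84])
        sigma = 0.5 * (p84 - p16)
        mask = np.abs(a - med) > n_sigma * sigma
        return ma.masked_array(a, mask).mean()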