From b6378270bcc89b9608992a25358f69e005f3f551 Mon Sep 17 00:00:00 2001
From: mdlpstsci
Date: Thu, 19 Jan 2023 11:18:24 -0500
Subject: [PATCH] Create new release candidate 351rc1 (#1492)

* Reset crder units (#1485)

* Report the correct value with the correct units for CRDER keywords

* Add comment to CRDER keywords

* Update WFPC2 RT to work with new DRZ name (#1487)

* Avoid running runastrodriz on calibration data (#1488)

* Fix logic in astroquery_utils to properly support regression testing (#1486)

* Handle empty images more gracefully (#1490)

* Update to use photutils catalog wcs correctly (#1489)

* Let photeq fail without crashing pipeline (#1491)

Co-authored-by: Warren J. Hack
---
 drizzlepac/align.py                     |  93 ++++++++++++++------
 drizzlepac/haputils/align_utils.py      |  14 ++-
 drizzlepac/haputils/analyze.py          |   5 +-
 drizzlepac/haputils/astroquery_utils.py |  14 +++
 drizzlepac/haputils/catalog_utils.py    |   9 +-
 drizzlepac/runastrodriz.py              | 109 ++++++++++++++----------
 drizzlepac/util.py                      |   2 +-
 tests/wfpc2/test_wfpc2.py               |   8 +-
 8 files changed, 171 insertions(+), 83 deletions(-)

diff --git a/drizzlepac/align.py b/drizzlepac/align.py
index 30743c4a4..db827da59 100644
--- a/drizzlepac/align.py
+++ b/drizzlepac/align.py
@@ -80,7 +80,6 @@ def check_and_get_data(input_list: list, **pars: object) -> list:
     candidate_list = []  # File names gathered from *_asn.fits file
     ipppssoot_list = []  # ipppssoot names used to avoid duplicate downloads
     total_input_list = []  # Output full filename list of data on disk
-    member_suffix = '_flc.fits'
 
     # Loop over the input_list to determine if the item in the input_list is a full association file
     # (*_asn.fits), a full individual image file (aka singleton, *_flt.fits), or a root name specification
@@ -99,24 +98,7 @@ def check_and_get_data(input_list: list, **pars: object) -> list:
             # in this manner (vs just the ipppssoot of the association).
             # This "if" block just collects the wanted full file names.
             if suffix == 'asn':
-                try:
-                    asntab = Table.read(input_item, format='fits')
-                except FileNotFoundError:
-                    log.error('File {} not found.'.format(input_item))
-                    return(empty_list)
-                for row in asntab:
-                    if row['MEMTYPE'].startswith('PROD'):
-                        continue
-                    memname = row['MEMNAME'].lower().strip()
-                    # Need to check if the MEMNAME is a full filename or an ipppssoot
-                    if memname.find('_') != -1:
-                        candidate_list.append(memname)
-                    else:
-                        # Define suffix for all members based on what files are present
-                        if not os.path.exists(memname + member_suffix):
-                            member_suffix = '_flt.fits'
-
-                        candidate_list.append(memname + member_suffix)
+                candidate_list.extend(_get_asn_members(input_item))
             elif suffix in ['flc', 'flt', 'c0m']:
                 if lc_input_item not in candidate_list:
                     candidate_list.append(lc_input_item)
@@ -132,6 +114,7 @@ def check_and_get_data(input_list: list, **pars: object) -> list:
         elif len(input_item) == 9:
             try:
                 if input_item not in ipppssoot_list:
+                    input_item = input_item.lower()
                     # An ipppssoot of an individual file which is part of an association cannot be
                     # retrieved from MAST
                     retrieve_list = aqutils.retrieve_observation(input_item, **pars)
@@ -144,8 +127,28 @@ def check_and_get_data(input_list: list, **pars: object) -> list:
                         total_input_list += retrieve_list
                         ipppssoot_list.append(input_item)
                     else:
-                        log.error('File {} cannot be retrieved from MAST.'.format(input_item))
-                        return(empty_list)
+                        # log.error('File {} cannot be retrieved from MAST.'.format(input_item))
+                        # return(empty_list)
+                        log.warning('File {} cannot be retrieved from MAST.'.format(input_item))
+                        log.warning(f"    using pars: {pars}")
+                        # Look for already downloaded ASN and related files instead.
+                        # ASN rootnames are the only ones that end in a digit.
+                        if input_item[-1].isdigit():
+                            _asn_name = f"{input_item}_asn.fits"
+                            if not os.path.exists(_asn_name):
+                                _ = aqutils.retrieve_observation([f"{input_item}"],
+                                                                 suffix=['ASN'],
+                                                                 clobber=True)
+                            _local_files = _get_asn_members(_asn_name)
+
+                            if _local_files:
+                                log.warning(f"Using local files instead:\n {_local_files}")
+                                total_input_list.extend(_local_files)
+                            else:
+                                _lfiles = os.listdir()
+                                log.error(f"No suitable files found for input {input_item}")
+                                log.error(f"    in directory with files: \n {_lfiles}")
+                                return(total_input_list)
+
             except Exception:
                 exc_type, exc_value, exc_tb = sys.exc_info()
                 traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
@@ -162,7 +165,38 @@ def check_and_get_data(input_list: list, **pars: object) -> list:
             return(empty_list)
 
     log.info("TOTAL INPUT LIST: {}".format(total_input_list))
-    return(total_input_list)
+    return total_input_list
+
+# ----------------------------------------------------------------------------------------------------------
+
+
+def _get_asn_members(asnfile):
+
+    # Default ASN member suffix
+    member_suffix = '_flc.fits'
+
+    candidate_list = []
+    try:
+        asntab = Table.read(asnfile, format='fits')
+    except FileNotFoundError:
+        log.error('File {} not found.'.format(asnfile))
+        return []
+    for row in asntab:
+        if row['MEMTYPE'].startswith('PROD'):
+            continue
+        memname = row['MEMNAME'].lower().strip()
+        # Need to check if the MEMNAME is a full filename or an ipppssoot
+        if memname.find('_') != -1:
+            candidate_list.append(memname)
+        else:
+            # Define suffix for all members based on what files are present
+            if not os.path.exists(memname + member_suffix):
+                member_suffix = '_flt.fits'
+
+            candidate_list.append(memname + member_suffix)
+
+    return candidate_list
+
 # ------------------------------------------------------------------------------------------------------------
@@ -292,6 +326,7 @@ def perform_align(input_list, catalog_list, num_sources, archive=False, clobber=
     log.info(str(starting_dt))
     imglist = check_and_get_data(input_list, archive=archive, clobber=clobber, product_type=product_type)
     log.info("SUCCESS")
+    log.info(f"Processing: {imglist}")
 
     log.info(make_label('Processing time of [STEP 1]', starting_dt))
     starting_dt = datetime.datetime.now()
@@ -1063,6 +1098,13 @@ def generate_astrometric_catalog(imglist, **pars):
     imglist : list
         List of one or more calibrated fits images that will be used for catalog generation.
 
+    output : str, optional
+        If specified as part of the input pars dict, it provides the name of the output catalog file.
+
+    overwrite : bool, optional
+        If specified as part of the input pars dict, it specifies whether or not to overwrite any
+        catalog file already present with the same path/filename as specified in `output`.
+
     Returns
     =======
     ref_table : object
@@ -1075,12 +1117,15 @@ def generate_astrometric_catalog(imglist, **pars):
         pars['output'] = 'ref_cat.ecsv'
     else:
         pars['output'] = None
+
+    overwrite = pars.get('clobber', True)
+
     out_catalog = amutils.create_astrometric_catalog(imglist, **pars)
     pars = temp_pars.copy()
     # if the catalog has contents, write the catalog to ascii text file
     if len(out_catalog) > 0 and pars['output']:
         catalog_filename = "refcatalog.cat"
-        out_catalog.write(catalog_filename, format="ascii.fast_commented_header")
+        out_catalog.write(catalog_filename, format="ascii.fast_commented_header", overwrite=overwrite)
         log.info("Wrote reference catalog {}.".format(catalog_filename))
 
     return(out_catalog)
@@ -1102,4 +1147,4 @@ def get_default_pars(instrument, detector, step='alignment',
 
     apars = par_class(full_cfg_index[step], condition, hap_pipeline_name, pars_dir, step, True, None)
 
-    return apars.outpars
+    return apars.outpars
\ No newline at end of file
diff --git a/drizzlepac/haputils/align_utils.py b/drizzlepac/haputils/align_utils.py
index f0ca6dff5..5f5d22fe0 100755
--- a/drizzlepac/haputils/align_utils.py
+++ b/drizzlepac/haputils/align_utils.py
@@ -1320,12 +1320,18 @@ def update_image_wcs_info(tweakwcs_output, headerlet_filenames=None, fit_label=N
             updatehdr.update_wcs(hdulist, sci_extn, item.wcs, wcsname=wcs_name, reusename=True)
             info = item.meta['fit_info']
             if info['catalog'] and info['catalog'] != '':
-                rms_ra_val = info['RMS_RA'].value if info['RMS_RA'] is not None else -1.0
-                rms_dec_val = info['RMS_DEC'].value if info['RMS_DEC'] is not None else -1.0
+                # Explicitly report the RMS values in units of mas.
+                rms_ra_val = info['RMS_RA'].mas if info['RMS_RA'] is not None else -1.0
+                rms_dec_val = info['RMS_DEC'].mas if info['RMS_DEC'] is not None else -1.0
                 hdulist[sci_extn].header['RMS_RA'] = (rms_ra_val, RMS_RA_COMMENT)
                 hdulist[sci_extn].header['RMS_DEC'] = (rms_dec_val, RMS_DEC_COMMENT)
-                hdulist[sci_extn].header['CRDER1'] = info['RMS_RA'].value/3600. if info['RMS_RA'] is not None else -1.0
-                hdulist[sci_extn].header['CRDER2'] = info['RMS_DEC'].value/3600. if info['RMS_DEC'] is not None else -1.0
+                # Convert the RMS values from units of mas to deg in order to be consistent
+                # with the CUNIT keyword value, as per the FITS Paper I standard:
+                # https://www.aanda.org/articles/aa/full/2002/45/aah3859/aah3859.right.html (Section 2.6)
+                cr1_comment = RMS_RA_COMMENT.replace('mas', 'deg')
+                cr2_comment = RMS_DEC_COMMENT.replace('mas', 'deg')
+                hdulist[sci_extn].header['CRDER1'] = (info['RMS_RA'].deg, cr1_comment) if info['RMS_RA'] is not None else -1.0
+                hdulist[sci_extn].header['CRDER2'] = (info['RMS_DEC'].deg, cr2_comment) if info['RMS_DEC'] is not None else -1.0
                 hdulist[sci_extn].header['NMATCHES'] = len(info['ref_mag']) if info['ref_mag'] is not None else 0
                 hdulist[sci_extn].header['FITGEOM'] = info['fitgeom'] if info['fitgeom'] is not None else 'N/A'
             else:
diff --git a/drizzlepac/haputils/analyze.py b/drizzlepac/haputils/analyze.py
index 60ed8f934..2898b77b0 100644
--- a/drizzlepac/haputils/analyze.py
+++ b/drizzlepac/haputils/analyze.py
@@ -617,9 +617,10 @@ def verify_guiding(filename, min_length=33):
     # Trying to ignore small sources (<= 4x4 pixels in size, or npixels < 17)
     # which are either noise peaks or head-on CRs.
     segm = detect_sources(imgarr, 0, npixels=17)
-    log.debug(f'Detected {segm.nlabels} raw sources in {filename}')
-    if segm.nlabels < 2:
+    if segm is None or segm.nlabels < 2:
+        log.debug(f'Did NOT detect enough raw sources in {filename} for guiding verification.')
         return False
+    log.debug(f'Detected {segm.nlabels} raw sources in {filename} for guiding verification.')
 
     src_cat = SourceCatalog(imgarr, segm)
     # Remove likely cosmic-rays based on central_moments classification
diff --git a/drizzlepac/haputils/astroquery_utils.py b/drizzlepac/haputils/astroquery_utils.py
index 900e1854b..20b9014f9 100644
--- a/drizzlepac/haputils/astroquery_utils.py
+++ b/drizzlepac/haputils/astroquery_utils.py
@@ -107,6 +107,7 @@ def retrieve_observation(obsid, suffix=['FLC'], archive=False, clobber=False,
         log.info(
             "WARNING: No FLC or FLT files found for {}.".format(obsid))
         return local_files
+
     all_images = data_products_by_id['productFilename'].tolist()
     log.info(all_images)
     if not clobber:
@@ -118,9 +119,22 @@ def retrieve_observation(obsid, suffix=['FLC'], archive=False, clobber=False,
                 rows_to_remove.append(row_idx)
         data_products_by_id.remove_rows(rows_to_remove)
 
+    # Protect against cases where all requested observations are already
+    # present on local disk and clobber was turned off, so there are no
+    # files to be downloaded.
+    if len(data_products_by_id) == 0:
+        log.warning("No new files identified to be retrieved.")
+        return local_files
+
     manifest = Observations.download_products(data_products_by_id,
                                               mrp_only=False)
 
+    # Protect against any other problems with finding files to retrieve based on the
+    # input file specification.
+    if not manifest:
+        log.warning(f"Files {data_products_by_id} could not be retrieved. No files returned.")
+        return local_files
+
     if not clobber:
         for rownum in rows_to_remove[::-1]:
             if manifest:
diff --git a/drizzlepac/haputils/catalog_utils.py b/drizzlepac/haputils/catalog_utils.py
index bbbd72cce..cfe2ebfa6 100755
--- a/drizzlepac/haputils/catalog_utils.py
+++ b/drizzlepac/haputils/catalog_utils.py
@@ -2937,7 +2937,10 @@ def enforce_icrs_compatibility(catalog):
     # header keyword REFFRAME can be populated with anything specified by the user in the original
     # proposal.
""" - if catalog._wcs.wcs.radesys.upper() not in RADESYS_OPTIONS: - catalog._wcs.wcs.radesys = 'ICRS' - log.warning(f"Assuming input coordinates are ICRS, instead of {catalog._wcs.wcs.radesys}") + # We need to check whether or not the catalog was generated with photutils<1.6.0 or not + # since photutils=1.6.0 (apparently) renamed the 'catalog._wcs' to 'catalog.wcs'. + cat_wcs = catalog.wcs if hasattr(catalog, 'wcs') else catalog._wcs + if cat_wcs.wcs.radesys.upper() not in RADESYS_OPTIONS: + cat_wcs.wcs.radesys = 'ICRS' + log.warning(f"Assuming input coordinates are ICRS, instead of {cat_wcs.wcs.radesys}") log.warning(f"Sky coordinates of source objects may not be accurate.") diff --git a/drizzlepac/runastrodriz.py b/drizzlepac/runastrodriz.py index c8aef6778..a238fa182 100755 --- a/drizzlepac/runastrodriz.py +++ b/drizzlepac/runastrodriz.py @@ -94,6 +94,7 @@ # THIRD-PARTY import numpy as np from astropy.io import fits +import photutils import stwcs from stwcs import wcsutil @@ -165,6 +166,10 @@ gsc240_date = '2017-10-01' apriori_priority = ['HSC', 'GSC', ''] +FILTER_NAMES = {'WFPC2': ['FILTNAM1', 'FILTNAM2'], + 'ACS': ['FILTER1', 'FILTER2'], + 'WFC3': ['FILTER']} + # default marker for trailer files __trlmarker__ = '*** astrodrizzle Processing Version ' + __version__ + '***\n' @@ -196,10 +201,12 @@ def process(inFile, force=False, newpath=None, num_cores=None, inmemory=True, init_time = time.time() trlmsg = "{}: Calibration pipeline processing of {} started.\n".format(init_time, inFile) trlmsg += __trlmarker__ - trlmsg += " drizzlepac version {}".format(drizzlepac.__version__) - trlmsg += " tweakwcs version {}".format(tweakwcs.__version__) - trlmsg += " stwcs version {}".format(stwcs.__version__) - trlmsg += " numpy version {}".format(np.__version__) + trlmsg += " drizzlepac version {}\n".format(drizzlepac.__version__) + trlmsg += " tweakwcs version {}\n".format(tweakwcs.__version__) + trlmsg += " stwcs version {}\n".format(stwcs.__version__) + trlmsg += " numpy version {}\n".format(np.__version__) + trlmsg += " photutils version {}\n".format(photutils.__version__) + pipeline_pars = PIPELINE_PARS.copy() _verify = True # Switch to control whether to verify alignment or not manifest_list = [] @@ -256,7 +263,7 @@ def process(inFile, force=False, newpath=None, num_cores=None, inmemory=True, print("ERROR: Input file - %s - does not exist." 
             return
         except TypeError:
-            print("ERROR: Inappropriate input file.")
+            print("ERROR: Appropriate input file could not be found.")
             return
 
     # If newpath was specified, move all files to that directory for processing
@@ -288,7 +295,11 @@ def process(inFile, force=False, newpath=None, num_cores=None, inmemory=True,
         # files from CRDS
         print(f"Updating distortion reference files for: {inFilename}")
         wfpc2Data.apply_bestrefs(inFilename)
-        photeq.photeq(files=inFilename, ref_phot_ext=3, readonly=False)
+        try:
+            photeq.photeq(files=inFilename, ref_phot_ext=3, readonly=False)
+        except Exception as err:
+            print(err)
+            print(f"WARNING: PHOTEQ was unable to run on {inFilename}")
 
         raw_suffix = '_d0m.fits'
         goodpix_name = 'GPIXELS'
@@ -432,51 +443,50 @@ def process(inFile, force=False, newpath=None, num_cores=None, inmemory=True,
     for f in _calfiles+_calfiles_flc:
         processing_utils.compute_sregion(f)
 
-    # Run updatewcs on each list of images to define pipeline default WCS
-    # based on latest distortion models
-    # Always apply latest distortion to replace pipeline-default OPUS WCS
-    # for successful creation of updated headerlets for the cases where
-    # all inputs having EXPTIME==0 (for example) or guiding is bad.
-    updatewcs.updatewcs(_calfiles, use_db=False, checkfiles=False)
-    if _calfiles_flc:
-        updatewcs.updatewcs(_calfiles_flc, use_db=False, checkfiles=False)
-
-    # Check to see whether or not guide star failure affected these observations
-    # They would show up as images all sources streaked as if taken in SCAN mode or with a GRISM
-    #
-    # Note: This functionality is intentionally turned off for pipeline processing at this time.
-    #       However, a user may wish to invoke this functionality which is controlled by the
-    #       parameter "do_verify_guiding".
-    if do_verify_guiding:
-        for fltimg in _calfiles:
-            flcimg = fltimg.replace('_flt.fits', '_flc.fits')
-            guide_img = flcimg if os.path.exists(flcimg) else fltimg
-            # We want to use the FLC image, if possible, to avoid any
-            # possible detection of CTE tails as false guide-star trailing lines
-            bad_guiding = analyze.verify_guiding(guide_img)
-            if bad_guiding:
-                # If the user did not specify they wanted a drizzle product no matter what...
-                if not force:
-                    # Remove the affected image(s) from further processing
-                    # except when user specifies they want to make a product no matter what (force=True)
-                    _calfiles.remove(fltimg)
-                    if os.path.exists(flcimg):
-                        _calfiles_flc.remove(flcimg)
-                # If ANY input exposure has bad guiding, none of the data can be
-                # trusted.  However, only allow alignment if the user forces the alignment.
-                if not force_alignment:
-                    # Turn off any alignment to GAIA
-                    # After all, if one exposure has bad guiding,
-                    # none of the WCS information can be trusted.
-                    align_to_gaia = False
-                    align_with_apriori = False
-
     # If we no longer have any valid images to process due to guiding problems,
     # set drizcorr to OMIT and finish processing gracefully.
     if len(_calfiles) == 0:
         dcorr = 'OMIT'
 
     if dcorr == 'PERFORM':
+        # Run updatewcs on each list of images to define pipeline default WCS
+        # based on latest distortion models
+        # Always apply latest distortion to replace pipeline-default OPUS WCS
+        # for successful creation of updated headerlets for the cases where
+        # all inputs have EXPTIME==0 (for example) or guiding is bad.
+        updatewcs.updatewcs(_calfiles, use_db=False, checkfiles=False)
+        if _calfiles_flc:
+            updatewcs.updatewcs(_calfiles_flc, use_db=False, checkfiles=False)
+
+        # Check to see whether guide star failure affected these observations
+        # They would show up as images with all sources streaked as if taken in SCAN mode or with a GRISM
+        #
+        # Note: This functionality is intentionally turned off for pipeline processing at this time.
+        #       However, a user may wish to invoke this functionality which is controlled by the
+        #       parameter "do_verify_guiding".
+        if do_verify_guiding:
+            for fltimg in _calfiles:
+                flcimg = fltimg.replace('_flt.fits', '_flc.fits')
+                guide_img = flcimg if os.path.exists(flcimg) else fltimg
+                # We want to use the FLC image, if possible, to avoid any
+                # possible detection of CTE tails as false guide-star trailing lines
+                bad_guiding = analyze.verify_guiding(guide_img)
+                if bad_guiding:
+                    # If the user did not specify they wanted a drizzle product no matter what...
+                    if not force:
+                        # Remove the affected image(s) from further processing
+                        # except when user specifies they want to make a product no matter what (force=True)
+                        _calfiles.remove(fltimg)
+                        if os.path.exists(flcimg):
+                            _calfiles_flc.remove(flcimg)
+                    # If ANY input exposure has bad guiding, none of the data can be
+                    # trusted.  However, only allow alignment if the user forces the alignment.
+                    if not force_alignment:
+                        # Turn off any alignment to GAIA
+                        # After all, if one exposure has bad guiding,
+                        # none of the WCS information can be trusted.
+                        align_to_gaia = False
+                        align_with_apriori = False
 
         """
         Start updating the data and verifying that the new alignment is valid.
@@ -2044,12 +2054,21 @@ def _analyze_exposure(filename):
 
     fhdu = fits.getheader(filename)
     targname = fhdu['targname']
+    filts = fhdu['filt*']
+    instrument = fhdu['instrume']
+
+    filters = [filts[filtname].strip() for filtname in FILTER_NAMES[instrument]]
 
     if any(x in targname for x in ['DARK', 'TUNG', 'BIAS', 'FLAT', 'DEUT', 'EARTH-CAL']):
+        print(f"ERROR: Inappropriate target with name {targname}")
+        process_exposure = False
+
+    if all(filt == '' for filt in filters):
+        print(f"ERROR: Inappropriate filters for exposure: {filters}")
         process_exposure = False
 
     return process_exposure
 
+
 # Functions to support execution from the shell.
 def main():
diff --git a/drizzlepac/util.py b/drizzlepac/util.py
index a43bc8753..d162aed0f 100644
--- a/drizzlepac/util.py
+++ b/drizzlepac/util.py
@@ -258,7 +258,7 @@ def output(msg):
         def output(msg):
             print(msg)
 
-    pkgs = ['numpy', 'astropy', 'stwcs']
+    pkgs = ['numpy', 'astropy', 'stwcs', 'photutils']
     if packages is not None:
         if not isinstance(packages, list):
             packages = [packages]
diff --git a/tests/wfpc2/test_wfpc2.py b/tests/wfpc2/test_wfpc2.py
index 405dbd11f..5306d191b 100644
--- a/tests/wfpc2/test_wfpc2.py
+++ b/tests/wfpc2/test_wfpc2.py
@@ -19,7 +19,7 @@ def test_waiver_single(self):
                       for i in raw_inputs]
 
         output = 'wfpc2_single_waiver'
-        outfile = '{}_drz.fits'.format(output)
+        outfile = '{}_drw.fits'.format(output)
         reffile = 'reference_single_waiver.fits'
 
         # Update WCS for all inputs
@@ -66,7 +66,7 @@ def test_waiver_asn(self):
                       for i in raw_inputs]
 
         output = 'wfpc2_waiver'
-        outfile = '{}_drz.fits'.format(output)
+        outfile = '{}_drw.fits'.format(output)
         reffile = 'reference_wfpc2_asn_waiver.fits'
 
         # Update WCS for all inputs
@@ -100,7 +100,7 @@ def test_wfpc2_single(self):
                       for i in raw_inputs]
 
        output = 'wfpc2_single_mef'
-        outfile = '{}_drz.fits'.format(output)
+        outfile = '{}_drw.fits'.format(output)
         reffile = 'reference_single_mef.fits'
 
         # Update WCS for all inputs
@@ -141,7 +141,7 @@ def test_mef_asn(self):
                       for i in raw_inputs]
 
         output = 'wfpc2_mef'
-        outfile = '{}_drz.fits'.format(output)
+        outfile = '{}_drw.fits'.format(output)
         reffile = 'reference_wfpc2_asn_mef.fits'
 
         # Update WCS for all inputs
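
For reference, the new _get_asn_members() helper in drizzlepac/align.py can also be exercised on its own when resolving ASN MEMNAME entries to local file names. A minimal sketch, assuming a hypothetical association file already on local disk (the rootnames below are made up, not taken from this patch):

    from drizzlepac import align

    # Hypothetical ASN rootname; MEMNAMEs that are bare ipppssoot rootnames
    # get '_flc.fits' appended when those files exist locally, otherwise
    # the helper falls back to '_flt.fits'.
    members = align._get_asn_members('j8bt06010_asn.fits')
    print(members)  # e.g. ['j8bt06nyq_flt.fits', 'j8bt06nzq_flt.fits']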
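
The CRDER1/CRDER2 change in align_utils.py writes the fit RMS in degrees so that it is consistent with the CUNIT1/CUNIT2 values, while RMS_RA/RMS_DEC remain reported in mas. A minimal sketch of the underlying conversion using astropy units (the 22.5 mas value is made up):

    import astropy.units as u

    rms_ra = 22.5 * u.mas            # hypothetical fit RMS, as reported in RMS_RA
    crder1 = rms_ra.to(u.deg).value  # 6.25e-06, the value recorded in CRDER1 (deg)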
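
The hasattr() guard added to catalog_utils.enforce_icrs_compatibility() works on both sides of the photutils 1.6.0 rename of SourceCatalog's '_wcs' attribute to 'wcs'. An equivalent version-gated accessor would look like the sketch below (the helper name is made up, and it assumes the packaging module is available); the hasattr() form used in the patch is arguably more robust, since it keys off the actual attribute rather than a version string:

    from packaging.version import Version
    import photutils

    def get_catalog_wcs(catalog):
        # photutils >= 1.6.0 exposes the catalog WCS as the public 'wcs'
        # attribute; earlier releases kept it in the private '_wcs'.
        attr = 'wcs' if Version(photutils.__version__) >= Version('1.6.0') else '_wcs'
        return getattr(catalog, attr)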
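
The FILTER_NAMES table and the new checks in runastrodriz._analyze_exposure() screen out calibration exposures (darks, biases, flats, and exposures taken with no filters selected) before any drizzle processing starts. A standalone sketch of the same screening logic, with a hypothetical input file name:

    from astropy.io import fits

    FILTER_NAMES = {'WFPC2': ['FILTNAM1', 'FILTNAM2'],
                    'ACS': ['FILTER1', 'FILTER2'],
                    'WFC3': ['FILTER']}

    hdr = fits.getheader('ibab01xyq_flt.fits')  # hypothetical exposure
    filters = [hdr[key].strip() for key in FILTER_NAMES[hdr['INSTRUME']]]
    is_calibration = (any(x in hdr['TARGNAME'] for x in
                          ['DARK', 'TUNG', 'BIAS', 'FLAT', 'DEUT', 'EARTH-CAL'])
                      or all(f == '' for f in filters))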