diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 74b08b1d4..f210a0301 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -20,9 +20,15 @@ of the list). - Detect extension name from WFPC2 flatfield files. [#1193] +- Refactored the build system to be PEP-517 and PEP-518 compliant. [#1244] + +- Fixed a bug in the drizzle algorithm that allowed input pixels with + zero weight to contribute to the output image. [#1222] + 3.3.0 (28-Sep-2021) =================== + This version includes all the functionality needed to generate source catalogs, both point source and extended (segment) source catalogs, during single-visit mosaic (SVM) processing. In fact, diff --git a/MANIFEST.in b/MANIFEST.in index 0bd0cb471..8adfc55cf 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,3 @@ -include RELIC-INFO recursive-include doc * global-include *.py *.md *.notes *.yml *.toml global-include *.x *.c *.h *.par @@ -6,5 +5,4 @@ exclude */version.py prune .pytest_cache prune .eggs prune .git -prune relic prune drizzlepac/htmlhelp diff --git a/doc/source/conf.py b/doc/source/conf.py index 697d6b3ee..f5d08a19a 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -18,7 +18,8 @@ import sys import sphinx -from distutils.version import LooseVersion +from packaging.version import Version + try: from ConfigParser import ConfigParser except ImportError: @@ -55,8 +56,8 @@ def setup(app): def check_sphinx_version(expected_version): - sphinx_version = LooseVersion(sphinx.__version__) - expected_version = LooseVersion(expected_version) + sphinx_version = Version(sphinx.__version__) + expected_version = Version(expected_version) if sphinx_version < expected_version: raise RuntimeError( "At least Sphinx version {0} is required to build this " @@ -105,7 +106,7 @@ def check_sphinx_version(expected_version): if on_rtd: extensions.append('sphinx.ext.mathjax') -elif LooseVersion(sphinx.__version__) < LooseVersion('1.4'): +elif Version(sphinx.__version__) < Version('1.4'): extensions.append('sphinx.ext.pngmath') else: @@ -131,14 +132,10 @@ def check_sphinx_version(expected_version): # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. -# -# The short X.Y version. -# version = '1.0.6' -from drizzlepac import __version__, __version_date__ -version = __version__ +from drizzlepac import __version__ as version # The full version, including alpha/beta/rc tags. # release = '1.0.6 (14-Aug-2012)' -release = "{:s} ({:s})".format(__version__, __version_date__) +release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/drizzlepac/__init__.py b/drizzlepac/__init__.py index ac1ad633d..fb830e3e5 100644 --- a/drizzlepac/__init__.py +++ b/drizzlepac/__init__.py @@ -15,8 +15,26 @@ """ import os +import re +import sys +from pkg_resources import get_distribution, DistributionNotFound -from .version import * +__version_commit__ = '' +_regex_git_hash = re.compile(r'.*\+g(\w+)') + +try: + __version__ = get_distribution(__name__).version +except DistributionNotFound: + __version__ = 'dev' + +if '+' in __version__: + match = _regex_git_hash.match(__version__) + if match: + __version_commit__ = match.groups()[0] + + +#if sys.version_info < (3, 8): +# raise ImportError("Drizzlepac requires Python 3.8 and above.") from . import ablot from . 
import adrizzle diff --git a/drizzlepac/ablot.py b/drizzlepac/ablot.py index 10ab528c9..93ee18447 100644 --- a/drizzlepac/ablot.py +++ b/drizzlepac/ablot.py @@ -27,7 +27,7 @@ print('\n Please check the installation of this package to insure C code was built successfully.') raise ImportError -from .version import * +from . import __version__ __all__ = ['blot', 'runBlot', 'help', 'getHelpAsString'] @@ -454,9 +454,7 @@ def getHelpAsString(docstring = False, show_ver = True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/adrizzle.py b/drizzlepac/adrizzle.py index 68bc1cd0b..7bcc498b0 100644 --- a/drizzlepac/adrizzle.py +++ b/drizzlepac/adrizzle.py @@ -18,7 +18,7 @@ import stwcs from stwcs import distortion -from .version import * +from . import __version__ try: from . import cdriz @@ -1195,9 +1195,7 @@ def getHelpAsString(docstring=False, show_ver=True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2 * os.linesep + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/align.py b/drizzlepac/align.py index 57aa3db44..72a98dba2 100644 --- a/drizzlepac/align.py +++ b/drizzlepac/align.py @@ -32,7 +32,6 @@ SPLUNK_MSG_FORMAT = '%(asctime)s %(levelname)s src=%(name)s- %(message)s' __version__ = 0.0 -__version_date__ = '21-Aug-2019' def _init_logger(): log = logutil.create_logger(__name__, level=logutil.logging.NOTSET, stream=sys.stdout, @@ -41,6 +40,10 @@ def _init_logger(): log = _init_logger() +# Initial values for the module log filename and the associated file handler used for the log +module_fh = None +module_logfile = "" + # ---------------------------------------------------------------------------------------------------------- @@ -166,7 +169,7 @@ def check_and_get_data(input_list: list, **pars: object) -> list: # ------------------------------------------------------------------------------------------------------------ def perform_align(input_list, catalog_list, num_sources, archive=False, clobber=False, debug=False, update_hdr_wcs=False, result=None, - runfile=None, print_fit_parameters=True, print_git_info=False, output=False, + runfile="temp_align.log", print_fit_parameters=True, print_git_info=False, output=False, headerlet_filenames=None, fit_label=None, product_type=None, **alignment_pars): """Actual Main calling function. 
@@ -243,20 +246,25 @@ def perform_align(input_list, catalog_list, num_sources, archive=False, clobber= Table which contains processing information and alignment results for every raw image evaluated """ - log.info("*** HAP PIPELINE Processing Version {!s} ({!s}) started at: {!s} ***\n".format(__version__, __version_date__, util._ptime()[0])) - if debug: loglevel = logutil.logging.DEBUG else: loglevel = logutil.logging.INFO - if runfile is not None: - loglevel = logutil.logging.DEBUG - fh = logutil.logging.FileHandler(runfile) - fh.setLevel(loglevel) - log.addHandler(fh) + # Ensure logging works properly for the PyTests, where each test must start with a fresh file handler + global module_fh + global module_logfile + if module_fh is not None: + print("Removing old file handler for logging.") + log.removeHandler(module_fh) + + module_logfile = runfile.upper() + module_fh = logutil.logging.FileHandler(runfile) + module_fh.setLevel(loglevel) + log.addHandler(module_fh) log.setLevel(loglevel) + log.info(f"{__taskname__} Version {__version__}\n") # 0: print git info if print_git_info: @@ -354,8 +362,7 @@ def perform_align(input_list, catalog_list, num_sources, archive=False, clobber= index = np.where(alignment_table.filtered_table['imageName'] == imgname)[0][0] # First ensure sources were found - - if table is None or not table[1]: + if table is None: log.warning("No sources found in image {}".format(imgname)) alignment_table.filtered_table[:]['status'] = 1 alignment_table.filtered_table[:]['processMsg'] = "No sources found" @@ -473,7 +480,9 @@ def perform_align(input_list, catalog_list, num_sources, archive=False, clobber= alignment_table.filtered_table, (catalog_index < (len(catalog_list) - 1)), apars, - print_fit_parameters=print_fit_parameters) + print_fit_parameters=print_fit_parameters, + loglevel=loglevel, + runfile=runfile) alignment_table.filtered_table = filtered_table # save fit algorithm name to dictionary key "fit method" in imglist. @@ -642,7 +651,8 @@ def make_label(label, starting_dt): # ---------------------------------------------------------------------------------------------------------- -def determine_fit_quality(imglist, filtered_table, catalogs_remaining, align_pars, print_fit_parameters=True): +def determine_fit_quality(imglist, filtered_table, catalogs_remaining, align_pars, print_fit_parameters=True, + loglevel=logutil.logging.NOTSET, runfile="temp_align.log"): """Determine the quality of the fit to the data Parameters ---------- @@ -672,6 +682,13 @@ def determine_fit_quality(imglist, filtered_table, catalogs_remaining, align_par print_fit_parameters : bool Specify whether or not to print out FIT results for each chip + loglevel : int, optional + The desired level of verbosity in the log statements displayed on the screen and written to the + .log file. Default value is 20, or 'info'. + + runfile : str, optional + Name of the log file. Default value is 'temp_align.log'. + Returns ------- max_rms_val : float @@ -700,6 +717,24 @@ def determine_fit_quality(imglist, filtered_table, catalogs_remaining, align_par * fit compromised status (Boolean) * reason fit is considered 'compromised' (only populated if "compromised" field is "True") """ + + # Set up the log file handler and the name of the log file. + # If the log file handler was never set, module_fh will be None. + # Only remove a file handler if one was set in the first place. 
+ global module_fh + global module_logfile + if module_fh is not None: + print("Removing old file handler for logging.") + log.removeHandler(module_fh) + + module_logfile = runfile.upper() + module_fh = logutil.logging.FileHandler(runfile) + module_fh.setLevel(loglevel) + + log.addHandler(module_fh) + log.setLevel(loglevel) + log.info("Log file: {}".format(module_logfile)) + max_rms_val = 1e9 num_xmatches = 0 fit_status_dict = {} @@ -919,7 +955,7 @@ def determine_fit_quality(imglist, filtered_table, catalogs_remaining, align_par # ---------------------------------------------------------------------------------------------------------------------- def determine_fit_quality_mvm_interface(imglist, filtered_table, catalogs_remaining, ref_catalog_length, - align_pars, print_fit_parameters=True, loglevel=logutil.logging.NOTSET): + align_pars, print_fit_parameters=True, loglevel=logutil.logging.NOTSET, runfile="temp_align.log"): """Simple interface to allow MVM code to use determine_fit_quality(). Parameters ---------- @@ -949,6 +985,13 @@ def determine_fit_quality_mvm_interface(imglist, filtered_table, catalogs_remain print_fit_parameters : bool Specify whether or not to print out FIT results for each chip + loglevel : int, optional + The desired level of verbosity in the log statements displayed on the screen and written to the + .log file. Default value is 20, or 'info'. + + runfile : str, optional + Name of the log file. Default value is 'temp_align.log'. + Returns ------- is_good_fit : bool @@ -992,7 +1035,9 @@ def determine_fit_quality_mvm_interface(imglist, filtered_table, catalogs_remain filtered_table, catalogs_remaining, align_pars, - print_fit_parameters) + print_fit_parameters=print_fit_parameters, + loglevel=loglevel, + runfile=runfile) # Determine if the fit quality is acceptable if fit_quality in align_pars['determine_fit_quality']['GOOD_FIT_QUALITY_VALUES']: diff --git a/drizzlepac/astrodrizzle.py b/drizzlepac/astrodrizzle.py index 8bf465a34..1bdee9c3b 100644 --- a/drizzlepac/astrodrizzle.py +++ b/drizzlepac/astrodrizzle.py @@ -45,7 +45,7 @@ from . import staticMask from . import util from . import wcs_functions -from .version import * +from . import __version__ __taskname__ = "astrodrizzle" @@ -169,8 +169,8 @@ def run(configobj, wcsmap=None): clean = configobj['STATE OF INPUT FILES']['clean'] procSteps = util.ProcSteps() - print("AstroDrizzle Version {:s} ({:s}) started at: {:s}\n" - .format(__version__, __version_date__, util._ptime()[0])) + print("AstroDrizzle Version {:s} started at: {:s}\n" + .format(__version__, util._ptime()[0])) util.print_pkg_versions(log=log) log.debug('') @@ -294,9 +294,7 @@ def getHelpAsString(docstring = False, show_ver = True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/buildwcs.py b/drizzlepac/buildwcs.py index c6da5cb1c..06d430730 100644 --- a/drizzlepac/buildwcs.py +++ b/drizzlepac/buildwcs.py @@ -21,7 +21,6 @@ # This is specifically NOT intended to match the package-wide version information. 
__version__ = '0.1.1' -__version_date__ = '13-July-2020' # These default parameter values have the same keys as the parameters from # the configObj interface @@ -428,9 +427,7 @@ def getHelpAsString(docstring = False, show_ver = True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + helpString = os.linesep + ' '.join([__taskname__, 'Version', __version__]) + os.linesep else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/catalogs.py b/drizzlepac/catalogs.py index 8d13f8f47..8c6d50541 100644 --- a/drizzlepac/catalogs.py +++ b/drizzlepac/catalogs.py @@ -6,10 +6,8 @@ """ import os, sys import copy -from distutils.version import LooseVersion import numpy as np -#import pywcs import astropy from astropy import wcs as pywcs import astropy.coordinates as coords diff --git a/drizzlepac/createMedian.py b/drizzlepac/createMedian.py index 7273e56ec..3f26019ea 100644 --- a/drizzlepac/createMedian.py +++ b/drizzlepac/createMedian.py @@ -22,7 +22,7 @@ from . import processInput from .adrizzle import _single_step_num_ -from .version import * +from . import __version__ # look in drizzlepac for createMedian.cfg: __taskname__ = "drizzlepac.createMedian" @@ -509,8 +509,7 @@ def getHelpAsString(docstring=False, show_ver=True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + ' '.join([__taskname__, 'Version', __version__]) + os.linesep else: helpString = '' diff --git a/drizzlepac/devutils/analyze_mvm_gaia_alignment.py b/drizzlepac/devutils/analyze_mvm_gaia_alignment.py index 4bd37b5c8..cf95083ca 100644 --- a/drizzlepac/devutils/analyze_mvm_gaia_alignment.py +++ b/drizzlepac/devutils/analyze_mvm_gaia_alignment.py @@ -1,9 +1,8 @@ #!/usr/bin/env python -"""Quantify how well MVM products are aligned to GAIA sources found in the image footprint - - -NOTE: daostarfinder coords are 0-indexed.""" +"""Statistically quantify how well MVM products are aligned to GAIA sources found in the image footprint +defined by the SVM-processed fl(c/t).fits input files. Statistics are reported on the differences in X, Y and +RA, Dec positions of GAIA sources compared to positions of matching point-sources in the image footprint.""" # Standard library imports import argparse @@ -35,10 +34,102 @@ SPLUNK_MSG_FORMAT = '%(asctime)s %(levelname)s src=%(name)s- %(message)s' log = logutil.create_logger(__name__, level=logutil.logging.NOTSET, stream=sys.stdout, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT) + +# ============================================================================================================ + + +def find_and_match_sources(fwhm, mosaic_hdu, mosaic_wcs, mosaic_imgname, dao_mask_array, gaia_table, + xy_gaia_coords, diagnostic_mode=False, log_level=logutil.logging.INFO): + """Runs decutils.UserStarFinder to find sources in the image, then runs cu.getMatchedLists() to identify + sources that are both ID'd as a source by daofind and ID'd as GAIA sources. It should be noted here that this + subroutine exists because the FWHM value returned by decutils.find_point_sources() doesn't always yield + enough matches, or sometimes any at all. 
+ + Parameters + ---------- + fwhm : float + FWHM value, in pixels, that decutils.UserStarFinder() will use as input + + mosaic_hdu : astropy fits.io.hdu object + FITS HDU of the user-specified MVM-processed mosaic image + + mosaic_wcs : stwcs HSTWCS object + The WCS information of the user-specified MVM-processed mosaic image + + mosaic_imgname : str + Name of the user-specified MVM-processed mosaic image + + dao_mask_array : numpy.ndarray + Image mask that defines where UserStarFinder should look for sources, and where it should not. + + gaia_table : astropy table object + Table containing positions of GAIA sources found in the image footprint + + xy_gaia_coords : astropy table object + Table containing X, Y positions of GAIA sources found in the image footprint + + diagnostic_mode : bool, optional + If set to logical 'True', additional log messages will be displayed and additional files will be + created during the course of the run. Default value is logical 'False'. + + log_level : int, optional + The desired level of verbosity in the log statements displayed on the screen and written to the + .log file. Default value is 'INFO'. + + Returns + ------- + detection_table : Astropy table + Table of sources detected by decutils.UserStarFinder() + + matches_gaia_to_det : list + A list of the indices of GAIA sources that match detected sources + + matches_det_to_gaia : list + A list of the indices of detected sources that match GAIA sources + """ + log.setLevel(log_level) + daofind = decutils.UserStarFinder(fwhm=fwhm, threshold=0.0, coords=xy_gaia_coords, + sharplo=0.4, sharphi=0.9) + detection_table = daofind(mosaic_hdu["SCI"].data, mask=dao_mask_array) + detection_table.rename_column('xcentroid', 'X') + detection_table.rename_column('ycentroid', 'Y') + n_detection = len(detection_table) + n_gaia = len(gaia_table) + pct_detection = 100.0 * (float(n_detection) / float(n_gaia)) + log.info("Found {} peaks from {} GAIA source(s)".format(n_detection, n_gaia)) + log.info("{}% of GAIA sources detected".format(pct_detection)) + + # 4: convert UserStarFinder output x, y centroid positions to RA, DEC using step 1 WCS info + ra, dec = mosaic_wcs.all_pix2world(detection_table['X'], detection_table['Y'], 0) + ra_col = Column(name="RA", data=ra, dtype=np.float64) + dec_col = Column(name="DEC", data=dec, dtype=np.float64) + detection_table.add_columns([ra_col, dec_col], indexes=[3, 3]) + if diagnostic_mode: + write_region_file("test_detection.reg", detection_table, ['RA', 'DEC'], log_level=log_level) + + # 5: Identify and isolate X, Y, RA and DEC values common to both the gaia and detection tables. 
+ # 5a: find sources common to both the gaia table and the detection table + try: + coo_prefix_string = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(6)) + gaia_coo_filename = "{}_gaia.coo".format(coo_prefix_string) + det_coo_filename = "{}_det.coo".format(coo_prefix_string) + write_region_file(gaia_coo_filename, gaia_table, ['X', 'Y'], verbose=False) + write_region_file(det_coo_filename, detection_table, ['X', 'Y'], verbose=False) + matches_gaia_to_det, matches_det_to_gaia = cu.getMatchedLists([gaia_coo_filename, det_coo_filename], + [mosaic_imgname, mosaic_imgname], + [n_gaia, n_detection], + log_level) + finally: + for item in [det_coo_filename, gaia_coo_filename]: + if os.path.exists(item): + log.debug("Removing temp coord file {}".format(item)) + os.remove(item) + return detection_table, matches_gaia_to_det, matches_det_to_gaia # ============================================================================================================ -def perform(mosaic_imgname, flcflt_list, diagnostic_mode=False, log_level=logutil.logging.INFO, plot_output_dest="none"): +def perform(mosaic_imgname, flcflt_list=None, flcflt_listfile=None, min_n_matches=10, diagnostic_mode=False, + log_level=logutil.logging.INFO, plot_output_dest="none"): """ Statistically quantify quality of GAIA MVM alignment Parameters ---------- @@ -46,8 +137,21 @@ mosaic_imgname : str Name of the MVM-processed mosaic image to process - flcflt_list : list - lList of calibrated flc.fits and/or flt.fits images to process + flcflt_list : list, optional + List of calibrated flc.fits and/or flt.fits images to process. If not explicitly specified, the + default value is logical 'None'. NOTE: Users must specify a value for either 'flcflt_list' or + 'flcflt_listfile'. Both cannot be blank. + + flcflt_listfile : str, optional + Name of a text file containing a list of calibrated flc.fits and/or flt.fits images to process, one + per line. If not explicitly specified, the default value is logical 'None'. NOTE: Users must + specify a value for either 'flcflt_list' or 'flcflt_listfile'. Both cannot be blank. + + min_n_matches : int, optional + Minimum acceptable number of cross-matches found between the GAIA catalog and the catalog of detected + sources in the FOV of the input flc/flt images. If the number of cross-matching sources returned by + find_and_match_sources() is less than this value, a hard exit will be triggered. If not explicitly + specified, the default value is 10. diagnostic_mode : bool, optional If set to logical 'True', additional log messages will be displayed and additional files will be @@ -67,21 +171,48 @@ Nothing! """ log.setLevel(log_level) + # Make sure either 'flcflt_list' or 'flcflt_listfile' is specified. + if flcflt_list is None and flcflt_listfile is None: + errmsg = "Users must specify a value for either 'flcflt_list' or 'flcflt_listfile'. " \ + "Both cannot be blank." + log.error(errmsg) + raise ValueError(errmsg) + if flcflt_list is not None and flcflt_listfile is not None: + errmsg = "Users must specify a value for either 'flcflt_list' or 'flcflt_listfile'. " \ + "Both cannot be specified." + log.error(errmsg) + raise ValueError(errmsg) + + # make sure 'plot_output_dest' has a valid input value. if plot_output_dest not in ['file', 'none', 'screen']: - errmsg = "'{}' is not a valid input for argument 'plot_output_dest'. 
Valid inputs are 'file', 'none', or 'screen'.".format(plot_output_dest) + errmsg = "'{}' is not a valid input for argument 'plot_output_dest'. Valid inputs are 'file', " \ + "'none', or 'screen'.".format(plot_output_dest) log.error(errmsg) raise ValueError(errmsg) - # 0: read in flc/flt fits files from user-specified fits file - with open(flcflt_list, mode='r') as imgfile: - imglist = imgfile.readlines() - for x in range(0, len(imglist)): - imglist[x] = imglist[x].strip() + + # -1: Get the flc/flt image list either from the user-specified list of files or from the + # user-specified list file + if flcflt_listfile: + with open(flcflt_listfile, mode='r') as imgfile: + imglist = imgfile.readlines() + for x in range(0, len(imglist)): + imglist[x] = imglist[x].strip() + if flcflt_list: + imglist = flcflt_list + + # 0: report the WCS name for each input image + padding = 5 + log.info("Summary of input image WCSNAME values") + log.info("Image Name{}WCSNAME".format(" "*(len(max(imglist, key=len)) - padding))) + for imgname in imglist: + log.info("{}{}{}".format(imgname, " " * padding, fits.getval(imgname, keyword="WCSNAME", + extname="SCI", extver=1))) # 1: generate WCS obj. for custom mosaic image mosaic_wcs = stwcs.wcsutil.HSTWCS(mosaic_imgname, ext=1) # 2a: generate table of all gaia sources in frame - gaia_table = amutils.create_astrometric_catalog(imglist, existing_wcs=mosaic_wcs, + gaia_table = amutils.create_astrometric_catalog(imglist, existing_wcs=mosaic_wcs, full_catalog=True, catalog='GAIAedr3', use_footprint=True) # 2b: Remove gaia sources outside footprint of input flc/flt images, add X and Y coord columns @@ -102,60 +233,44 @@ if diagnostic_mode: write_region_file("gaia_edr3_trimmed.reg", gaia_table, ['RA', 'DEC'], log_level=log_level) - # 3: feed x, y coords into photutils.detection.daostarfinder() as initial guesses to get actual centroid + # 3: feed x, y coords into decutils.UserStarFinder() as initial guesses to get actual centroid # positions of gaia sources # create mask image for source detection. Pixels with value of "0" are to processed, and those with value # of "1" will be omitted from processing. dao_mask_array = np.where(footprint.total_mask == 0, 1, 0) xy_gaia_coords = Table([gaia_table['X'].data.astype(np.int64), gaia_table['Y'].data.astype(np.int64)], names=('x_peak', 'y_peak')) - # the below line computes a FWHM value based on detected sources (not the gaia sources). The FWHM value - # doesn't kick out a lot of sources. - mpeaks, mfwhm = decutils.find_point_sources(mosaic_imgname, mask=np.invert(dao_mask_array), - def_fwhm=3.0, box_size=7, block_size=(1024, 1024), - diagnostic_mode=False) - # mfwhm = 25.0 # If it fails due to a lack of sources, use mfwhm = 25.0 instead. 
- daofind = decutils.UserStarFinder(fwhm=mfwhm, threshold=0.0, coords=xy_gaia_coords, - sharplo=0.4, sharphi=0.9) - detection_table = daofind(mosaic_hdu["SCI"].data, mask=dao_mask_array) - detection_table.rename_column('xcentroid', 'X') - detection_table.rename_column('ycentroid', 'Y') - n_detection = len(detection_table) - n_gaia = len(gaia_table) - pct_detection = 100.0 * (float(n_detection) / float(n_gaia)) - log.info("Found {} peaks from {} GAIA source(s)".format(n_detection, n_gaia)) - log.info("{}% of GAIA sources detected".format(pct_detection)) - - # 4: convert daostarfinder output x, y centroid positions to RA, DEC using step 1 WCS info - ra, dec = mosaic_wcs.all_pix2world(detection_table['X'], detection_table['Y'], 0) - ra_col = Column(name="RA", data=ra, dtype=np.float64) - dec_col = Column(name="DEC", data=dec, dtype=np.float64) - detection_table.add_columns([ra_col, dec_col], indexes=[3, 3]) - if diagnostic_mode: - write_region_file("test_detection.reg", detection_table, ['RA', 'DEC'], log_level=log_level) - - # 5: Identify and isolate X, Y, RA and DEC values common to both the gaia and detection tables. - # 5a: find sources common to both the gaia table and the detection table - try: - coo_prefix_string = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(6)) - gaia_coo_filename = "{}_gaia.coo".format(coo_prefix_string) - det_coo_filename = "{}_det.coo".format(coo_prefix_string) - write_region_file(gaia_coo_filename, gaia_table, ['X', 'Y'], verbose=False) - write_region_file(det_coo_filename, detection_table, ['X', 'Y'], verbose=False) - matches_gaia_to_det, matches_det_to_gaia = cu.getMatchedLists([gaia_coo_filename, det_coo_filename], - [mosaic_imgname, mosaic_imgname], - [n_gaia, n_detection], - log_level) - if len(matches_gaia_to_det) == 0: - err_msg = "Error: No matching sources found." - log.error(err_msg) - raise Exception(err_msg) - finally: - for item in [det_coo_filename, gaia_coo_filename]: - if os.path.exists(item): - log.debug("Removing temp coord file {}".format(item)) - os.remove(item) - + # 4: compute FWHM for source finding based on sources in image + mpeaks, fwhm = decutils.find_point_sources(mosaic_imgname, mask=np.invert(dao_mask_array.astype(bool)).astype(np.int16), + def_fwhm=3.0, box_size=7, block_size=(1024, 1024), + diagnostic_mode=diagnostic_mode) + # 5: Attempt to match gaia sources and UserStarFinder sources, first using the computed FWHM value, + # then a hard-wired fallback value. + fwhm_values = [fwhm, 25.0] + for ctr, fwhm_value in enumerate(fwhm_values): + detection_table, matches_gaia_to_det, matches_det_to_gaia = find_and_match_sources(fwhm_value, + mosaic_hdu, + mosaic_wcs, + mosaic_imgname, + dao_mask_array, + gaia_table, + xy_gaia_coords, + diagnostic_mode=diagnostic_mode, + log_level=log_level) + if len(matches_gaia_to_det) >= min_n_matches: + break + else: + if ctr == 0: + log.info("Not enough matching sources found. Trying again with FWHM = {}".format(fwhm_values[1])) + if ctr == 1: + err_msg = "Error: not enough matching sources found. 
Maybe try adjusting the value of " \ "'min_n_matches' (Current value: {}).".format(min_n_matches) + log.error(err_msg) + raise Exception(err_msg) + gcol = ['X', 'Y', 'ref_epoch', 'RA', 'RA_error', 'DEC', 'DEC_error', + 'pm', 'pmra', 'pmra_error', 'pmdec', 'pmdec_error'] # 5b: Isolate sources common to both the gaia table and the detection table matched_values_dict = {} for col_title in ['X', 'Y', 'RA', 'DEC']: @@ -263,10 +378,12 @@ final_plot_filename = "{}_{}".format(plotfile_prefix, final_plot_filename) csl.pdf_merger(final_plot_filename, pdf_file_list) log.info("Sourcelist comparison plots saved to file {}.".format(final_plot_filename)) + # ============================================================================================================ -def write_region_file(filename, table_data, colnames, apply_zero_index_correction=False, log_level=logutil.logging.INFO, verbose=True): +def write_region_file(filename, table_data, colnames, apply_zero_index_correction=False, + log_level=logutil.logging.INFO, verbose=True): """Write out columns from user-specified table to ds9 region file Parameters ---------- @@ -275,7 +392,7 @@ name of the output region file to be created table_data : astropy.Table - Table continaing values to be written out + Table containing values to be written out colnames : list list of the columns from table_data to write out @@ -317,16 +434,15 @@ if __name__ == "__main__": - - log_level_dict = {"critical": logutil.logging.CRITICAL, - "error": logutil.logging.ERROR, - "warning": logutil.logging.WARNING, - "info": logutil.logging.INFO, - "debug": logutil.logging.DEBUG} # Parse command-line input args parser = argparse.ArgumentParser(description='Statistically quantify quality of GAIA MVM alignment') parser.add_argument('mosaic_imgname', help='Name of the MVM-processed mosaic image to process') - parser.add_argument('flcflt_list', help='list of calibrated flc.fits and/or flt.fits images to process') + g = parser.add_mutually_exclusive_group(required=True) + g.add_argument('-ff', '--flcflt_listfile', default='none', + help='text file containing a list of calibrated flc.fits and/or flt.fits images to ' + 'process, one per line') + g.add_argument('-fl', '--flcflt_list', default='none', nargs="+", + help='list of calibrated flc.fits and/or flt.fits images to process') parser.add_argument('-d', '--diagnostic_mode', required=False, action='store_true', help='If this option is turned on, additional log messages will be displayed and ' 'additional files will be created during the course of the run.') @@ -338,6 +454,12 @@ 'Specifying "critical" will only record/display "critical" log statements, and ' 'specifying "error" will record/display both "error" and "critical" log statements, ' 'and so on.') + parser.add_argument('-m', '--min_n_matches', required=False, default=10, type=int, + help='Minimum acceptable number of cross-matches found between the GAIA catalog and ' + 'the catalog of detected sources in the FOV of the input flc/flt images. If the ' + 'number of cross-matching sources returned by find_and_match_sources() is less ' + 'than this value, a hard exit will be triggered. 
If not explicitly specified, ' + 'the default value is 10.') parser.add_argument('-p', '--plot_output_dest', required=False, default='none', choices=['file', 'none', 'screen'], help='Destination to direct plots, "screen" simply displays them to the screen. ' @@ -345,6 +467,21 @@ def write_region_file(filename, table_data, colnames, apply_zero_index_correctio 'option turns off all plot generation. Default value is "none".') input_args = parser.parse_args() + # Prep inputs for execution of perform() + if input_args.flcflt_listfile == 'none': + input_args.flcflt_listfile = None + if input_args.flcflt_list == 'none': + input_args.flcflt_list = None + + log_level_dict = {"critical": logutil.logging.CRITICAL, + "error": logutil.logging.ERROR, + "warning": logutil.logging.WARNING, + "info": logutil.logging.INFO, + "debug": logutil.logging.DEBUG} + input_args.log_level = log_level_dict[input_args.log_level] + # Perform analysis - perform(input_args.mosaic_imgname, input_args.flcflt_list, diagnostic_mode=input_args.diagnostic_mode, - log_level=log_level_dict[input_args.log_level], plot_output_dest=input_args.plot_output_dest) + perform(input_args.mosaic_imgname, flcflt_list=input_args.flcflt_list, + flcflt_listfile=input_args.flcflt_listfile, diagnostic_mode=input_args.diagnostic_mode, + min_n_matches=input_args.min_n_matches, log_level=input_args.log_level, + plot_output_dest=input_args.plot_output_dest) diff --git a/drizzlepac/drizCR.py b/drizzlepac/drizCR.py index 38def5f0c..cebe859bc 100644 --- a/drizzlepac/drizCR.py +++ b/drizzlepac/drizCR.py @@ -19,7 +19,7 @@ from . import quickDeriv from . import util from . import processInput -from . version import __version__, __version_date__ +from . import __version__ if util.can_parallel: import multiprocessing @@ -376,9 +376,7 @@ def getHelpAsString(docstring=False, show_ver=True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = "\n{:s} Version {:s} updated on {:s}\n\n".format( - __taskname__, __version__, __version_date__ - ) + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' diff --git a/drizzlepac/hapmultisequencer.py b/drizzlepac/hapmultisequencer.py index 3528c83fc..c6ae3db2e 100644 --- a/drizzlepac/hapmultisequencer.py +++ b/drizzlepac/hapmultisequencer.py @@ -70,7 +70,6 @@ log = logutil.create_logger(__name__, level=logutil.logging.NOTSET, stream=sys.stdout, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT) __version__ = 0.1 -__version_date__ = '01-May-2020' # Environment variable which controls the quality assurance testing # for the Single Visit Mosaic processing. @@ -209,8 +208,9 @@ def create_drizzle_products(total_obj_list, custom_limits=None): # create the drizzle-combined filtered image, the drizzled exposure (aka single) images, # and finally the drizzle-combined total detection image. for filt_obj in total_obj_list: - filt_obj.rules_file = rules_files[filt_obj.edp_list[0].full_filename] - + filt_obj.rules_file = proc_utils.get_rules_file(filt_obj.edp_list[0].full_filename, + rules_type='MVM', + rules_root=filt_obj.drizzle_filename) log.info("~" * 118) # Get the common WCS for all images which are part of a total detection product, # where the total detection product is detector-dependent. 
diff --git a/drizzlepac/hapsequencer.py b/drizzlepac/hapsequencer.py index b223c668a..1e0107977 100755 --- a/drizzlepac/hapsequencer.py +++ b/drizzlepac/hapsequencer.py @@ -81,7 +81,6 @@ log = logutil.create_logger(__name__, level=logutil.logging.NOTSET, stream=sys.stdout, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT) __version__ = 0.1 -__version_date__ = '07-Nov-2019' # Environment variable which controls the quality assurance testing # for the Single Visit Mosaic processing. @@ -386,8 +385,9 @@ def create_drizzle_products(total_obj_list): # Create drizzle-combined filter image as well as the single exposure drizzled image for filt_obj in total_obj.fdp_list: log.info("~" * 118) - filt_obj.rules_file = rules_files[filt_obj.edp_list[0].full_filename] - + filt_obj.rules_file = proc_utils.get_rules_file(filt_obj.edp_list[0].full_filename, + rules_root=filt_obj.drizzle_filename) + log.info(f"Filter RULES_FILE: {filt_obj.rules_file}") log.info("CREATE DRIZZLE-COMBINED FILTER IMAGE: {}\n".format(filt_obj.drizzle_filename)) filt_obj.wcs_drizzle_product(meta_wcs) product_list.append(filt_obj.drizzle_filename) @@ -408,7 +408,9 @@ def create_drizzle_products(total_obj_list): # Create drizzle-combined total detection image after the drizzle-combined filter image and # drizzled exposure images in order to take advantage of the cosmic ray flagging. log.info("CREATE DRIZZLE-COMBINED TOTAL IMAGE: {}\n".format(total_obj.drizzle_filename)) - total_obj.rules_file = total_obj.fdp_list[0].rules_file + total_obj.rules_file = proc_utils.get_rules_file(total_obj.edp_list[0].full_filename, + rules_root=total_obj.drizzle_filename) + log.info(f"Total product RULES_FILE: {total_obj.rules_file}") total_obj.wcs_drizzle_product(meta_wcs) product_list.append(total_obj.drizzle_filename) product_list.append(total_obj.trl_filename) @@ -667,7 +669,7 @@ def run_hap_processing(input_filename, diagnostic_mode=False, input_custom_pars_ if h0: co_inst = h0["INSTRUME"].lower() co_root = h0["ROOTNAME"].lower() - tokens_tuple = (co_inst, co_root[1:4], co_root[4:6], "manifest.txt") + tokens_tuple = (co_inst, co_root[1:4], co_root[4:6], "manifest.txt") manifest_name = "_".join(tokens_tuple) # Problem case - just give it the base name @@ -1240,14 +1242,14 @@ def archive_alternate_wcs(filename): ------- Nothing - Note: There is no strict form for the HDRNAME. For HAP, HDRNAME is of the form + Note: There is no strict form for the HDRNAME. For HAP, HDRNAME is of the form hst_proposid_visit_instrument_detector_filter_ipppssoo_fl[t|c]_wcsname-hlet.fits. Ex. hst_9029_01_acs_wfc_f775w_j8ca01at_flc_IDC_0461802ej-FIT_SVM_GAIAeDR3-hlet.fits """ # Get all the alternate WCSNAMEs in the science header wcs_key_dict = wcsutil.altwcs.wcsnames(filename, ext=1, include_primary=True) - + # Loop over the WCSNAMEs looking for the HDRNAMEs. If a corresponding # HDRNAME does not exist, create one. header = fits.getheader(filename, ext=0) @@ -1260,7 +1262,7 @@ def archive_alternate_wcs(filename): hdrname = header[keyword] # Handle the case where the HDRNAME keyword does not exist - create # a value from the FITS filename by removing the ".fits" suffix and - # adding information. + # adding information. 
except KeyError: hdrname = header["FILENAME"][:-5] + "_" + wcsname + "-hlet.fits" diff --git a/drizzlepac/haputils/align_utils.py b/drizzlepac/haputils/align_utils.py index 44c34d4ad..b33c6caae 100755 --- a/drizzlepac/haputils/align_utils.py +++ b/drizzlepac/haputils/align_utils.py @@ -4,8 +4,8 @@ import sys import traceback import warnings +from packaging.version import Version -from distutils.version import LooseVersion from collections import OrderedDict import numpy as np @@ -68,7 +68,7 @@ class AlignmentTable: * **apply_fit** : Updates all input image WCSs with the result of the selected 'best' fit """ - def __init__(self, input_list, clobber=False, dqname='DQ', + def __init__(self, input_list, clobber=False, dqname='DQ', process_type='', log_level=logutil.logging.NOTSET, **alignment_pars): """ Parameters @@ -83,6 +83,10 @@ def __init__(self, input_list, clobber=False, dqname='DQ', Allows the user to customize the name of the extension (`extname`) containing the data quality flags to be applied to the data during source identification. + process_type : str, optional + Specifies what type of data processing is being done on the input data. + Values include: '' (default for pipeline processing), 'SVM', 'MVM'. + log_level : int, optional Set the logging level for this processing @@ -141,7 +145,7 @@ def __init__(self, input_list, clobber=False, dqname='DQ', # Apply filter to input observations to insure that they meet minimum criteria for being able to be aligned log.info( "{} AlignmentTable: Filter STEP {}".format("-" * 20, "-" * 63)) - self.filtered_table = analyze.analyze_data(input_list, type="SVM") + self.filtered_table = analyze.analyze_data(input_list, type=process_type) log.debug("Input sorted as: \n{}".format(self.filtered_table)) if self.filtered_table['doProcess'].sum() == 0: @@ -664,7 +668,7 @@ def build_dqmask(self, chip=None): # astropy's code returned the opposite bitmask from what was originally # defined by stsci.tools own bitmask code. - if LooseVersion(stsci.tools.__version__) >= '4.0.0': + if Version(stsci.tools.__version__) >= Version('4.0.0'): dqmask = ~dqmask return dqmask @@ -1218,9 +1222,14 @@ def update_image_wcs_info(tweakwcs_output, headerlet_filenames=None, fit_label=N wcs_name = '{}-FIT_{}_{}'.format(wname, fit_label, item.meta['fit_info']['catalog']) # establish correct mapping to the science extensions - sci_ext_dict = {} - for sci_ext_ctr in range(1, num_sci_ext + 1): - sci_ext_dict["{}".format(sci_ext_ctr)] = fileutil.findExtname(hdulist, 'sci', extver=sci_ext_ctr) + try: + sci_ext_dict = {} + for sci_ext_ctr in range(1, num_sci_ext + 1): + sci_ext_dict["{}".format(sci_ext_ctr)] = fileutil.findExtname(hdulist, 'sci', extver=sci_ext_ctr) + except Exception: + exc_type, exc_value, exc_tb = sys.exc_info() + traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout) + logging.exception("message") # update header with new WCS info sci_extn = sci_ext_dict["{}".format(item.meta['chip'])] diff --git a/drizzlepac/haputils/analyze.py b/drizzlepac/haputils/analyze.py index 99b32dbc2..6b30ea6cd 100644 --- a/drizzlepac/haputils/analyze.py +++ b/drizzlepac/haputils/analyze.py @@ -415,7 +415,7 @@ def analyze_data(input_file_list, log_level=logutil.logging.DEBUG, type=""): split_sfilter = sfilter.upper().split('_') for item in split_sfilter: # This is the only circumstance when Grism/Prism data WILL be processed. 
- if item.startswith(('G', 'PR')) and not is_zero and type.upper() != "MVM": + if item.startswith(('G', 'PR')) and not is_zero and type.upper() == "SVM": no_proc_key = None no_proc_value = None log.info("The Grism/Prism data, {}, will be processed.".format(input_file)) diff --git a/drizzlepac/haputils/astrometric_utils.py b/drizzlepac/haputils/astrometric_utils.py index 70b773fc7..4f6054f6a 100644 --- a/drizzlepac/haputils/astrometric_utils.py +++ b/drizzlepac/haputils/astrometric_utils.py @@ -19,8 +19,8 @@ import inspect import sys import time -from distutils.version import LooseVersion import copy +from packaging.version import Version import numpy as np import scipy.stats as st @@ -46,7 +46,7 @@ from astropy.utils.decorators import deprecated import photutils # needed to check version -if LooseVersion(photutils.__version__) < '1.1.0': +if Version(photutils.__version__) < Version('1.1.0'): OLD_PHOTUTILS = True from photutils.segmentation import (detect_sources, deblend_sources, make_source_mask) @@ -1377,7 +1377,7 @@ def generate_source_catalog(image, dqname="DQ", output=False, fwhm=3.0, dqmask = np.bitwise_or(non_sat_mask, grown_sat_mask) # astropy's code returned the opposite bitmask from what was originally # defined by stsci.tools own bitmask code. - if LooseVersion(stsci.tools.__version__) >= '4.0.0': + if Version(stsci.tools.__version__) >= Version('4.0.0'): dqmask = ~dqmask if numWht > 0: diff --git a/drizzlepac/haputils/catalog_utils.py b/drizzlepac/haputils/catalog_utils.py index 6c5f5c60a..771122fb6 100755 --- a/drizzlepac/haputils/catalog_utils.py +++ b/drizzlepac/haputils/catalog_utils.py @@ -4,7 +4,7 @@ import copy import pickle # FIX Remove import sys -from distutils.version import LooseVersion +from packaging.version import Version from astropy.io import fits as fits from astropy.stats import sigma_clipped_stats @@ -15,7 +15,7 @@ from scipy import ndimage, stats import photutils # needed to check version -if LooseVersion(photutils.__version__) < '1.1.0': +if Version(photutils.__version__) < Version('1.1.0'): OLD_PHOTUTILS = True from photutils.segmentation import (detect_sources, source_properties, deblend_sources) @@ -2726,7 +2726,7 @@ def write_catalog(self, reject_catalogs): # Fill the nans and masked values with numeric data self.source_cat = fill_nans_maskvalues (self.source_cat, fill_value=-9999.0) - + # Write out catalog to ecsv file self.source_cat.write(self.sourcelist_filename, format=self.catalog_format) log.info("Wrote catalog file '{}' containing {} sources".format(self.sourcelist_filename, len(self.source_cat))) diff --git a/drizzlepac/haputils/diagnostic_utils.py b/drizzlepac/haputils/diagnostic_utils.py index c57b9efa7..5e798308b 100644 --- a/drizzlepac/haputils/diagnostic_utils.py +++ b/drizzlepac/haputils/diagnostic_utils.py @@ -176,7 +176,10 @@ def _instantiate(self): self.out_dict['general information'][dict_keys[key]] = self.header[key] # Now, add items which require more interpretation try: - self.out_dict['general information']['visit'] = self.header['linenum'].split(".")[0] + if self.header['primesi'].lower() == self.header['instrume'].lower(): + self.out_dict['general information']['visit'] = self.header['linenum'].split(".")[0] + else: + self.out_dict['general information']['visit'] = self.header['filename'].split("_")[2] except: self.out_dict['general information']['visit'] = self.header['filename'].split("_")[2] # determine filter... 
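The LooseVersion-to-Version migration in align_utils.py, astrometric_utils.py, and catalog_utils.py wraps both operands of each comparison in Version(). That is deliberate: unlike the removed distutils LooseVersion, packaging.Version (PEP 440) does not compare against bare strings. A small illustration of the difference, assuming only that the packaging library is installed:

    from packaging.version import Version

    assert Version("1.10") > Version("1.9")        # numeric, not lexicographic
    assert Version("4.0.0") > Version("4.0.0rc1")  # pre-releases sort before finals
    # Version("1.10") > "1.9" raises TypeError, whereas
    # LooseVersion("1.10") > "1.9" used to be accepted.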
diff --git a/drizzlepac/haputils/generate_custom_svm_param_file.py b/drizzlepac/haputils/generate_custom_svm_param_file.py index 3b5a5039f..9dc9fb90e 100644 --- a/drizzlepac/haputils/generate_custom_svm_param_file.py +++ b/drizzlepac/haputils/generate_custom_svm_param_file.py @@ -51,7 +51,6 @@ log = logutil.create_logger(__name__, level=logutil.logging.NOTSET, stream=sys.stdout, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT) __version__ = 0.1 -__version_date__ = '04-Dec-2020' # ---------------------------------------------------------------------------------------------------------------------- diff --git a/drizzlepac/haputils/make_poller_files.py b/drizzlepac/haputils/make_poller_files.py index 46d873173..fc2fa8cf2 100644 --- a/drizzlepac/haputils/make_poller_files.py +++ b/drizzlepac/haputils/make_poller_files.py @@ -103,7 +103,10 @@ def generate_poller_file(input_list, poller_file_type='svm', output_poller_filen imghdr = imghdu[0].header linelist.append("{}".format(imghdr['proposid'])) linelist.append(imgname.split("_")[-2][1:4].upper()) - linelist.append(imghdr['linenum'].split(".")[0]) + if imghdr['primesi'].lower() == imghdr['instrume'].lower(): + linelist.append(imghdr['linenum'].split(".")[0]) + else: + linelist.append(imghdr['rootname'][-5:-3].upper()) linelist.append("{}".format(imghdr['exptime'])) if imghdr['INSTRUME'].lower() == "acs": filter = poller_utils.determine_filter_name("{};{}".format(imghdr['FILTER1'], imghdr['FILTER2'])) diff --git a/drizzlepac/haputils/pandas_utils.py b/drizzlepac/haputils/pandas_utils.py index e911a9026..ebe09f58b 100644 --- a/drizzlepac/haputils/pandas_utils.py +++ b/drizzlepac/haputils/pandas_utils.py @@ -30,7 +30,6 @@ log = logutil.create_logger(__name__, level=logutil.logging.NOTSET, stream=sys.stdout, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT) __version__ = 0.1 -__version_date__ = '08-Jun-2020' DETECTOR_LEGEND = {'UVIS': 'magenta', 'IR': 'red', 'WFC': 'blue', 'SBC': 'yellow', 'HRC': 'black'} @@ -42,26 +41,26 @@ def get_pandas_data(storage_filename, data_columns, log_level=logutil.logging.NO ========== storage_filename : str Name of the file created by the harvester. - + data_columns : list List of column names to be extracted from the input dataframe. Returns ======= data_colsDF : Pandas dataframe - Dataframe which is a subset of the input Pandas dataframe. + Dataframe which is a subset of the input Pandas dataframe. The subset dataframe consists of only the requested columns and rows where all of the requested columns did not contain NaNs. """ - + # Instantiate a Pandas Dataframe Reader (lazy instantiation) df_handle = PandasDFReader(storage_filename, log_level=log_level) # In this particular case, the names of the desired columns do not # have to be further manipulated, for example, to add dataset specific # names. - # + # # Get the relevant column data, eliminating all rows which have NaNs # in any of the relevant columns. try: diff --git a/drizzlepac/haputils/processing_utils.py b/drizzlepac/haputils/processing_utils.py index 432d4f8ec..0e6ded8e4 100644 --- a/drizzlepac/haputils/processing_utils.py +++ b/drizzlepac/haputils/processing_utils.py @@ -24,7 +24,7 @@ log = logutil.create_logger(__name__, level=logutil.logging.NOTSET, stream=sys.stdout) -def get_rules_file(product, rules_type=""): +def get_rules_file(product, rules_type="", rules_root=None): """Copies default HAP rules file to local directory. This function enforces the naming convention for rules files @@ -50,6 +50,13 @@ exposure. 
Valid values: blank/empty string (''), 'SVM' or 'svm' for SVM processing (default) and 'MVM' or 'mvm' for MVM processing. + rules_root : str, optional + Name of output product to use as the rootname for the output rules file. + Specifying this filename indicates that the default rules for combining + multiple inputs should be used. + If None, output rules file will be derived from the product filename and + indicates that the rules file should be specific to a single input. + Returns ------- new_rules_name : str @@ -59,11 +66,21 @@ def get_rules_file(product, rules_type=""): # and to insure all values are converted to lower-case for use in # filenames rules_type = "" if rules_type == None else rules_type.strip(' ').lower() - hdu, closefits = _process_input(product) - rootname = '_'.join(product.split("_")[:-1]) phdu = hdu[0].header instrument = phdu['instrume'] + + # Append '_single' to rules_type if single image product + if rules_root is None: + if rules_type == "": + rules_type = 'single' + else: + rules_type = '_'.join([rules_type, 'single']) + rootname = '_'.join(product.split("_")[:-1]) + else: + rootname = '_'.join(rules_root.split("_")[:-1]) + + # Create rules name prefix here # The trailing rstrip guards against a blank rules_type, which # is the default for SVM processing. @@ -74,7 +91,7 @@ def get_rules_file(product, rules_type=""): new_rules_name = "{}_header_hap.rules".format(rootname) rules_filename = os.path.join(base_dir, 'pars', def_rules_name) new_rules_filename = os.path.join(os.getcwd(), new_rules_name) - + log.debug(f'Copying \n\t{rules_filename} \nto \n\t{new_rules_filename}') if new_rules_name not in os.listdir('.'): shutil.copy(rules_filename, new_rules_filename) diff --git a/drizzlepac/haputils/product.py b/drizzlepac/haputils/product.py index 9077cb40d..7ebe5c98d 100755 --- a/drizzlepac/haputils/product.py +++ b/drizzlepac/haputils/product.py @@ -137,7 +137,7 @@ def generate_metawcs(self): return meta_wcs - def align_to_gaia(self, catalog_list=[], output=True, + def align_to_gaia(self, catalog_list=[], output=True, process_type='SVM', fit_label='SVM', align_table=None, fitgeom=''): """Extract the flt/flc filenames from the exposure product list, as well as the corresponding headerlet filenames to use legacy alignment @@ -183,6 +183,7 @@ def align_to_gaia(self, catalog_list=[], output=True, # If necessary, generate the alignment table only once if align_table is None: align_table = align_utils.AlignmentTable(exposure_filenames, + process_type=process_type, log_level=self.log_level, **alignment_pars) align_table.find_alignment_sources(output=output, crclean=crclean) @@ -287,13 +288,25 @@ def align_to_gaia(self, catalog_list=[], output=True, alignment_pars['determine_fit_quality']['do_consistency_check'] = False # Evaluate the quality of the fit + # Need to create this log file specifically for the PyTest testing environment + # when a single Python session is running and the tests are looking for dataset-specific + # log files. 
+ log_file = "temp_align.log" is_good_fit, _, _, _, _, _ = align.determine_fit_quality_mvm_interface(align_table.imglist, align_table.filtered_table, more_catalogs, num_cat, alignment_pars, print_fit_parameters=True, - loglevel=self.log_level) + loglevel=self.log_level, + runfile=log_file) + + # Clean up the temporary log file as its contents have already been captured + try: + os.remove(log_file) + except OSError: + log.warning("Unable to remove file {}.".format(log_file)) + log.warning("Output trailer files may contain duplicate information.") # Ensure the original parameters stay intact for the iterations # as the perform_fit() modifies the fitgeom @@ -315,8 +328,9 @@ def align_to_gaia(self, catalog_list=[], output=True, except Exception: log.info("Problem with fit done for catalog '{}' with method '{}' using fit geometry '{}'.". format(catalog_item, method_name, mosaic_fitgeom)) - traceback.print_exc() - + exc_type, exc_value, exc_tb = sys.exc_info() + traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout) + logging.exception("Exception encountered during alignment fit") # Try again with a different fit geometry algorithm mosaic_fitgeom_index -= 1 diff --git a/drizzlepac/haputils/which_skycell.py b/drizzlepac/haputils/which_skycell.py index d97cec147..e9ae0f40b 100644 --- a/drizzlepac/haputils/which_skycell.py +++ b/drizzlepac/haputils/which_skycell.py @@ -24,7 +24,6 @@ __version__ = 0.1 -__version_date__ = '08-Sept-2021' # ------------------------------------------------------------------------------------------------------------ diff --git a/drizzlepac/imageObject.py b/drizzlepac/imageObject.py index 9f2dc24e9..f355bbfd3 100644 --- a/drizzlepac/imageObject.py +++ b/drizzlepac/imageObject.py @@ -16,7 +16,7 @@ from . import util from . import wcs_functions from . import buildmask -from .version import * +from . import __version__ __all__ = ['baseImageObject', 'imageObject', 'WCSObject'] diff --git a/drizzlepac/imagefindpars.py b/drizzlepac/imagefindpars.py index 7c662cf0b..3d3ecd6aa 100644 --- a/drizzlepac/imagefindpars.py +++ b/drizzlepac/imagefindpars.py @@ -7,7 +7,7 @@ import os, string from stsci.tools import teal from . import util -from .version import * +from . import __version__ __taskname__ = 'drizzlepac.imagefindpars' @@ -47,9 +47,7 @@ def getHelpAsString(docstring = False, show_ver = True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/linearfit.py b/drizzlepac/linearfit.py index 05ee1f59e..0ba3a420a 100644 --- a/drizzlepac/linearfit.py +++ b/drizzlepac/linearfit.py @@ -19,7 +19,6 @@ # This is specifically NOT intended to match the package-wide version information. 
__version__ = '0.4.0' -__version_date__ = '10-Oct-2014' log = logutil.create_logger(__name__, level=logutil.logging.NOTSET) diff --git a/drizzlepac/make_custom_mosaic.py b/drizzlepac/make_custom_mosaic.py index 5e12ec08b..577887f69 100644 --- a/drizzlepac/make_custom_mosaic.py +++ b/drizzlepac/make_custom_mosaic.py @@ -90,7 +90,6 @@ format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT) __version__ = 0.1 -__version_date__ = '14-July-2021' # ------------------------------------------------------------------------------------------------------------ @@ -125,13 +124,13 @@ def calc_skycell_dist(x, y, x_ref, y_ref): def create_input_image_list(user_input): """Create list of input images based in user input from command-line - + Parameters ---------- user_input : str - Search pattern to be used to identify images to process or the name of a text file containing a list + Search pattern to be used to identify images to process or the name of a text file containing a list of images to process - + Returns ------- img_list : list diff --git a/drizzlepac/mapreg.py b/drizzlepac/mapreg.py index 479f85340..f76e0cab3 100644 --- a/drizzlepac/mapreg.py +++ b/drizzlepac/mapreg.py @@ -20,7 +20,6 @@ # This is specifically NOT intended to match the package-wide version information. __version__ = '0.1' -__version_date__ = '11-Nov-2013' __taskname__ = 'mapreg' __author__ = 'Mihai Cara' @@ -1119,9 +1118,7 @@ def getHelpAsString(docstring = False, show_ver = True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/mdriz.py b/drizzlepac/mdriz.py index faa37332f..7076cfdba 100644 --- a/drizzlepac/mdriz.py +++ b/drizzlepac/mdriz.py @@ -10,7 +10,7 @@ import getopt import sys, os from drizzlepac.astrodrizzle import AstroDrizzle -from drizzlepac.version import __version__ +from drizzlepac import __version__ from drizzlepac import util diff --git a/drizzlepac/minmed.py b/drizzlepac/minmed.py index 5f3ab9b98..716b89d31 100644 --- a/drizzlepac/minmed.py +++ b/drizzlepac/minmed.py @@ -28,7 +28,7 @@ import numpy as np from scipy import signal from stsci.image.numcombine import numCombine, num_combine -from .version import * +from . import __version__ class minmed: """ **DEPRECATED** Create a median array, rejecting the highest pixel and diff --git a/drizzlepac/nicmosData.py b/drizzlepac/nicmosData.py index 1a793151f..3f63f86e0 100644 --- a/drizzlepac/nicmosData.py +++ b/drizzlepac/nicmosData.py @@ -6,11 +6,13 @@ :License: :doc:`LICENSE` """ -from stsci.tools import fileutil -from nictools import readTDD import numpy as np + +from stsci.tools import fileutil + from .imageObject import imageObject + class NICMOSInputImage(imageObject): SEPARATOR = '_' @@ -179,7 +181,7 @@ def getdarkimg(self,chip): # Read the temperature dependeant dark file. The name for the file is taken from # the TEMPFILE keyword in the primary header. - tddobj = readTDD.fromcalfile(self.name) + tddobj = fromcalfile(self.name) if tddobj is None: return np.ones(self.full_shape, dtype=self.image_dtype) * self.getdarkcurrent() @@ -409,3 +411,63 @@ def setInstrumentParameters(self, instrpars): # Convert the science data to electrons if specified by the user. 
        self.doUnitConversions()
+
+
+def fromcalfile(filename):
+    """
+    fromcalfile: function that returns a darkobject instance given the
+    name of a cal.fits file as input. If there is no TEMPFILE keyword
+    in the primary header of the cal.fits file or if the file specified
+    by TEMPFILE cannot be found, a None object is returned.
+    """
+    hdulist = fileutil.openImage(filename)
+
+    if 'TEMPFILE' in hdulist[0].header:
+        tddfile = hdulist[0].header['TEMPFILE']
+        if tddfile == 'N/A':
+            return None
+        else:
+            tddhdulist = fileutil.openImage(tddfile)
+            return darkobject(tddhdulist)
+    else:
+        return None
+
+
+class darkobject(object):
+    def __init__(self, hdulist):
+        """
+        darkobject: This class takes as input a pyfits hdulist object.
+        The linear dark and amp glow noise components are then extracted
+        from the hdulist.
+        """
+
+        self.lindark = hdulist['LIN']
+        self.ampglow = hdulist['AMPGLOW']
+
+    def getlindark(self):
+        """
+        getlindark: darkobject method which is used to return the linear
+        dark component from a NICMOS temperature dependent dark file.
+        """
+        return self.lindark.data
+
+    def getampglow(self):
+        """
+        getampglow: darkobject method which is used to return the amp
+        glow component from a NICMOS temperature dependent dark file.
+        """
+        return self.ampglow.data
+
+    def getlindarkheader(self):
+        """
+        getlindarkheader: darkobject method used to return the header
+        information of the linear dark extension of a TDD file.
+        """
+        return self.lindark.header
+
+    def getampglowheader(self):
+        """
+        getampglowheader: darkobject method used to return the header
+        information of the amp glow extension of a TDD file.
+        """
+        return self.ampglow.header
diff --git a/drizzlepac/outputimage.py b/drizzlepac/outputimage.py
index 79d9473ca..5f8f266a5 100644
--- a/drizzlepac/outputimage.py
+++ b/drizzlepac/outputimage.py
@@ -10,7 +10,7 @@
 from stsci.tools import fileutil, logutil

 from . import wcs_functions
-from . import version
+from . import __version__
 from . 
import updatehdr from fitsblender import blendheaders @@ -265,7 +265,7 @@ def writeFITS(self, template, sciarr, whtarr, ctxarr=None, prihdu.header.set('EXTEND', value=True, after='NAXIS') prihdu.header['NEXTEND'] = nextend prihdu.header['FILENAME'] = self.output - prihdu.header['PROD_VER'] = 'DrizzlePac {}'.format(version.__version__) + prihdu.header['PROD_VER'] = 'DrizzlePac {}'.format(__version__) prihdu.header['DRIZPARS'] = (logfile, "Logfile for processing") # Update the ROOTNAME with the new value as well diff --git a/drizzlepac/pars/acs_header_hap.rules b/drizzlepac/pars/acs_header_hap.rules index 944688a75..9e5b52ba3 100644 --- a/drizzlepac/pars/acs_header_hap.rules +++ b/drizzlepac/pars/acs_header_hap.rules @@ -401,13 +401,30 @@ MTFLAG MTFLAG first # Header Keyword Rules for remaining keywords # ################################################################################ +# +# Keywords which need to be deleted for MVM products +# These are not relevant for exposures from multiple proposals +# +################################################################################ + IPPPSSOO + ASN_ID + ASN_MTYP + ASN_TAB + GYROMODE + SUNANGLE + MOONANGL + EXPFLAG + LINENUM + QUALCOM1 + QUALCOM2 + QUALCOM3 +################################################################################ FILTER1 FILTER1 multi FILTER2 FILTER2 multi GOODMAX GOODMAX max GOODMEAN GOODMEAN mean GOODMIN GOODMIN min INHERIT INHERIT first # maintain IRAF compatibility -LINENUM LINENUM first LRFWAVE LRFWAVE first NCOMBINE NCOMBINE sum MDRIZSKY MDRIZSKY mean @@ -459,7 +476,6 @@ DGEOFILE DGEOFILE multi DIRIMAGE DIRIMAGE multi DQICORR DQICORR multi DRIZCORR DRIZCORR multi -EXPFLAG EXPFLAG multi EXPSCORR EXPSCORR multi FGSLOCK FGSLOCK multi FLASHCUR FLASHCUR multi @@ -475,7 +491,6 @@ FW2OFFST FW2OFFST first FWSERROR FWSERROR multi FWSOFFST FWSOFFST first GRAPHTAB GRAPHTAB multi -GYROMODE GYROMODE multi IDCTAB IDCTAB multi IMPHTTAB IMPHTTAB multi LFLGCORR LFLGCORR multi @@ -484,7 +499,6 @@ LTM1_1 LTM1_1 float_one LTM2_2 LTM2_2 float_one MDRIZTAB MDRIZTAB multi MEANEXP MEANEXP first -MOONANGL MOONANGL first NRPTEXP NRPTEXP first OSCNTAB OSCNTAB multi P1_ANGLE P1_ANGLE first @@ -529,7 +543,6 @@ SKYSUB SKYSUB multi SKYSUM SKYSUM sum SPOTTAB SPOTTAB multi SUBARRAY SUBARRAY first -SUNANGLE SUNANGLE first SUN_ALT SUN_ALT first TIME-OBS TIME-OBS first WRTERR WRTERR multi diff --git a/drizzlepac/pars/acs_mvm_header_hap.rules b/drizzlepac/pars/acs_mvm_header_hap.rules index 6142feeb9..d4e270fb4 100644 --- a/drizzlepac/pars/acs_mvm_header_hap.rules +++ b/drizzlepac/pars/acs_mvm_header_hap.rules @@ -412,7 +412,7 @@ MTFLAG MTFLAG first / PROPOSAL INFORMATION GYROMODE SUNANGLE - MOONANGLE + MOONANGL EXPFLAG QUALCOM1 QUALCOM2 @@ -475,7 +475,6 @@ FW2ERROR FW2ERROR multi FW2OFFST FW2OFFST first FWSERROR FWSERROR multi FWSOFFST FWSOFFST first -GYROMODE GYROMODE multi LTM1_1 LTM1_1 float_one LTM2_2 LTM2_2 float_one MEANEXP MEANEXP first diff --git a/drizzlepac/pars/acs_mvm_single_header_hap.rules b/drizzlepac/pars/acs_mvm_single_header_hap.rules new file mode 100644 index 000000000..45b4dd4ae --- /dev/null +++ b/drizzlepac/pars/acs_mvm_single_header_hap.rules @@ -0,0 +1,477 @@ +!VERSION = 1.1 +!INSTRUMENT = ACS +################################################################################ +# +# Header keyword rules +# +# Columns definitions: +# Column 1: header keyword from input header or '' +# Column 2: [optional] name of table column for recording values from +# keyword specified in the first column from each input image +# =or= 
name of keyword to be updated in output image header +# Column 3: [optional] function to use to create output header value +# (output keyword name must be specified in second column) +# +# Any line that starts with '' indicates that that keyword +# or set of keywords for that header section should be deleted from the +# output header. +# +# Supported functions: first, last, min, max, mean, sum, stddev, multi +# +# Any keyword without a function will be copied to a table column with the +# name given in the second column, or first column if only 1 column has been +# specified. These keywords will also be removed from the output header unless +# another rule for the same keyword (1st column) has been specified with a +# function named in the 3rd column. +# +# All keywords *not specified in this rules file* will be derived from the first +# input image's header and used unchanged to create the final output header(s). +# So, any keyword with a rule that adds that keyword to a table will be removed from +# the output headers unless additional rules are provided to specify what values +# should be kept in the header for that keyword. +## +# Final header output will use the same formatting and order of keywords defined +# by the first image's headers. +# +# Rules for headers from all image extensions can be included in the same +# file without regard for order, although keeping them organized by extension +# makes the file easier to maintain and update. +# +# The order of the rules will determine the order of the columns in the +# final output table. As a result, the rules for EXTNAME and EXTVER are +# associated with ROOTNAME, rather than the SCI header, in order to make +# rows of the table easier to identify. +# +# Comments appended to the end of a rule will be ignored when reading the +# rules. All comments start with '#'. 
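+#
+# For example, a three-column rule like
+#
+#     EXPTIME    TEXPTIME    sum
+#
+# (one of the CAOM rules below) records each input image's EXPTIME value in a
+# table column named TEXPTIME and writes the summed value to the TEXPTIME
+# keyword of the output header, while a one-column rule like
+#
+#     ROOTNAME
+#
+# (the first table rule below) moves ROOTNAME out of the output header and
+# into the table.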
+# +# +################################################################################ +# +# Table Keyword Rules +# +################################################################################ +ROOTNAME +EXTNAME +EXTVER +A_0_2 +A_0_3 +A_0_4 +A_1_1 +A_1_2 +A_1_3 +A_2_0 +A_2_1 +A_2_2 +A_3_0 +A_3_1 +A_4_0 +ACQNAME +A_ORDER +APERTURE +ASN_ID +ASN_MTYP +ASN_TAB +ATODCORR +ATODGNA +ATODGNB +ATODGNC +ATODGND +ATODTAB +B_0_2 +B_0_3 +B_0_4 +B_1_1 +B_1_2 +B_1_3 +B_2_0 +B_2_1 +B_2_2 +B_3_0 +B_3_1 +B_4_0 +BADINPDQ +BIASCORR +BIASFILE +BIASLEVA +BIASLEVB +BIASLEVC +BIASLEVD +BINAXIS1 +BINAXIS2 +BITPIX +BLEVCORR +B_ORDER +BPIXTAB +BUNIT +CAL_VER +CBLKSIZ +CCDAMP +CCDCHIP +CCDGAIN +CCDOFSTA +CCDOFSTB +CCDOFSTC +CCDOFSTD +CCDTAB +CD1_1 +CD1_2 +CD2_1 +CD2_2 +CENTERA1 +CENTERA2 +CFLTFILE +COMPTAB +COMPTYP +CRCORR +CRMASK +CRPIX1 +CRPIX2 +CRRADIUS +CRREJTAB +CRSIGMAS +CRSPLIT +CRTHRESH +CRVAL1 +CRVAL2 +CTE_NAME +CTE_VER +CTEDIR +CTEIMAGE +CTYPE1 +CTYPE2 +D2IMFILE +DARKCORR +DARKFILE +DATE +DATE-OBS +DEC_APER +DEC_TARG +DETECTOR +DFLTFILE +DGEOFILE +DIRIMAGE +DQICORR +DRIZCORR +DRKCFILE +EQUINOX +ERRCNT +EXPEND +EXPFLAG +EXPNAME +EXPSCORR +EXPSTART +EXPTIME +EXTEND +FGSLOCK +FILENAME +FILETYPE +FILLCNT +FILTER1 +FILTER2 +FLASHCUR +FLASHDUR +FLASHSTA +FLATCORR +FLSHCORR +FLSHFILE +FW1ERROR +FW1OFFST +FW2ERROR +FW2OFFST +FWSERROR +FWSOFFST +GCOUNT +GLINCORR +GLOBLIM +GLOBRATE +GOODMAX +GOODMEAN +GOODMIN +GRAPHTAB +GYROMODE +IDCSCALE +IDCTAB +IDCTHETA +IDCV2REF +IDCV3REF +IMAGETYP +IMPHTTAB +INHERIT +INITGUES +INSTRUME +JWROTYPE +LFLGCORR +LFLTFILE +LINENUM +LOSTPIX +LRC_FAIL +LRC_XSTS +LRFWAVE +LTM1_1 +LTM2_2 +LTV1 +LTV2 +MDECODT1 +MDECODT2 +MDRIZSKY +MDRIZTAB +MEANBLEV +MEANDARK +MEANEXP +MEANFLSH +MLINTAB +MOFFSET1 +MOFFSET2 +MOONANGL +MTFLAG +NAXIS +NAXIS1 +NAXIS2 +NCOMBINE +NEXTEND +NGOODPIX +NPOLFILE +NRPTEXP +OBSMODE +OBSTYPE +OCD1_1 +OCD1_2 +OCD2_1 +OCD2_2 +OCRPIX1 +OCRPIX2 +OCRVAL1 +OCRVAL2 +OCTYPE1 +OCTYPE2 +OCX10 +OCX11 +OCY10 +OCY11 +ONAXIS1 +ONAXIS2 +OORIENTA +OPUS_VER +ORIENTAT +ORIGIN +OSCNTAB +P1_ANGLE +P1_CENTR +P1_FRAME +P1_LSPAC +P1_NPTS +P1_ORINT +P1_PSPAC +P1_PURPS +P1_SHAPE +PA_APER +PATTERN1 +PATTSTEP +PA_V3 +PCOUNT +PCTECORR +PCTEFRAC +PCTENSMD +PCTERNCL +PCTESHFT +PCTESMIT +PCTETAB +PFLTFILE +PHOTBW +PHOTCORR +PHOTFLAM +PHOTMODE +PHOTPLAM +PHOTTAB +PHOTZPT +PODPSFF +POSTARG1 +POSTARG2 +PRIMESI +PR_INV_F +PR_INV_L +PR_INV_M +PROCTIME +PROPAPER +PROPOSID +QUALCOM1 +QUALCOM2 +QUALCOM3 +QUALITY +RA_APER +RA_TARG +READNSEA +READNSEB +READNSEC +READNSED +REFFRAME +REJ_RATE +RPTCORR +SCALENSE +SCLAMP +SDQFLAGS +SHADCORR +SHADFILE +SHUTRPOS +SIMPLE +SIZAXIS1 +SIZAXIS2 +SKYSUB +SKYSUM +SNRMAX +SNRMEAN +SNRMIN +SOFTERRS +SPOTTAB +STATFLAG +STDCFFF +STDCFFP +SUBARRAY +SUN_ALT +SUNANGLE +TARGNAME +TDDALPHA +TDDBETA +TELESCOP +TIME-OBS +T_SGSTAR +VAFACTOR +WCSAXES +WCSCDATE +WFCMPRSD +WRTERR +XTENSION +WCSNAME +WCSTYPE +# +# WCS Related Keyword Rules +# These move any OPUS-generated WCS values to the table +# +WCSNAMEO +WCSAXESO +LONPOLEO +LATPOLEO +RESTFRQO +RESTWAVO +CD1_1O +CD1_2O +CD2_1O +CD2_2O +CDELT1O +CDELT2O +CRPIX1O +CRPIX2O +CRVAL1O +CRVAL2O +CTYPE1O +CTYPE2O +CUNIT1O +CUNIT2O +################################################################################ +# +# Header Keyword Rules REQUIRED for CAOM +# +################################################################################ +PROPOSID PROPOSID first +TARGNAME TARGNAME first +PR_INV_L PR_INV_L first +PR_INV_F PR_INV_F first +PR_INV_M PR_INV_M first +RA_TARG RA_TARG first +DEC_TARG DEC_TARG first +INSTRUME INSTRUME first 
+DETECTOR DETECTOR first +APERTURE APERTURE multi +FILTER1 FILTER first # May need to modify this as '-' combined value +EXPEND EXPEND max +EXPSTART EXPSTART min +EXPTIME TEXPTIME sum +EXPTIME EXPTIME sum +EXPSTART DATE-BEG min # convert value to iso format -- separately? +EXPEND DATE-END max # convert value to iso format -- separately? +IMAGETYP IMAGETYP first +OBSMODE OBSMODE multi +OBSTYPE OBSTYPE first +EQUINOX EQUINOX first +REFFRAME REFFRAME multi +MTFLAG MTFLAG first +################################################################################ +# +# Keywords which need to be kept in product header +# +################################################################################ +FILTER1 FILTER1 multi +FILTER2 FILTER2 multi +GOODMAX GOODMAX max +GOODMEAN GOODMEAN mean +GOODMIN GOODMIN min +INHERIT INHERIT first # maintain IRAF compatibility +LRFWAVE LRFWAVE first +NCOMBINE NCOMBINE sum +MDRIZSKY MDRIZSKY mean +PHOTBW PHOTBW mean +PHOTFLAM PHOTFLAM mean +PHOTMODE PHOTMODE first +PHOTPLAM PHOTPLAM mean +PHOTZPT PHOTZPT mean +SNRMAX SNRMAX max +SNRMEAN SNRMEAN mean +SNRMIN SNRMIN min +TELESCOP TELESCOP first +PA_V3 PA_V3 zero +### rules below were added 05Jun2012,in response to Dorothy Fraquelli guidance re: DADS +ATODGNA ATODGNA first +ATODGNB ATODGNB first +ATODGNC ATODGNC first +ATODGND ATODGND first +BADINPDQ BADINPDQ sum +CCDCHIP CCDCHIP first +CCDGAIN CCDGAIN first +CCDOFSTA CCDOFSTA first +CCDOFSTB CCDOFSTB first +CCDOFSTC CCDOFSTC first +CCDOFSTD CCDOFSTD first +CRMASK CRMASK first +CRRADIUS CRRADIUS first +CRSPLIT CRSPLIT first +CRTHRESH CRTHRESH first +CTEDIR CTEDIR multi +CTEIMAGE CTEIMAGE first +DATE-OBS DATE-OBS first +DEC_APER DEC_APER first +DIRIMAGE DIRIMAGE multi +FGSLOCK FGSLOCK multi +FLASHCUR FLASHCUR multi +FLASHDUR FLASHDUR first +FLASHSTA FLASHSTA first +FW1ERROR FW1ERROR multi +FW1OFFST FW1OFFST first +FW2ERROR FW2ERROR multi +FW2OFFST FW2OFFST first +FWSERROR FWSERROR multi +FWSOFFST FWSOFFST first +GYROMODE GYROMODE multi +LTM1_1 LTM1_1 float_one +LTM2_2 LTM2_2 float_one +MEANEXP MEANEXP first +NRPTEXP NRPTEXP first +POSTARG1 POSTARG1 first +POSTARG2 POSTARG2 first +PRIMESI PRIMESI multi +PROPAPER PROPAPER multi +RA_APER RA_APER first +READNSEA READNSEA first +READNSEB READNSEB first +READNSEC READNSEC first +READNSED READNSED first +REJ_RATE REJ_RATE first +SCALENSE SCALENSE first +SCLAMP SCLAMP multi +SHUTRPOS SHUTRPOS multi +SKYSUB SKYSUB multi +SKYSUM SKYSUM sum +SUBARRAY SUBARRAY first +SUN_ALT SUN_ALT first +TIME-OBS TIME-OBS first +WRTERR WRTERR multi diff --git a/drizzlepac/pars/acs_single_header_hap.rules b/drizzlepac/pars/acs_single_header_hap.rules new file mode 100644 index 000000000..944688a75 --- /dev/null +++ b/drizzlepac/pars/acs_single_header_hap.rules @@ -0,0 +1,535 @@ +!VERSION = 1.1 +!INSTRUMENT = ACS +################################################################################ +# +# Header keyword rules +# +# Columns definitions: +# Column 1: header keyword from input header or '' +# Column 2: [optional] name of table column for recording values from +# keyword specified in the first column from each input image +# =or= name of keyword to be updated in output image header +# Column 3: [optional] function to use to create output header value +# (output keyword name must be specified in second column) +# +# Any line that starts with '' indicates that that keyword +# or set of keywords for that header section should be deleted from the +# output header. 
+# +# Supported functions: first, last, min, max, mean, sum, stddev, multi +# +# Any keyword without a function will be copied to a table column with the +# name given in the second column, or first column if only 1 column has been +# specified. These keywords will also be removed from the output header unless +# another rule for the same keyword (1st column) has been specified with a +# function named in the 3rd column. +# +# All keywords *not specified in this rules file* will be derived from the first +# input image's header and used unchanged to create the final output header(s). +# So, any keyword with a rule that adds that keyword to a table will be removed from +# the output headers unless additional rules are provided to specify what values +# should be kept in the header for that keyword. +## +# Final header output will use the same formatting and order of keywords defined +# by the first image's headers. +# +# Rules for headers from all image extensions can be included in the same +# file without regard for order, although keeping them organized by extension +# makes the file easier to maintain and update. +# +# The order of the rules will determine the order of the columns in the +# final output table. As a result, the rules for EXTNAME and EXTVER are +# associated with ROOTNAME, rather than the SCI header, in order to make +# rows of the table easier to identify. +# +# Comments appended to the end of a rule will be ignored when reading the +# rules. All comments start with '#'. +# +# +################################################################################ +# +# Table Keyword Rules +# +################################################################################ +ROOTNAME +EXTNAME +EXTVER +A_0_2 +A_0_3 +A_0_4 +A_1_1 +A_1_2 +A_1_3 +A_2_0 +A_2_1 +A_2_2 +A_3_0 +A_3_1 +A_4_0 +ACQNAME +A_ORDER +APERTURE +ASN_ID +ASN_MTYP +ASN_TAB +ATODCORR +ATODGNA +ATODGNB +ATODGNC +ATODGND +ATODTAB +B_0_2 +B_0_3 +B_0_4 +B_1_1 +B_1_2 +B_1_3 +B_2_0 +B_2_1 +B_2_2 +B_3_0 +B_3_1 +B_4_0 +BADINPDQ +BIASCORR +BIASFILE +BIASLEVA +BIASLEVB +BIASLEVC +BIASLEVD +BINAXIS1 +BINAXIS2 +BITPIX +BLEVCORR +B_ORDER +BPIXTAB +BUNIT +CAL_VER +CBLKSIZ +CCDAMP +CCDCHIP +CCDGAIN +CCDOFSTA +CCDOFSTB +CCDOFSTC +CCDOFSTD +CCDTAB +CD1_1 +CD1_2 +CD2_1 +CD2_2 +CENTERA1 +CENTERA2 +CFLTFILE +COMPTAB +COMPTYP +CRCORR +CRMASK +CRPIX1 +CRPIX2 +CRRADIUS +CRREJTAB +CRSIGMAS +CRSPLIT +CRTHRESH +CRVAL1 +CRVAL2 +CTE_NAME +CTE_VER +CTEDIR +CTEIMAGE +CTYPE1 +CTYPE2 +D2IMFILE +DARKCORR +DARKFILE +DATE +DATE-OBS +DEC_APER +DEC_TARG +DETECTOR +DFLTFILE +DGEOFILE +DIRIMAGE +DQICORR +DRIZCORR +DRKCFILE +EQUINOX +ERRCNT +EXPEND +EXPFLAG +EXPNAME +EXPSCORR +EXPSTART +EXPTIME +EXTEND +FGSLOCK +FILENAME +FILETYPE +FILLCNT +FILTER1 +FILTER2 +FLASHCUR +FLASHDUR +FLASHSTA +FLATCORR +FLSHCORR +FLSHFILE +FW1ERROR +FW1OFFST +FW2ERROR +FW2OFFST +FWSERROR +FWSOFFST +GCOUNT +GLINCORR +GLOBLIM +GLOBRATE +GOODMAX +GOODMEAN +GOODMIN +GRAPHTAB +GYROMODE +IDCSCALE +IDCTAB +IDCTHETA +IDCV2REF +IDCV3REF +IMAGETYP +IMPHTTAB +INHERIT +INITGUES +INSTRUME +JWROTYPE +LFLGCORR +LFLTFILE +LINENUM +LOSTPIX +LRC_FAIL +LRC_XSTS +LRFWAVE +LTM1_1 +LTM2_2 +LTV1 +LTV2 +MDECODT1 +MDECODT2 +MDRIZSKY +MDRIZTAB +MEANBLEV +MEANDARK +MEANEXP +MEANFLSH +MLINTAB +MOFFSET1 +MOFFSET2 +MOONANGL +MTFLAG +NAXIS +NAXIS1 +NAXIS2 +NCOMBINE +NEXTEND +NGOODPIX +NPOLFILE +NRPTEXP +OBSMODE +OBSTYPE +OCD1_1 +OCD1_2 +OCD2_1 +OCD2_2 +OCRPIX1 +OCRPIX2 +OCRVAL1 +OCRVAL2 +OCTYPE1 +OCTYPE2 +OCX10 +OCX11 +OCY10 +OCY11 +ONAXIS1 +ONAXIS2 +OORIENTA +OPUS_VER +ORIENTAT +ORIGIN +OSCNTAB +P1_ANGLE 
+P1_CENTR +P1_FRAME +P1_LSPAC +P1_NPTS +P1_ORINT +P1_PSPAC +P1_PURPS +P1_SHAPE +PA_APER +PATTERN1 +PATTSTEP +PA_V3 +PCOUNT +PCTECORR +PCTEFRAC +PCTENSMD +PCTERNCL +PCTESHFT +PCTESMIT +PCTETAB +PFLTFILE +PHOTBW +PHOTCORR +PHOTFLAM +PHOTMODE +PHOTPLAM +PHOTTAB +PHOTZPT +PODPSFF +POSTARG1 +POSTARG2 +PRIMESI +PR_INV_F +PR_INV_L +PR_INV_M +PROCTIME +PROPAPER +PROPOSID +QUALCOM1 +QUALCOM2 +QUALCOM3 +QUALITY +RA_APER +RA_TARG +READNSEA +READNSEB +READNSEC +READNSED +REFFRAME +REJ_RATE +RPTCORR +SCALENSE +SCLAMP +SDQFLAGS +SHADCORR +SHADFILE +SHUTRPOS +SIMPLE +SIZAXIS1 +SIZAXIS2 +SKYSUB +SKYSUM +SNRMAX +SNRMEAN +SNRMIN +SOFTERRS +SPOTTAB +STATFLAG +STDCFFF +STDCFFP +SUBARRAY +SUN_ALT +SUNANGLE +TARGNAME +TDDALPHA +TDDBETA +TELESCOP +TIME-OBS +T_SGSTAR +VAFACTOR +WCSAXES +WCSCDATE +WFCMPRSD +WRTERR +XTENSION +WCSNAME +WCSTYPE +# +# WCS Related Keyword Rules +# These move any OPUS-generated WCS values to the table +# +WCSNAMEO +WCSAXESO +LONPOLEO +LATPOLEO +RESTFRQO +RESTWAVO +CD1_1O +CD1_2O +CD2_1O +CD2_2O +CDELT1O +CDELT2O +CRPIX1O +CRPIX2O +CRVAL1O +CRVAL2O +CTYPE1O +CTYPE2O +CUNIT1O +CUNIT2O +################################################################################ +# +# Header Keyword Rules REQUIRED for CAOM +# +################################################################################ +PROPOSID PROPOSID first +TARGNAME TARGNAME first +PR_INV_L PR_INV_L first +PR_INV_F PR_INV_F first +PR_INV_M PR_INV_M first +RA_TARG RA_TARG first +DEC_TARG DEC_TARG first +INSTRUME INSTRUME first +DETECTOR DETECTOR first +APERTURE APERTURE multi +FILTER1 FILTER first # May need to modify this as '-' combined value +EXPEND EXPEND max +EXPSTART EXPSTART min +EXPTIME TEXPTIME sum +EXPTIME EXPTIME sum +EXPSTART DATE-BEG min # convert value to iso format -- separately? +EXPEND DATE-END max # convert value to iso format -- separately? 
+IMAGETYP IMAGETYP first +OBSMODE OBSMODE multi +OBSTYPE OBSTYPE first +EQUINOX EQUINOX first +REFFRAME REFFRAME multi +MTFLAG MTFLAG first +################################################################################ +# +# Header Keyword Rules for remaining keywords +# +################################################################################ +FILTER1 FILTER1 multi +FILTER2 FILTER2 multi +GOODMAX GOODMAX max +GOODMEAN GOODMEAN mean +GOODMIN GOODMIN min +INHERIT INHERIT first # maintain IRAF compatibility +LINENUM LINENUM first +LRFWAVE LRFWAVE first +NCOMBINE NCOMBINE sum +MDRIZSKY MDRIZSKY mean +PHOTBW PHOTBW mean +PHOTFLAM PHOTFLAM mean +PHOTMODE PHOTMODE first +PHOTPLAM PHOTPLAM mean +PHOTZPT PHOTZPT mean +SNRMAX SNRMAX max +SNRMEAN SNRMEAN mean +SNRMIN SNRMIN min +TELESCOP TELESCOP first +PA_V3 PA_V3 first +### rules below were added 05Jun2012,in response to Dorothy Fraquelli guidance re: DADS +ATODCORR ATODCORR multi +ATODGNA ATODGNA first +ATODGNB ATODGNB first +ATODGNC ATODGNC first +ATODGND ATODGND first +ATODTAB ATODTAB multi +BADINPDQ BADINPDQ sum +BIASCORR BIASCORR multi +BIASFILE BIASFILE multi +BLEVCORR BLEVCORR multi +BPIXTAB BPIXTAB multi +CCDCHIP CCDCHIP first +CCDGAIN CCDGAIN first +CCDOFSTA CCDOFSTA first +CCDOFSTB CCDOFSTB first +CCDOFSTC CCDOFSTC first +CCDOFSTD CCDOFSTD first +CCDTAB CCDTAB multi +CFLTFILE CFLTFILE multi +COMPTAB COMPTAB multi +CRCORR CRCORR multi +CRMASK CRMASK first +CRRADIUS CRRADIUS first +CRREJTAB CRREJTAB multi +CRSPLIT CRSPLIT first +CRTHRESH CRTHRESH first +CTEDIR CTEDIR multi +CTEIMAGE CTEIMAGE first +DARKCORR DARKCORR multi +DARKFILE DARKFILE multi +DATE-OBS DATE-OBS first +DEC_APER DEC_APER first +DFLTFILE DFLTFILE multi +DGEOFILE DGEOFILE multi +DIRIMAGE DIRIMAGE multi +DQICORR DQICORR multi +DRIZCORR DRIZCORR multi +EXPFLAG EXPFLAG multi +EXPSCORR EXPSCORR multi +FGSLOCK FGSLOCK multi +FLASHCUR FLASHCUR multi +FLASHDUR FLASHDUR first +FLASHSTA FLASHSTA first +FLATCORR FLATCORR multi +FLSHCORR FLSHCORR multi +FLSHFILE FLSHFILE multi +FW1ERROR FW1ERROR multi +FW1OFFST FW1OFFST first +FW2ERROR FW2ERROR multi +FW2OFFST FW2OFFST first +FWSERROR FWSERROR multi +FWSOFFST FWSOFFST first +GRAPHTAB GRAPHTAB multi +GYROMODE GYROMODE multi +IDCTAB IDCTAB multi +IMPHTTAB IMPHTTAB multi +LFLGCORR LFLGCORR multi +LFLTFILE LFLTFILE multi +LTM1_1 LTM1_1 float_one +LTM2_2 LTM2_2 float_one +MDRIZTAB MDRIZTAB multi +MEANEXP MEANEXP first +MOONANGL MOONANGL first +NRPTEXP NRPTEXP first +OSCNTAB OSCNTAB multi +P1_ANGLE P1_ANGLE first +P1_CENTR P1_CENTR multi +P1_FRAME P1_FRAME multi +P1_LSPAC P1_LSPAC first +P1_NPTS P1_NPTS first +P1_ORINT P1_ORINT first +P1_PSPAC P1_PSPAC first +P1_PURPS P1_PURPS multi +P1_SHAPE P1_SHAPE multi +P2_ANGLE P2_ANGLE first +P2_CENTR P2_CENTR multi +P2_FRAME P2_FRAME multi +P2_LSPAC P2_LSPAC first +P2_NPTS P2_NPTS first +P2_ORINT P2_ORINT first +P2_PSPAC P2_PSPAC first +P2_PURPS P2_PURPS multi +P2_SHAPE P2_SHAPE multi +PATTERN1 PATTERN1 multi +PATTERN2 PATTERN2 multi +PATTSTEP PATTSTEP first +PHOTCORR PHOTCORR multi +PHOTTAB PHOTTAB multi +POSTARG1 POSTARG1 first +POSTARG2 POSTARG2 first +PRIMESI PRIMESI multi +PROPAPER PROPAPER multi +RA_APER RA_APER first +READNSEA READNSEA first +READNSEB READNSEB first +READNSEC READNSEC first +READNSED READNSED first +REJ_RATE REJ_RATE first +SCALENSE SCALENSE first +SCLAMP SCLAMP multi +SHADCORR SHADCORR multi +SHADFILE SHADFILE multi +SHUTRPOS SHUTRPOS multi +SKYSUB SKYSUB multi +SKYSUM SKYSUM sum +SPOTTAB SPOTTAB multi +SUBARRAY SUBARRAY first +SUNANGLE SUNANGLE first +SUN_ALT 
SUN_ALT first +TIME-OBS TIME-OBS first +WRTERR WRTERR multi diff --git a/drizzlepac/pars/hap_pars/default_parameters/acs/sbc/acs_sbc_catalog_generation_all.json b/drizzlepac/pars/hap_pars/default_parameters/acs/sbc/acs_sbc_catalog_generation_all.json index 489e325b9..d462dbbe5 100644 --- a/drizzlepac/pars/hap_pars/default_parameters/acs/sbc/acs_sbc_catalog_generation_all.json +++ b/drizzlepac/pars/hap_pars/default_parameters/acs/sbc/acs_sbc_catalog_generation_all.json @@ -35,7 +35,7 @@ "nlevels": 64, "contrast": 0.001, "border": 10, - "rw2d_size": 24, + "rw2d_size": 23, "rw2d_nsigma": 10.0, "rw2d_biggest_source": 0.045, "rw2d_source_fraction": 0.15, diff --git a/drizzlepac/pars/wfc3_header_hap.rules b/drizzlepac/pars/wfc3_header_hap.rules index 8e5fbbab4..eae0a7f6c 100644 --- a/drizzlepac/pars/wfc3_header_hap.rules +++ b/drizzlepac/pars/wfc3_header_hap.rules @@ -358,10 +358,25 @@ EQUINOX EQUINOX first # Header Keyword Rules # ################################################################################ +# +# Keywords which need to be deleted for MVM products +# These are not relevant for exposures from multiple proposals +# +################################################################################ + IPPPSSOO + ASN_ID + ASN_MTYP + ASN_TAB + GYROMODE + SUNANGLE + MOONANGL + EXPFLAG + LINENUM + QUALCOM1 + QUALCOM2 + QUALCOM3 +################################################################################ APERTURE APERTURE multi -ASN_ID ASN_ID first -ASN_MTYP ASN_MTYP multi -ASN_TAB ASN_TAB multi ATODCORR ATODCORR multi ATODGNA ATODGNA first ATODGNB ATODGNB first @@ -422,7 +437,6 @@ DGEOFILE DGEOFILE multi DIRIMAGE DIRIMAGE multi DQICORR DQICORR multi DRIZCORR DRIZCORR multi -EXPFLAG EXPFLAG multi EXPNAME EXPNAME first EXPSCORR EXPSCORR multi EXTVER EXTVER first @@ -437,13 +451,11 @@ FLATCORR FLATCORR multi FLSHCORR FLSHCORR multi FLSHFILE FLSHFILE multi GRAPHTAB GRAPHTAB multi -GYROMODE GYROMODE multi IDCTAB IDCTAB multi INHERIT INHERIT first # maintains IRAF compatibility INITGUES INITGUES multi INSTRUME INSTRUME first LFLTFILE LFLTFILE multi -LINENUM LINENUM first LTM1_1 LTM1_1 float_one LTM2_2 LTM2_2 float_one LTV1 LTV1 first @@ -451,7 +463,6 @@ LTV2 LTV2 first MDRIZTAB MDRIZTAB multi MEANEXP MEANEXP first MEANFLSH MEANFLSH first -MOONANGL MOONANGL first MTFLAG MTFLAG first NCOMBINE NCOMBINE sum NLINCORR NLINCORR multi @@ -497,9 +508,6 @@ PRIMESI PRIMESI multi PROCTIME PROCTIME first PROPAPER PROPAPER multi PROPOSID PROPOSID first -QUALCOM1 QUALCOM1 multi -QUALCOM2 QUALCOM2 multi -QUALCOM3 QUALCOM3 multi QUALITY QUALITY multi RA_APER RA_APER first READNSEA READNSEA first @@ -527,7 +535,6 @@ STDCFFF STDCFFF multi STDCFFP STDCFFP multi SUBARRAY SUBARRAY first SUBTYPE SUBTYPE multi -SUNANGLE SUNANGLE first T_SGSTAR T_SGSTAR multi TARGNAME TARGNAME first TDFTRANS TDFTRANS sum diff --git a/drizzlepac/pars/wfc3_mvm_header_hap.rules b/drizzlepac/pars/wfc3_mvm_header_hap.rules index a19231076..73e2aa032 100644 --- a/drizzlepac/pars/wfc3_mvm_header_hap.rules +++ b/drizzlepac/pars/wfc3_mvm_header_hap.rules @@ -370,7 +370,7 @@ EQUINOX EQUINOX first / PROPOSAL INFORMATION GYROMODE SUNANGLE - MOONANGLE + MOONANGL EXPFLAG QUALCOM1 QUALCOM2 diff --git a/drizzlepac/pars/wfc3_mvm_single_header_hap.rules b/drizzlepac/pars/wfc3_mvm_single_header_hap.rules new file mode 100644 index 000000000..80867627d --- /dev/null +++ b/drizzlepac/pars/wfc3_mvm_single_header_hap.rules @@ -0,0 +1,502 @@ +!VERSION = 1.1 +!INSTRUMENT = WFC3 
+################################################################################ +# +# Header keyword rules +# +# Columns definitions: +# Column 1: header keyword from input header or '' +# Column 2: [optional] name of table column for recording values from +# keyword specified in the first column from each input image +# =or= name of keyword to be updated in output image header +# Column 3: [optional] function to use to create output header value +# (output keyword name must be specified in second column) +# +# Any line that starts with '' indicates that that keyword +# or set of keywords for that header section should be deleted from the +# output header. +# +# Supported functions: first, last, min, max, mean, sum, stddev, multi +# +# Any keyword without a function will be copied to a table column with the +# name given in the second column, or first column if only 1 column has been +# specified. These keywords will also be removed from the output header unless +# another rule for the same keyword (1st column) has been specified with a +# function named in the 3rd column. +# +# All keywords *not specified in this rules file* will be derived from the first +# input image's header and used unchanged to create the final output header(s). +# So, any keyword with a rule that adds that keyword to a table will be removed from +# the output headers unless additional rules are provided to specify what values +# should be kept in the header for that keyword. +## +# Final header output will use the same formatting and order of keywords defined +# by the first image's headers. +# +# Rules for headers from all image extensions can be included in the same +# file without regard for order, although keeping them organized by extension +# makes the file easier to maintain and update. +# +# The order of the rules will determine the order of the columns in the +# final output table. As a result, the rules for EXTNAME and EXTVER are +# associated with ROOTNAME, rather than the SCI header, in order to make +# rows of the table easier to identify. +# +# Comments appended to the end of a rule will be ignored when reading the +# rules. All comments start with '#'. 
+# +# +################################################################################ +# +# Table Keyword Rules +# +################################################################################ +ROOTNAME +EXTNAME +EXTVER +A_0_2 +A_0_3 +A_0_4 +A_1_1 +A_1_2 +A_1_3 +A_2_0 +A_2_1 +A_2_2 +A_3_0 +A_3_1 +A_4_0 +A_ORDER +APERTURE +ASN_ID +ASN_MTYP +ASN_TAB +ATODCORR +ATODGNA +ATODGNB +ATODGNC +ATODGND +ATODTAB +B_0_2 +B_0_3 +B_0_4 +B_1_1 +B_1_2 +B_1_3 +B_2_0 +B_2_1 +B_2_2 +B_3_0 +B_3_1 +B_4_0 +B_ORDER +BADINPDQ +BIASCORR +BIASFILE +BIASLEVA +BIASLEVB +BIASLEVC +BIASLEVD +BINAXIS1 +BINAXIS2 +BLEVCORR +BPIXTAB +BUNIT +CAL_VER +CCDAMP +CCDCHIP +CCDGAIN +CCDOFSAB +CCDOFSCD +CCDOFSTA +CCDOFSTB +CCDOFSTC +CCDOFSTD +CCDTAB +CD1_1 +CD1_2 +CD2_1 +CD2_2 +CENTERA1 +CENTERA2 +CHINJECT +COMPTAB +CRCORR +CRMASK +CRPIX1 +CRPIX2 +CRRADIUS +CRREJTAB +CRSIGMAS +CRSPLIT +CRTHRESH +CRVAL1 +CRVAL2 +CTEDIR +CTEIMAGE +CTYPE1 +CTYPE2 +DARKCORR +DARKFILE +DATAMAX +DATAMIN +DATE +DATE-OBS +DEC_APER +DEC_TARG +DELTATIM +DETECTOR +DFLTFILE +DGEOFILE +DIRIMAGE +DQICORR +DRIZCORR +EQUINOX +ERRCNT +EXPEND +EXPFLAG +EXPNAME +EXPSCORR +EXPSTART +EXPTIME +FGSLOCK +FILENAME +FILETYPE +FILLCNT +FILTER +FLASHCUR +FLASHDUR +FLASHSTA +FLATCORR +FLSHCORR +FLSHFILE +GOODMAX +GOODMEAN +GOODMIN +GRAPHTAB +GYROMODE +IDCSCALE +IDCTAB +IDCTHETA +IDCV2REF +IDCV3REF +IMAGETYP +INHERIT +INITGUES +INSTRUME +IRAF-TLM +LFLTFILE +LINENUM +LTM1_1 +LTM2_2 +LTV1 +LTV2 +MDRIZSKY +MDRIZTAB +MEANBLEV +MEANDARK +MEANEXP +MEANFLSH +MOONANGL +MTFLAG +NAXIS1 +NAXIS2 +NCOMBINE +NGOODPIX +NLINCORR +NLINFILE +NRPTEXP +NSAMP +OBSMODE +OBSTYPE +OCD1_1 +OCD1_2 +OCD2_1 +OCD2_2 +OCRPIX1 +OCRPIX2 +OCRVAL1 +OCRVAL2 +OCTYPE1 +OCTYPE2 +OCX10 +OCX11 +OCY10 +OCY11 +ONAXIS1 +ONAXIS2 +OORIENTA +OPUS_VER +ORIENTAT +ORIGIN +OSCNTAB +P1_ANGLE +P1_CENTR +P1_FRAME +P1_LSPAC +P1_NPTS +P1_ORINT +P1_PSPAC +P1_PURPS +P1_SHAPE +P2_ANGLE +P2_CENTR +P2_FRAME +P2_LSPAC +P2_NPTS +P2_ORINT +P2_PSPAC +P2_PURPS +P2_SHAPE +PA_APER +PA_V3 +PATTERN1 +PATTERN2 +PATTSTEP +PFLTFILE +PHOTBW +PHOTCORR +PHOTFLAM +PHOTFNU +PHOTMODE +PHOTPLAM +PHOTZPT +PODPSFF +POSTARG1 +POSTARG2 +PR_INV_F +PR_INV_L +PR_INV_M +PRIMESI +PROCTIME +PROPAPER +PROPOSID +QUALCOM1 +QUALCOM2 +QUALCOM3 +QUALITY +RA_APER +RA_TARG +READNSEA +READNSEB +READNSEC +READNSED +REFFRAME +REJ_RATE +ROUTTIME +RPTCORR +SAA_DARK +SAA_EXIT +SAA_TIME +SAACRMAP +SAMP_SEQ +SAMPNUM +SAMPTIME +SAMPZERO +SCALENSE +SCLAMP +SDQFLAGS +SHADCORR +SHADFILE +SHUTRPOS +SIMPLE +SIZAXIS1 +SIZAXIS2 +SKYSUB +SKYSUM +SNRMAX +SNRMEAN +SNRMIN +SOFTERRS +STDCFFF +STDCFFP +SUBARRAY +SUBTYPE +SUN_ALT +SUNANGLE +T_SGSTAR +TARGNAME +TDFTRANS +TELESCOP +TIME-OBS +UNITCORR +VAFACTOR +WCSAXES +WCSCDATE +ZOFFCORR +ZSIGCORR +WCSNAME +WCSTYPE +################################################################################ +# +# Header Keyword Rules REQUIRED for CAOM +# +################################################################################ +PROPOSID PROPOSID first +TARGNAME TARGNAME first +PR_INV_L PR_INV_L first +PR_INV_F PR_INV_F first +PR_INV_M PR_INV_M first +RA_TARG RA_TARG first +DEC_TARG DEC_TARG first +INSTRUME INSTRUME first +DETECTOR DETECTOR first +APERTURE APERTURE multi +FILTER FILTER first # May need to modify this as '-' combined value +EXPEND EXPEND max +EXPSTART EXPSTART min +EXPTIME TEXPTIME sum +EXPTIME EXPTIME sum +EXPSTART DATE-BEG min # convert value to iso format -- separately? +EXPEND DATE-END max # convert value to iso format -- separately? 
+IMAGETYP IMAGETYP first +OBSMODE OBSMODE multi +OBSTYPE OBSTYPE first +EQUINOX EQUINOX first +################################################################################ +# +# Header Keyword Rules +# +################################################################################ +################################################################################ +# +# Keywords which need to be kept in product header +# +################################################################################ +APERTURE APERTURE multi +ATODGNA ATODGNA first +ATODGNB ATODGNB first +ATODGNC ATODGNC first +ATODGND ATODGND first +BADINPDQ BADINPDQ sum +BIASLEVA BIASLEVA first +BIASLEVB BIASLEVB first +BIASLEVC BIASLEVC first +BIASLEVD BIASLEVD first +BINAXIS1 BINAXIS1 first +BINAXIS2 BINAXIS2 first +BUNIT BUNIT first +CAL_VER CAL_VER first +CCDAMP CCDAMP first +CCDCHIP CCDCHIP first +CCDGAIN CCDGAIN first +CCDOFSTA CCDOFSTA first +CCDOFSTB CCDOFSTB first +CCDOFSTC CCDOFSTC first +CCDOFSTD CCDOFSTD first +CD1_1 CD1_1 first +CD1_2 CD1_2 first +CD2_1 CD2_1 first +CD2_2 CD2_2 first +CENTERA1 CENTERA1 first +CENTERA2 CENTERA2 first +CHINJECT CHINJECT multi +CRMASK CRMASK first +CRPIX1 CRPIX1 first +CRPIX2 CRPIX2 first +CRRADIUS CRRADIUS first +CRSIGMAS CRSIGMAS multi +CRSPLIT CRSPLIT first +CRTHRESH CRTHRESH first +CTEDIR CTEDIR multi +CTEIMAGE CTEIMAGE first +CTYPE1 CTYPE1 multi +CTYPE2 CTYPE2 multi +CRVAL1 CRVAL1 first +CRVAL2 CRVAL2 first +DATE-OBS DATE-OBS first +DEC_APER DEC_APER first +DELTATIM DELTATIM first +DIRIMAGE DIRIMAGE multi +EXPNAME EXPNAME first +EXTVER EXTVER first +FGSLOCK FGSLOCK multi +FILENAME FILENAME multi +FILETYPE FILETYPE multi +FILTER FILTER multi +FLASHCUR FLASHCUR multi +FLASHDUR FLASHDUR first +FLASHSTA FLASHSTA first +GYROMODE GYROMODE multi +INHERIT INHERIT first # maintains IRAF compatibility +INITGUES INITGUES multi +INSTRUME INSTRUME first +LINENUM LINENUM first +LTM1_1 LTM1_1 float_one +LTM2_2 LTM2_2 float_one +LTV1 LTV1 first +LTV2 LTV2 first +MEANEXP MEANEXP first +MEANFLSH MEANFLSH first +MTFLAG MTFLAG first +NCOMBINE NCOMBINE sum +NRPTEXP NRPTEXP first +NSAMP NSAMP first +OPUS_VER OPUS_VER first +ORIENTAT ORIENTAT first +P1_ANGLE P1_ANGLE first +P1_CENTR P1_CENTR multi +P1_FRAME P1_FRAME multi +P1_LSPAC P1_LSPAC first +P1_NPTS P1_NPTS first +P1_ORINT P1_ORINT first +P1_PSPAC P1_PSPAC first +P1_PURPS P1_PURPS multi +P1_SHAPE P1_SHAPE multi +P2_ANGLE P2_ANGLE first +P2_CENTR P2_CENTR multi +P2_FRAME P2_FRAME multi +P2_LSPAC P2_LSPAC first +P2_NPTS P2_NPTS first +P2_ORINT P2_ORINT first +P2_PSPAC P2_PSPAC first +P2_PURPS P2_PURPS multi +P2_SHAPE P2_SHAPE multi +PA_APER PA_APER zero +PA_V3 PA_V3 zero +PATTERN1 PATTERN1 multi +PATTERN2 PATTERN2 multi +PATTSTEP PATTSTEP first +PHOTBW PHOTBW mean +PHOTFLAM PHOTFLAM mean +PHOTFNU PHOTFNU mean +PHOTMODE PHOTMODE first +PHOTPLAM PHOTPLAM mean +PHOTZPT PHOTZPT mean +PODPSFF PODPSFF multi +PRIMESI PRIMESI multi +PROCTIME PROCTIME first +PROPAPER PROPAPER multi +PROPOSID PROPOSID first +QUALITY QUALITY multi +RA_APER RA_APER first +READNSEA READNSEA first +READNSEB READNSEB first +READNSEC READNSEC first +READNSED READNSED first +REFFRAME REFFRAME multi +ROOTNAME ROOTNAME first +ROUTTIME ROUTTIME first +SAACRMAP SAACRMAP multi +SAMP_SEQ SAMP_SEQ first +SAMPNUM SAMPNUM first +SAMPTIME SAMPTIME first +SAMPZERO SAMPZERO first +SCALENSE SCALENSE first +SCLAMP SCLAMP multi +SDQFLAGS SDQFLAGS first +SIZAXIS1 SIZAXIS1 first +SIZAXIS2 SIZAXIS2 first +SOFTERRS SOFTERRS sum +STDCFFF STDCFFF multi +STDCFFP STDCFFP multi +SUBARRAY 
SUBARRAY first +SUBTYPE SUBTYPE multi +T_SGSTAR T_SGSTAR multi +TARGNAME TARGNAME first +TDFTRANS TDFTRANS sum +TELESCOP TELESCOP first +TIME-OBS TIME-OBS first +WCSAXES WCSAXES first +WCSCDATE WCSCDATE first +WCSNAME WCSNAME first diff --git a/drizzlepac/pars/wfc3_single_header_hap.rules b/drizzlepac/pars/wfc3_single_header_hap.rules new file mode 100644 index 000000000..8e5fbbab4 --- /dev/null +++ b/drizzlepac/pars/wfc3_single_header_hap.rules @@ -0,0 +1,541 @@ +!VERSION = 1.1 +!INSTRUMENT = WFC3 +################################################################################ +# +# Header keyword rules +# +# Columns definitions: +# Column 1: header keyword from input header or '' +# Column 2: [optional] name of table column for recording values from +# keyword specified in the first column from each input image +# =or= name of keyword to be updated in output image header +# Column 3: [optional] function to use to create output header value +# (output keyword name must be specified in second column) +# +# Any line that starts with '' indicates that that keyword +# or set of keywords for that header section should be deleted from the +# output header. +# +# Supported functions: first, last, min, max, mean, sum, stddev, multi +# +# Any keyword without a function will be copied to a table column with the +# name given in the second column, or first column if only 1 column has been +# specified. These keywords will also be removed from the output header unless +# another rule for the same keyword (1st column) has been specified with a +# function named in the 3rd column. +# +# All keywords *not specified in this rules file* will be derived from the first +# input image's header and used unchanged to create the final output header(s). +# So, any keyword with a rule that adds that keyword to a table will be removed from +# the output headers unless additional rules are provided to specify what values +# should be kept in the header for that keyword. +## +# Final header output will use the same formatting and order of keywords defined +# by the first image's headers. +# +# Rules for headers from all image extensions can be included in the same +# file without regard for order, although keeping them organized by extension +# makes the file easier to maintain and update. +# +# The order of the rules will determine the order of the columns in the +# final output table. As a result, the rules for EXTNAME and EXTVER are +# associated with ROOTNAME, rather than the SCI header, in order to make +# rows of the table easier to identify. +# +# Comments appended to the end of a rule will be ignored when reading the +# rules. All comments start with '#'. 
+# +# +################################################################################ +# +# Table Keyword Rules +# +################################################################################ +ROOTNAME +EXTNAME +EXTVER +A_0_2 +A_0_3 +A_0_4 +A_1_1 +A_1_2 +A_1_3 +A_2_0 +A_2_1 +A_2_2 +A_3_0 +A_3_1 +A_4_0 +A_ORDER +APERTURE +ASN_ID +ASN_MTYP +ASN_TAB +ATODCORR +ATODGNA +ATODGNB +ATODGNC +ATODGND +ATODTAB +B_0_2 +B_0_3 +B_0_4 +B_1_1 +B_1_2 +B_1_3 +B_2_0 +B_2_1 +B_2_2 +B_3_0 +B_3_1 +B_4_0 +B_ORDER +BADINPDQ +BIASCORR +BIASFILE +BIASLEVA +BIASLEVB +BIASLEVC +BIASLEVD +BINAXIS1 +BINAXIS2 +BLEVCORR +BPIXTAB +BUNIT +CAL_VER +CCDAMP +CCDCHIP +CCDGAIN +CCDOFSAB +CCDOFSCD +CCDOFSTA +CCDOFSTB +CCDOFSTC +CCDOFSTD +CCDTAB +CD1_1 +CD1_2 +CD2_1 +CD2_2 +CENTERA1 +CENTERA2 +CHINJECT +COMPTAB +CRCORR +CRMASK +CRPIX1 +CRPIX2 +CRRADIUS +CRREJTAB +CRSIGMAS +CRSPLIT +CRTHRESH +CRVAL1 +CRVAL2 +CTEDIR +CTEIMAGE +CTYPE1 +CTYPE2 +DARKCORR +DARKFILE +DATAMAX +DATAMIN +DATE +DATE-OBS +DEC_APER +DEC_TARG +DELTATIM +DETECTOR +DFLTFILE +DGEOFILE +DIRIMAGE +DQICORR +DRIZCORR +EQUINOX +ERRCNT +EXPEND +EXPFLAG +EXPNAME +EXPSCORR +EXPSTART +EXPTIME +FGSLOCK +FILENAME +FILETYPE +FILLCNT +FILTER +FLASHCUR +FLASHDUR +FLASHSTA +FLATCORR +FLSHCORR +FLSHFILE +GOODMAX +GOODMEAN +GOODMIN +GRAPHTAB +GYROMODE +IDCSCALE +IDCTAB +IDCTHETA +IDCV2REF +IDCV3REF +IMAGETYP +INHERIT +INITGUES +INSTRUME +IRAF-TLM +LFLTFILE +LINENUM +LTM1_1 +LTM2_2 +LTV1 +LTV2 +MDRIZSKY +MDRIZTAB +MEANBLEV +MEANDARK +MEANEXP +MEANFLSH +MOONANGL +MTFLAG +NAXIS1 +NAXIS2 +NCOMBINE +NGOODPIX +NLINCORR +NLINFILE +NRPTEXP +NSAMP +OBSMODE +OBSTYPE +OCD1_1 +OCD1_2 +OCD2_1 +OCD2_2 +OCRPIX1 +OCRPIX2 +OCRVAL1 +OCRVAL2 +OCTYPE1 +OCTYPE2 +OCX10 +OCX11 +OCY10 +OCY11 +ONAXIS1 +ONAXIS2 +OORIENTA +OPUS_VER +ORIENTAT +ORIGIN +OSCNTAB +P1_ANGLE +P1_CENTR +P1_FRAME +P1_LSPAC +P1_NPTS +P1_ORINT +P1_PSPAC +P1_PURPS +P1_SHAPE +P2_ANGLE +P2_CENTR +P2_FRAME +P2_LSPAC +P2_NPTS +P2_ORINT +P2_PSPAC +P2_PURPS +P2_SHAPE +PA_APER +PA_V3 +PATTERN1 +PATTERN2 +PATTSTEP +PFLTFILE +PHOTBW +PHOTCORR +PHOTFLAM +PHOTFNU +PHOTMODE +PHOTPLAM +PHOTZPT +PODPSFF +POSTARG1 +POSTARG2 +PR_INV_F +PR_INV_L +PR_INV_M +PRIMESI +PROCTIME +PROPAPER +PROPOSID +QUALCOM1 +QUALCOM2 +QUALCOM3 +QUALITY +RA_APER +RA_TARG +READNSEA +READNSEB +READNSEC +READNSED +REFFRAME +REJ_RATE +ROUTTIME +RPTCORR +SAA_DARK +SAA_EXIT +SAA_TIME +SAACRMAP +SAMP_SEQ +SAMPNUM +SAMPTIME +SAMPZERO +SCALENSE +SCLAMP +SDQFLAGS +SHADCORR +SHADFILE +SHUTRPOS +SIMPLE +SIZAXIS1 +SIZAXIS2 +SKYSUB +SKYSUM +SNRMAX +SNRMEAN +SNRMIN +SOFTERRS +STDCFFF +STDCFFP +SUBARRAY +SUBTYPE +SUN_ALT +SUNANGLE +T_SGSTAR +TARGNAME +TDFTRANS +TELESCOP +TIME-OBS +UNITCORR +VAFACTOR +WCSAXES +WCSCDATE +ZOFFCORR +ZSIGCORR +WCSNAME +WCSTYPE +################################################################################ +# +# Header Keyword Rules REQUIRED for CAOM +# +################################################################################ +PROPOSID PROPOSID first +TARGNAME TARGNAME first +PR_INV_L PR_INV_L first +PR_INV_F PR_INV_F first +PR_INV_M PR_INV_M first +RA_TARG RA_TARG first +DEC_TARG DEC_TARG first +INSTRUME INSTRUME first +DETECTOR DETECTOR first +APERTURE APERTURE multi +FILTER FILTER first # May need to modify this as '-' combined value +EXPEND EXPEND max +EXPSTART EXPSTART min +EXPTIME TEXPTIME sum +EXPTIME EXPTIME sum +EXPSTART DATE-BEG min # convert value to iso format -- separately? +EXPEND DATE-END max # convert value to iso format -- separately? 
+IMAGETYP IMAGETYP first +OBSMODE OBSMODE multi +OBSTYPE OBSTYPE first +EQUINOX EQUINOX first +################################################################################ +# +# Header Keyword Rules +# +################################################################################ +APERTURE APERTURE multi +ASN_ID ASN_ID first +ASN_MTYP ASN_MTYP multi +ASN_TAB ASN_TAB multi +ATODCORR ATODCORR multi +ATODGNA ATODGNA first +ATODGNB ATODGNB first +ATODGNC ATODGNC first +ATODGND ATODGND first +ATODTAB ATODTAB multi +BADINPDQ BADINPDQ sum +BIASCORR BIASCORR multi +BIASFILE BIASFILE multi +BIASLEVA BIASLEVA first +BIASLEVB BIASLEVB first +BIASLEVC BIASLEVC first +BIASLEVD BIASLEVD first +BINAXIS1 BINAXIS1 first +BINAXIS2 BINAXIS2 first +BLEVCORR BLEVCORR multi +BPIXTAB BPIXTAB multi +BUNIT BUNIT first +CAL_VER CAL_VER first +CCDAMP CCDAMP first +CCDCHIP CCDCHIP first +CCDGAIN CCDGAIN first +CCDOFSTA CCDOFSTA first +CCDOFSTB CCDOFSTB first +CCDOFSTC CCDOFSTC first +CCDOFSTD CCDOFSTD first +CCDTAB CCDTAB multi +CD1_1 CD1_1 first +CD1_2 CD1_2 first +CD2_1 CD2_1 first +CD2_2 CD2_2 first +CENTERA1 CENTERA1 first +CENTERA2 CENTERA2 first +CHINJECT CHINJECT multi +COMPTAB COMPTAB multi +CRCORR CRCORR multi +CRMASK CRMASK first +CRPIX1 CRPIX1 first +CRPIX2 CRPIX2 first +CRRADIUS CRRADIUS first +CRREJTAB CRREJTAB multi +CRSIGMAS CRSIGMAS multi +CRSPLIT CRSPLIT first +CRTHRESH CRTHRESH first +CTEDIR CTEDIR multi +CTEIMAGE CTEIMAGE first +CTYPE1 CTYPE1 multi +CTYPE2 CTYPE2 multi +CRVAL1 CRVAL1 first +CRVAL2 CRVAL2 first +DARKCORR DARKCORR multi +DARKFILE DARKFILE multi +DATE-OBS DATE-OBS first +DEC_APER DEC_APER first +DELTATIM DELTATIM first +DFLTFILE DFLTFILE multi +DGEOFILE DGEOFILE multi +DIRIMAGE DIRIMAGE multi +DQICORR DQICORR multi +DRIZCORR DRIZCORR multi +EXPFLAG EXPFLAG multi +EXPNAME EXPNAME first +EXPSCORR EXPSCORR multi +EXTVER EXTVER first +FGSLOCK FGSLOCK multi +FILENAME FILENAME multi +FILETYPE FILETYPE multi +FILTER FILTER multi +FLASHCUR FLASHCUR multi +FLASHDUR FLASHDUR first +FLASHSTA FLASHSTA first +FLATCORR FLATCORR multi +FLSHCORR FLSHCORR multi +FLSHFILE FLSHFILE multi +GRAPHTAB GRAPHTAB multi +GYROMODE GYROMODE multi +IDCTAB IDCTAB multi +INHERIT INHERIT first # maintains IRAF compatibility +INITGUES INITGUES multi +INSTRUME INSTRUME first +LFLTFILE LFLTFILE multi +LINENUM LINENUM first +LTM1_1 LTM1_1 float_one +LTM2_2 LTM2_2 float_one +LTV1 LTV1 first +LTV2 LTV2 first +MDRIZTAB MDRIZTAB multi +MEANEXP MEANEXP first +MEANFLSH MEANFLSH first +MOONANGL MOONANGL first +MTFLAG MTFLAG first +NCOMBINE NCOMBINE sum +NLINCORR NLINCORR multi +NLINFILE NLINFILE multi +NRPTEXP NRPTEXP first +NSAMP NSAMP first +OPUS_VER OPUS_VER first +ORIENTAT ORIENTAT first +OSCNTAB OSCNTAB multi +P1_ANGLE P1_ANGLE first +P1_CENTR P1_CENTR multi +P1_FRAME P1_FRAME multi +P1_LSPAC P1_LSPAC first +P1_NPTS P1_NPTS first +P1_ORINT P1_ORINT first +P1_PSPAC P1_PSPAC first +P1_PURPS P1_PURPS multi +P1_SHAPE P1_SHAPE multi +P2_ANGLE P2_ANGLE first +P2_CENTR P2_CENTR multi +P2_FRAME P2_FRAME multi +P2_LSPAC P2_LSPAC first +P2_NPTS P2_NPTS first +P2_ORINT P2_ORINT first +P2_PSPAC P2_PSPAC first +P2_PURPS P2_PURPS multi +P2_SHAPE P2_SHAPE multi +PA_APER PA_APER first +PA_V3 PA_V3 first +PATTERN1 PATTERN1 multi +PATTERN2 PATTERN2 multi +PATTSTEP PATTSTEP first +PFLTFILE PFLTFILE multi +PHOTBW PHOTBW mean +PHOTCORR PHOTCORR multi +PHOTFLAM PHOTFLAM mean +PHOTFNU PHOTFNU mean +PHOTMODE PHOTMODE first +PHOTPLAM PHOTPLAM mean +PHOTZPT PHOTZPT mean +PODPSFF PODPSFF multi +PRIMESI PRIMESI multi +PROCTIME PROCTIME first 
+PROPAPER PROPAPER multi +PROPOSID PROPOSID first +QUALCOM1 QUALCOM1 multi +QUALCOM2 QUALCOM2 multi +QUALCOM3 QUALCOM3 multi +QUALITY QUALITY multi +RA_APER RA_APER first +READNSEA READNSEA first +READNSEB READNSEB first +READNSEC READNSEC first +READNSED READNSED first +REFFRAME REFFRAME multi +ROOTNAME ROOTNAME first +ROUTTIME ROUTTIME first +RPTCORR RPTCORR multi +SAACRMAP SAACRMAP multi +SAMP_SEQ SAMP_SEQ first +SAMPNUM SAMPNUM first +SAMPTIME SAMPTIME first +SAMPZERO SAMPZERO first +SCALENSE SCALENSE first +SCLAMP SCLAMP multi +SDQFLAGS SDQFLAGS first +SHADCORR SHADCORR multi +SHADFILE SHADFILE multi +SIZAXIS1 SIZAXIS1 first +SIZAXIS2 SIZAXIS2 first +SOFTERRS SOFTERRS sum +STDCFFF STDCFFF multi +STDCFFP STDCFFP multi +SUBARRAY SUBARRAY first +SUBTYPE SUBTYPE multi +SUNANGLE SUNANGLE first +T_SGSTAR T_SGSTAR multi +TARGNAME TARGNAME first +TDFTRANS TDFTRANS sum +TELESCOP TELESCOP first +TIME-OBS TIME-OBS first +UNITCORR UNITCORR multi +WCSAXES WCSAXES first +WCSCDATE WCSCDATE first +WCSNAME WCSNAME first +ZOFFCORR ZOFFCORR multi +ZSIGCORR ZSIGCORR multi diff --git a/drizzlepac/photeq.py b/drizzlepac/photeq.py index 2b1e1d67a..c9da12a81 100644 --- a/drizzlepac/photeq.py +++ b/drizzlepac/photeq.py @@ -12,7 +12,6 @@ __all__ = ['photeq'] __taskname__ = 'drizzlepac.photeq' __version__ = '0.2' -__version_date__ = '06-Nov-2015' __author__ = 'Mihai Cara' # HISTORY: @@ -242,7 +241,7 @@ def photeq(files='*_flt.fits', sciext='SCI', errext='ERR', # BEGIN: _mlinfo("***** {0} started on {1}".format(__taskname__, runtime_begin)) - _mlinfo(" Version {0} ({1})".format(__version__, __version_date__)) + _mlinfo(" Version {0} ".format(__version__)) # check that extension names are strings (or None for error ext): if sciext is None: @@ -639,9 +638,7 @@ def getHelpAsString(docstring = False, show_ver = True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/pixreplace.py b/drizzlepac/pixreplace.py index 348b28c1f..619ad9724 100644 --- a/drizzlepac/pixreplace.py +++ b/drizzlepac/pixreplace.py @@ -63,7 +63,6 @@ # This is specifically NOT intended to match the package-wide version information. __version__ = '0.1' -__version_date__ = '27-May-2015' __taskname__ = 'pixreplace' @@ -159,9 +158,7 @@ def getHelpAsString(docstring = False, show_ver = True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/pixtopix.py b/drizzlepac/pixtopix.py index 84e10d0e2..9428ad932 100644 --- a/drizzlepac/pixtopix.py +++ b/drizzlepac/pixtopix.py @@ -102,7 +102,6 @@ # This is specifically NOT intended to match the package-wide version information. 
__version__ = '0.2' -__version_date__ = '19-Mar-2019' __taskname__ = 'pixtopix' @@ -257,9 +256,7 @@ def getHelpAsString(docstring=False, show_ver=True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = '\n' + ' '.join( - [__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + '\n\n' + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/pixtosky.py b/drizzlepac/pixtosky.py index 051efc725..d5c0424cd 100644 --- a/drizzlepac/pixtosky.py +++ b/drizzlepac/pixtosky.py @@ -93,7 +93,6 @@ # This is specifically NOT intended to match the package-wide version information. __version__ = '0.1' -__version_date__ = '20-Jan-2011' __taskname__ = 'pixtosky' @@ -242,9 +241,7 @@ def getHelpAsString(docstring = False, show_ver = True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/quickDeriv.py b/drizzlepac/quickDeriv.py index 11a39f269..e8beff066 100644 --- a/drizzlepac/quickDeriv.py +++ b/drizzlepac/quickDeriv.py @@ -13,7 +13,7 @@ # Version 0.1.0: created -- CJH # import numpy as np -from .version import * +from . import __version__ def qderiv(array): # TAKE THE ABSOLUTE DERIVATIVE OF A NUMARRY OBJECT """Take the absolute derivate of an image in memory.""" diff --git a/drizzlepac/refimagefindpars.py b/drizzlepac/refimagefindpars.py index b176c1534..d6b8766f5 100644 --- a/drizzlepac/refimagefindpars.py +++ b/drizzlepac/refimagefindpars.py @@ -8,7 +8,7 @@ import os from stsci.tools import teal from . import util -from .version import * +from . import __version__ __taskname__ = 'drizzlepac.refimagefindpars' @@ -48,9 +48,7 @@ def getHelpAsString(docstring = False, show_ver = True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/regfilter.py b/drizzlepac/regfilter.py index 907045f39..9741cd8c7 100644 --- a/drizzlepac/regfilter.py +++ b/drizzlepac/regfilter.py @@ -8,7 +8,6 @@ """ # This is specifically NOT intended to match the package-wide version information. __version__ = '0.1' -__version_date__ = '17-Nov-2013' __author__ = 'Mihai Cara' diff --git a/drizzlepac/resetbits.py b/drizzlepac/resetbits.py index 9fe79c3fa..3d59f1b4b 100644 --- a/drizzlepac/resetbits.py +++ b/drizzlepac/resetbits.py @@ -75,7 +75,6 @@ # This is specifically NOT intended to match the package-wide version information. 
__version__ = '1.0.1'
-__version_date__ = '23-March-2017'

 log = logutil.create_logger(__name__, level=logutil.logging.NOTSET)

@@ -185,7 +184,7 @@ def main():

     if (help):
         print(__doc__)
-        print("\t", __version__+'('+__version_date__+')')
+        print("\t", __version__)
     else:
         reset_dq_bits(args[0],args[1],args[2], args[3])

@@ -229,9 +228,7 @@ def getHelpAsString(docstring = False, show_ver = True):

     if docstring or (not docstring and not os.path.exists(htmlfile)):
         if show_ver:
-            helpString = os.linesep + \
-                ' '.join([__taskname__, 'Version', __version__,
-                          ' updated on ', __version_date__]) + 2*os.linesep
+            helpString = f"\n{__taskname__} Version {__version__}\n"
         else:
             helpString = ''
         if os.path.exists(helpfile):
diff --git a/drizzlepac/runastrodriz.py b/drizzlepac/runastrodriz.py
index f89205a03..e8d0b28eb 100755
--- a/drizzlepac/runastrodriz.py
+++ b/drizzlepac/runastrodriz.py
@@ -123,7 +123,6 @@
 # Local variables
 __version__ = "2.4.0"
-__version_date__ = "(17-Dec-2021)"

 # Implement WIN specific check
 RM_LOGFILES = False if sys.platform.startswith('win') else True
@@ -161,7 +160,7 @@

 # default marker for trailer files
-__trlmarker__ = '*** astrodrizzle Processing Version ' + __version__ + __version_date__ + '***\n'
+__trlmarker__ = '*** astrodrizzle Processing Version ' + __version__ + ' ***\n'

 envvar_bool_dict = {'off': False, 'on': True, 'no': False, 'yes': True, 'false': False, 'true': True}
 envvar_dict = {'off': 'off', 'on': 'on', 'yes': 'on', 'no': 'off', 'true': 'on', 'false': 'off'}
@@ -999,6 +998,8 @@ def verify_alignment(inlist, calfiles, calfiles_flc, trlfile,
         if align_update_files and align_table:
             # Apply headerlets from alignment to FLT version of the files
             for fltfile, flcfile in zip(align_update_files, alignfiles):
+                # Update non-headerlet-based keywords in fltfile
+                _update_wcs_fit_keywords(fltfile, flcfile)
                 row = align_table[align_table['imageName'] == flcfile]
                 headerlet_file = row['headerletFile'][0]
                 if headerlet_file not in ["None", '']:
@@ -1677,6 +1678,45 @@ def update_active_wcs(filename, wcsname, logfile=None):
     if logfile:
         _updateTrlFile(logfile, update_msg)

+def _update_wcs_fit_keywords(fltfile, flcfile):
+    """Update the header of the FLT file with the a posteriori fit results"""
+    fit_kws_sci = [('RMS_RA', -1.0), ('RMS_DEC', -1.0),
+                   ('CRDER1', -1.0), ('CRDER2', -1.0),
+                   ('NMATCHES', 0), ('FITGEOM', 'N/A'),
+                   ('HDRNAME', '')]
+
+    hdulist = fits.open(fltfile, mode='update')
+    hdulist_flc = fits.open(flcfile)  # source header
+
+    if 'HISTORY' in hdulist[0].header:
+        after_kw = None
+        before_kw = 'HISTORY'
+    elif 'ASN_MTYP' in hdulist[0].header:
+        after_kw = 'ASN_MTYP'
+        before_kw = None
+    else:
+        after_kw = hdulist[0].header.cards[-1][0]
+        before_kw = None
+
+    hdulist[0].header.set('UPWCSVER', value=hdulist_flc[0].header['UPWCSVER'],
+                          comment="Version of STWCS used to update the WCS",
+                          after=after_kw, before=before_kw)
+    hdulist[0].header.set('PYWCSVER', value=hdulist_flc[0].header['PYWCSVER'],
+                          comment="Version of Astropy used to update the WCS",
+                          after='UPWCSVER')
+
+    num_sci_ext = amutils.countExtn(hdulist)
+    for extnum in range(1, num_sci_ext+1):
+        sci_extn = ('SCI', extnum)
+        src_hdr = hdulist_flc[sci_extn].header
+        for kw in fit_kws_sci:
+            hdulist[sci_extn].header.set(kw[0], value=src_hdr.get(kw[0], kw[1]), after='WCSNAME')
+
+    hdulist.flush()
+    hdulist.close()
+    hdulist_flc.close()
+    del hdulist
+    del hdulist_flc

 def _lowerAsn(asnfile):
     """ Create a copy of the original asn file and change
@@ -1885,7 +1925,7 @@ def main():
             newdir = args[-1]
     if (help):
         print(__doc__)
-
print("\t", __version__ + '(' + __version_date__ + ')') + print("\t", __version__) else: try: process(args[0], force=force, newpath=newdir, num_cores=num_cores, diff --git a/drizzlepac/runmultihap.py b/drizzlepac/runmultihap.py index 29c212803..b7795f22b 100755 --- a/drizzlepac/runmultihap.py +++ b/drizzlepac/runmultihap.py @@ -31,7 +31,6 @@ # Local variables __version__ = "0.1.1" -__version_date__ = "(16-Oct-2019)" # # These lines (or something similar) will be needed in the HAP processing code # diff --git a/drizzlepac/runsinglehap.py b/drizzlepac/runsinglehap.py index 1beaecb62..3c0289916 100755 --- a/drizzlepac/runsinglehap.py +++ b/drizzlepac/runsinglehap.py @@ -37,7 +37,6 @@ # Local variables __version__ = "0.1.1" -__version_date__ = "(16-Oct-2019)" # # These lines (or something similar) will be needed in the HAP processing code # diff --git a/drizzlepac/sky.py b/drizzlepac/sky.py index 3ab09eb74..577938f50 100644 --- a/drizzlepac/sky.py +++ b/drizzlepac/sky.py @@ -26,7 +26,7 @@ from .imageObject import imageObject from . import util -from .version import * +from . import __version__ __taskname__= "drizzlepac.sky" #looks in drizzlepac for sky.cfg @@ -861,9 +861,7 @@ def getHelpAsString(docstring = False, show_ver = True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/skytopix.py b/drizzlepac/skytopix.py index f306b0a07..666e36c8f 100644 --- a/drizzlepac/skytopix.py +++ b/drizzlepac/skytopix.py @@ -81,7 +81,6 @@ # This is specifically NOT intended to match the package-wide version information. __version__ = '0.1' -__version_date__ = '25-Feb-2011' __taskname__ = 'skytopix' @@ -213,9 +212,7 @@ def getHelpAsString(docstring = False, show_ver = True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/staticMask.py b/drizzlepac/staticMask.py index 262bf3a32..db8692b22 100644 --- a/drizzlepac/staticMask.py +++ b/drizzlepac/staticMask.py @@ -11,7 +11,6 @@ """ import os import sys -from distutils.version import LooseVersion import numpy as np from stsci.tools import fileutil, teal, logutil @@ -317,9 +316,7 @@ def getHelpAsString(docstring = False, show_ver = True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/tweakback.py b/drizzlepac/tweakback.py index a83c679fa..be0183dd1 100644 --- a/drizzlepac/tweakback.py +++ b/drizzlepac/tweakback.py @@ -32,7 +32,6 @@ # This is specifically NOT intended to match the package-wide version information. 
 __version__ = '0.4.1'
-__version_date__ = '13-July-2020'
 
 log = logutil.create_logger(__name__, level=logutil.logging.NOTSET)
 
@@ -132,8 +131,8 @@ def tweakback(drzfile, input=None, origwcs = None,
     stwcs.wcsutil.altwcs: Alternate WCS implementation
 
     """
-    print("TweakBack Version {:s}({:s}) started at: {:s}\n"
-          .format(__version__,__version_date__,util._ptime()[0]))
+    print("TweakBack Version {:s} started at: {:s}\n"
+          .format(__version__, util._ptime()[0]))
 
     # Interpret input list/string into list of filename(s)
     fltfiles = parseinput.parseinput(input)[0]
@@ -434,9 +433,7 @@ def getHelpAsString(docstring = False, show_ver = True):
     if docstring or (not docstring and not os.path.exists(htmlfile)):
         if show_ver:
-            helpString = os.linesep + \
-                ' '.join([__taskname__, 'Version', __version__,
-                          ' updated on ', __version_date__]) + 2*os.linesep
+            helpString = f"\n{__taskname__} Version {__version__}\n"
         else:
             helpString = ''
         if os.path.exists(helpfile):
diff --git a/drizzlepac/tweakreg.py b/drizzlepac/tweakreg.py
index fffcda73f..cc675e96c 100644
--- a/drizzlepac/tweakreg.py
+++ b/drizzlepac/tweakreg.py
@@ -17,14 +17,13 @@
 from . import util
 
-# __version__ and __version_date__ are defined here, prior to the importing
-# of the modules below, so that those modules can use the values
-# from these variable definitions, allowing the values to be designated
+# __version__ is defined here, prior to importing the modules below, so that
+# those modules can use its value, keeping the version designated
 # in one location only.
 #
 # This is specifically NOT intended to match the package-wide version information.
 __version__ = '1.4.7'
-__version_date__ = '18-April-2018'
 
 from . import tweakutils
 from . import imgclasses
@@ -96,8 +95,8 @@ def run(configobj):
     """ Primary Python interface for image registration code
         This task replaces 'tweakshifts'
     """
-    print('TweakReg Version %s(%s) started at: %s \n'%(
-        __version__,__version_date__,util._ptime()[0]))
+    print('TweakReg Version %s started at: %s \n'%(
+        __version__, util._ptime()[0]))
     util.print_pkg_versions()
 
     # make sure 'updatewcs' is set to False when running from GUI or if missing
@@ -878,9 +877,7 @@ def getHelpAsString(docstring = False, show_ver = True):
     if docstring or (not docstring and not os.path.exists(htmlfile)):
         if show_ver:
-            helpString = os.linesep + \
-                ' '.join([__taskname__, 'Version', __version__,
-                          ' updated on ', __version_date__]) + 2*os.linesep
+            helpString = f"\n{__taskname__} Version {__version__}\n"
         else:
             helpString = ''
         if os.path.exists(helpfile):
diff --git a/drizzlepac/updatehdr.py b/drizzlepac/updatehdr.py
index ccdff6b23..59a7a63ae 100644
--- a/drizzlepac/updatehdr.py
+++ b/drizzlepac/updatehdr.py
@@ -22,7 +22,6 @@
 from . import linearfit
 
 __version__ = '0.4.0'
-__version_date__ = '13-July-2020'
 
 log = logutil.create_logger(__name__, level=logutil.logging.NOTSET)
 
diff --git a/drizzlepac/updatenpol.py b/drizzlepac/updatenpol.py
index febca79fe..f43bc4847 100755
--- a/drizzlepac/updatenpol.py
+++ b/drizzlepac/updatenpol.py
@@ -69,7 +69,6 @@
 # This is specifically NOT intended to match the package-wide version information.
 __version__ = '1.1.0'
-__version_date__ = '16-Aug-2011'
 
 import os,sys,shutil
 
@@ -156,7 +155,7 @@ def update(input,refdir="jref$",local=None,interactive=False,wcsupdate=True):
     that can be sorted out later if we get into that situation at all.
""" - print('UPDATENPOL Version',__version__+'('+__version_date__+')') + print(f'UPDATENPOL Version {__version__}') # expand (as needed) the list of input files files,fcol = parseinput.parseinput(input) @@ -323,7 +322,7 @@ def main(): args.append('jref$') if (help): print(__doc__) - print("\t", __version__+'('+__version_date__+')') + print(f"\t{__version__}") else: update(args[:-1],args[-1],local=local,interactive=interactive) @@ -395,9 +394,7 @@ def getHelpAsString(docstring = False, show_ver = True): if docstring or (not docstring and not os.path.exists(htmlfile)): if show_ver: - helpString = os.linesep + \ - ' '.join([__taskname__, 'Version', __version__, - ' updated on ', __version_date__]) + 2*os.linesep + helpString = f"\n{__taskname__} Version {__version__}\n" else: helpString = '' if os.path.exists(helpfile): diff --git a/drizzlepac/util.py b/drizzlepac/util.py index 981eda12c..7611c043d 100644 --- a/drizzlepac/util.py +++ b/drizzlepac/util.py @@ -26,7 +26,7 @@ from stwcs import wcsutil from stwcs.wcsutil import altwcs -from .version import * +#from .version import * __fits_version__ = astropy.__version__ __numpy_version__ = np.__version__ diff --git a/drizzlepac/wfc3Data.py b/drizzlepac/wfc3Data.py index 2d632be91..62412347d 100644 --- a/drizzlepac/wfc3Data.py +++ b/drizzlepac/wfc3Data.py @@ -7,7 +7,6 @@ """ from stsci.tools import fileutil -from nictools import readTDD from .imageObject import imageObject import numpy as np diff --git a/pyproject.toml b/pyproject.toml index 12ecb5d8d..911470c19 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,13 +1,9 @@ [build-system] +requires = ["setuptools>=42.0", + "setuptools_scm[toml]>=3.4", + "wheel", + "oldest-supported-numpy", + "astropy<5.0"] build-backend = "setuptools.build_meta" -requires = ["setuptools>=30.3.0", "wheel", "oldest-supported-numpy", "astropy"] -[tool.stsci-bot] - -[tool.stsci-bot.milestones] - -enabled = false - -[tool.stsci-bot.changelog_checker] - -filename = "CHANGELOG.rst" +[tool.setuptools_scm] diff --git a/setup.cfg b/setup.cfg index 6361972bf..8a323d09f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,3 +1,79 @@ +[metadata] +name = drizzlepac +description = HST image combination using drizzle +long_description = 'drizzle tools: combines astronomical images, including ' + 'modeling distortion, removing cosmic rays, and generally ' + 'improving fidelity of data in the final image', +long_description_content_type = text/plain +author = Megan Sosey, Warren Hack, Christopher Hanley, Chris Sontag, Mihai Cara +license = BSD-3-Clause +url = https://github.com/spacetelescope/drizzlepac +project_urls = + Tracker = https://github.com/spacetelescope/drizzlepac/issues + Documentation = https://drizzlepac.readthedocs.io/en/latest + Source Code = https://github.com/spacetelescope/drizzlepac +classifiers = + Intended Audience :: Science/Research + Topic :: Scientific/Engineering :: Astronomy + License :: OSI Approved :: BSD License + Operating System :: MacOS :: MacOS X + Operating System :: POSIX + Programming Language :: Python :: 3 + Programming Language :: Python :: 3 :: Only + Programming Language :: Python :: 3.8 + Programming Language :: Python :: 3.9 +[options] +zip_safe = False +python_requires = >=3.8 +setup_requires= + setuptool_scm +install_requires = + astropy<5.0.0 + fitsblender + scipy + matplotlib + stsci.tools>=4.0 + stsci.image>=2.3.4 + stsci.imagestats + stsci.skypac>=1.0.7 + stsci.stimage + stwcs>=1.5.3 + tweakwcs>=0.7.2 + stregion + requests + scikit-learn>=0.20 + # HAP-pipeline specific: + bokeh + pandas 
+    spherical_geometry>=1.2.22
+    astroquery>=0.4
+    photutils>=1.0.0
+    lxml
+    PyPDF2
+    scikit-image>=0.14.2
+    tables
+
+[options.extras_require]
+test =
+    ci_watson
+    crds
+    pytest
+    pytest-remotedata
+docs =
+    sphinx
+    stsci_rtd_theme
+    packaging
+
+[options.entry_points]
+console_scripts =
+    mdriz = drizzlepac.mdriz:main
+    resetbits = drizzlepac.resetbits:main
+    updatenpol = drizzlepac.updatenpol:main
+    runastrodriz = drizzlepac.runastrodriz:main
+    runsinglehap = drizzlepac.runsinglehap:main
+    runmultihap = drizzlepac.runmultihap:main
+
 [flake8]
 count = True
 doctests = True
@@ -9,26 +85,19 @@
 exclude =
     doc/source/conf.py,
     build,
     dist,
-    relic
 ignore = E501, E402, F403
 
 [tool:pytest]
-minversion = 3
+minversion = 5
 #addopts =
-norecursedirs = .eggs build relic
+norecursedirs = .eggs build
 junit_family = xunit2
 inputs_root = drizzlepac
 results_root = drizzlepac-results
 
-[bdist_wheel]
-# This flag says that the code is written to work on both Python 2 and Python
-# 3. If at all possible, it is good practice to do this. If you cannot, you
-# will need to generate wheels for each Python version that you support.
-universal=1
-
 [build_sphinx]
 builder = html
 warning-is-error = 1
diff --git a/setup.py b/setup.py
index 2f3ebae26..1c9ebace3 100755
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python
-import os
-import pkgutil
+import os.path
 import sys
 
 import numpy
@@ -8,31 +7,8 @@
 from glob import glob
 from setuptools import setup, find_packages, Extension
-from subprocess import check_call, CalledProcessError
 
-if not pkgutil.find_loader('relic'):
-    relic_local = os.path.exists('relic')
-    relic_submodule = (relic_local and
-                       os.path.exists('.gitmodules') and
-                       not os.listdir('relic'))
-    try:
-        if relic_submodule:
-            check_call(['git', 'submodule', 'update', '--init', '--recursive'])
-        elif not relic_local:
-            check_call(['git', 'clone', 'https://github.com/spacetelescope/relic.git'])
-
-        sys.path.insert(1, 'relic')
-    except CalledProcessError as e:
-        print(e)
-        exit(1)
-
-import relic.release
-
-PACKAGENAME = 'drizzlepac'
-version = relic.release.get_info()
-relic.release.write_template(version, PACKAGENAME)
-
 # Setup C module include directories
 include_dirs = []
 numpy_includes = [numpy.get_include()]
@@ -52,63 +28,9 @@
     ('__STDC__', 1)
 ]
 
-TESTS_REQUIRE = [
-    'ci_watson',
-    'crds',
-    'scikit-image>=0.14.2',
-    'pytest',
-    'pytest-remotedata'
-]
-
 setup(
-    name=PACKAGENAME,
-    version=version.pep386,
-    author='Megan Sosey, Warren Hack, Christopher Hanley, '
-           'Chris Sontag, Mihai Cara',
-    author_email='help@stsci.edu',
-    description='drizzle tools: combines astronomical images, including '
-                'modeling distortion, removing cosmic rays, and generally '
-                'improving fidelity of data in the final image',
-    url='https://github.com/spacetelescope/drizzlepac',
-    classifiers=[
-        'Intended Audience :: Science/Research',
-        'License :: OSI Approved :: BSD License',
-        'Operating System :: OS Independent',
-        'Programming Language :: Python',
-        'Topic :: Scientific/Engineering :: Astronomy',
-        'Topic :: Software Development :: Libraries :: Python Modules',
-    ],
-    python_requires='>=3.6',
-    install_requires=[
-        'astropy<5.0.0',
-        'fitsblender',
-        'nictools',
-        'numpy>=1.19',
-        'scipy',
-        'matplotlib',
-        'scikit-learn>=0.20',
-        'stsci.tools>=4.0',
-        'stsci.image>=2.3.4',
-        'stsci.imagestats',
-        'stsci.skypac>=1.0.7',
-        'stsci.stimage',
-        'stwcs>=1.5.3',
-        'tweakwcs>=0.7.2',
-        'stregion',
-        'requests',
-        # HAP-pipeline specific:
-        "spherical_geometry>=1.2.22",
-        'astroquery>=0.4',
-        'bokeh',
-        'pandas',
-        'photutils>=1.0.0',
-        'lxml',
-        'PyPDF2',
-        'scikit-image',
-    ],
-    extras_require={
-        'test': TESTS_REQUIRE
-    },
+    use_scm_version=True,
+    setup_requires=["setuptools_scm"],
     packages=find_packages(),
     package_data={
         '': ['README.md', 'LICENSE.txt'],
@@ -139,25 +61,10 @@
             'htmlhelp/_*/*/*',
         ]
     },
-    entry_points={
-        'console_scripts': [
-            'mdriz=drizzlepac.mdriz:main',
-            'resetbits=drizzlepac.resetbits:main',
-            'updatenpol=drizzlepac.updatenpol:main',
-            'runastrodriz=drizzlepac.runastrodriz:main',
-            'runsinglehap=drizzlepac.runsinglehap:main',
-            'runmultihap=drizzlepac.runmultihap:main'
-        ],
-    },
     ext_modules=[
         Extension('drizzlepac.cdriz',
                   glob('src/*.c'),
                   include_dirs=include_dirs,
                   define_macros=define_macros),
     ],
-    project_urls={
-        'Bug Reports': 'https://github.com/spacetelescope/drizzlepac/issues/',
-        'Source': 'https://github.com/spacetelescope/drizzlepac/',
-        'Help': 'https://hsthelp.stsci.edu/',
-    },
 )
diff --git a/src/cdrizzlebox.c b/src/cdrizzlebox.c
index 58d927cde..c3d29c894 100644
--- a/src/cdrizzlebox.c
+++ b/src/cdrizzlebox.c
@@ -322,16 +322,18 @@ update_context(struct driz_param_t* p, const integer_t ii, const integer_t jj,
 inline_macro static void
 update_data(struct driz_param_t* p, const integer_t ii, const integer_t jj,
             const float d, const float vc, const float dow) {
-  const double vc_plus_dow = vc + dow;
+  double vc_plus_dow;
+
+  if (dow == 0.0f) return;
+
+  vc_plus_dow = vc + dow;
 
   /* Just a simple calculation without logical tests */
   if (vc == 0.0) {
     *output_data_ptr(p, ii, jj) = d;
   } else {
-    if (vc_plus_dow != 0.0) {
-      *output_data_ptr(p, ii, jj) =
-        (*output_data_ptr(p, ii, jj) * vc + dow * d) / (vc_plus_dow);
-    }
+    *output_data_ptr(p, ii, jj) =
+      (*output_data_ptr(p, ii, jj) * vc + dow * d) / (vc_plus_dow);
   }
 
   *output_counts_ptr(p, ii, jj) = vc_plus_dow;
diff --git a/tests/drizzle/test_drizzle.py b/tests/drizzle/test_drizzle.py
new file mode 100644
index 000000000..c6c5daf8a
--- /dev/null
+++ b/tests/drizzle/test_drizzle.py
@@ -0,0 +1,52 @@
+import pytest
+
+import numpy as np
+from astropy import wcs
+
+from drizzlepac import cdriz
+
+
+@pytest.mark.parametrize(
+    'kernel', ['square', 'point', 'turbo', 'gaussian', 'lanczos3']
+)
+def test_zero_input_weight(kernel):
+    """
+    Test that input pixels with zero weight do not contribute to the
+    output image, for every supported kernel.
+    """
+    # initialize input:
+    insci = np.ones((200, 400), dtype=np.float32)
+    inwht = np.ones((200, 400), dtype=np.float32)
+    inwht[:, 150:155] = 0
+
+    # initialize output:
+    outsci = np.zeros((210, 410), dtype=np.float32)
+    outwht = np.zeros((210, 410), dtype=np.float32)
+    outctx = np.zeros((210, 410), dtype=np.int32)
+
+    # define coordinate mapping:
+    w1 = wcs.WCS()
+    w1.wcs.ctype = ['RA---CAR', 'DEC--CAR']
+    w1.wcs.crpix = [201, 101]
+    w1.wcs.crval = [10, 10]
+    w1.wcs.cdelt = [1e-3, 1e-3]
+    w1.wcs.set()
+
+    w2 = wcs.WCS()
+    w2.wcs.ctype = ['RA---CAR', 'DEC--CAR']
+    w2.wcs.crpix = [206, 106]
+    w2.wcs.crval = [10, 10]
+    w2.wcs.cdelt = [1e-3, 1e-3]
+    w2.wcs.set()
+
+    mapping = cdriz.DefaultWCSMapping(w1, w2, 400, 200, 1)
+
+    # resample:
+    cdriz.tdriz(
+        insci, inwht, outsci, outwht,
+        outctx, 1, 0, 1, 1, 200,
+        1.0, 1.0, 1.0, 'center', 1.0,
+        kernel, 'cps', 1.0, 1.0,
+        'INDEF', 0, 0, 1, mapping
+    )
+
+    # check that no pixel with 0 weight has any counts:
+    assert np.allclose(np.sum(np.abs(outsci[(outwht == 0)])), 0)
diff --git a/tests/hap/test_acs_hrc_sbc_input.out b/tests/hap/acs_hrc_sbc_input.out
similarity index 100%
rename from tests/hap/test_acs_hrc_sbc_input.out
rename to tests/hap/acs_hrc_sbc_input.out
diff --git a/tests/hap/template_svm_demo.py b/tests/hap/template_svm_demo.py
index 8e5205db1..8a8c5babc 100644
--- a/tests/hap/template_svm_demo.py
+++ b/tests/hap/template_svm_demo.py
@@ -165,14 +165,19 @@ def test_svm_wcs(gather_output_data):
     assert len(set(wcsnames)) == 1, f"WCSNAMES are not all the same: {wcsnames}"
 
-def test_svm_cat_sources(gather_output_data):
-    # Check the output catalogs should contain > 0 measured sources
-    cat_files = [files for files in gather_output_data if files.lower().endswith("-cat.ecsv")]
+# Due to the way the catalogs are filtered, check the size of the total catalog and one of the filter
+# catalogs separately. The total catalog has the row removed for each source where the constituent
+# filter catalogs *ALL* have flag>5.
+def test_svm_point_total_cat(gather_output_data):
+    # Check that the output total point catalogs contain a non-zero number of measured sources
+    tdp_files = [files for files in gather_output_data if files.lower().find("total") > -1 and files.lower().endswith("point-cat.ecsv")]
 
     valid_tables = {}
-    for cat in cat_files:
+    for cat in tdp_files:
         table_length = len(ascii.read(cat, format="ecsv"))
-        print("\ntest_svm_cat_sources. Number of sources in catalog {} is {}.".format(cat, table_length))
+        print("\ntest_svm_point_total_cat. Number of sources in catalog {} is {}.".format(cat, table_length))
         valid_tables[cat] = table_length > 0
-    bad_tables = [cat for cat in cat_files if not valid_tables[cat]]
+    bad_tables = [cat for cat in tdp_files if not valid_tables[cat]]
     assert len(bad_tables) == 0, f"Catalog file(s) {bad_tables} is/are unexpectedly empty"
diff --git a/tests/hap/test_pipeline.py b/tests/hap/test_pipeline.py
index c14e9f866..2b104cb05 100644
--- a/tests/hap/test_pipeline.py
+++ b/tests/hap/test_pipeline.py
@@ -122,7 +122,6 @@ class TestSingleton(BaseWFC3Pipeline):
         'dataset_names', ['iaaua1n4q', 'iacs01t4q']
     )
-    @pytest.mark.skip
     def test_astrometric_singleton(self, dataset_names):
         """ Tests pipeline-style processing of a singleton exposure using runastrodriz.
         """
diff --git a/tests/hap/test_svm_canary.py b/tests/hap/test_svm_canary.py
new file mode 100644
index 000000000..a646e9d9a
--- /dev/null
+++ b/tests/hap/test_svm_canary.py
@@ -0,0 +1,155 @@
+""" This module tests full pipeline SVM processing as a canary test.
+
+"""
+import datetime
+import glob
+import os
+import pytest
+import numpy as np
+
+from drizzlepac.haputils import astroquery_utils as aqutils
+from drizzlepac import runsinglehap
+from astropy.io import fits, ascii
+from pathlib import Path
+
+"""
+    test_svm_canary.py
+
+    This test file can be executed in the following manner:
+        $ pytest -s --basetemp=/internal/hladata/yourUniqueDirectoryHere test_svm_canary.py >& test_svm_canary.log &
+        $ tail -f test_svm_canary.log
+      * Note: When running this test, the `--basetemp` directory should be set to a unique
+        existing directory to avoid deleting previous test output.
+      * The POLLER_FILE exists in the tests/hap directory.
+
+"""
+
+WCS_SUB_NAME = "FIT_SVM_GAIA"
+POLLER_FILE = "acs_hrc_sbc_input.out"
+
+def read_csv_for_filenames():
+    # Read the CSV poller file residing in the tests directory to extract the individual visit FLT/FLC filenames
+    path = os.path.join(os.path.dirname(__file__), POLLER_FILE)
+    table = ascii.read(path, format="no_header")
+    filename_column = table.colnames[0]
+    filenames = list(table[filename_column])
+    print("\nread_csv_for_filenames. Filenames from poller: {}".format(filenames))
+
+    return filenames
+
+
+def gather_data_for_processing(tmp_path_factory):
+    # create working directory specified for the test
+    curdir = tmp_path_factory.mktemp(os.path.basename(__file__))
+    os.chdir(curdir)
+
+    # Establish FLC/FLT lists and obtain the requested data
+    flc_flag = ""
+    flt_flag = ""
+    # In order to obtain individual FLC or FLT images from MAST (if the files do not reside on disk), which
+    # may be part of an ASN, use only IPPPSS with a wildcard. The unwanted images have to be removed
+    # after-the-fact.
+    filenames = read_csv_for_filenames()
+
+    for fn in filenames:
+        if fn.lower().endswith("flc.fits") and flc_flag == "":
+            flc_flag = fn[0:6] + "*"
+        elif fn.lower().endswith("flt.fits") and flt_flag == "":
+            flt_flag = fn[0:6] + "*"
+
+        # If both flags have been set, then break out of the loop early. It may be
+        # that all files have to be checked, which means the for loop continues
+        # until its natural completion.
+        if flc_flag and flt_flag:
+            break
+
+    # Get test data through astroquery - only retrieve the pipeline-processed FLC and/or FLT files
+    # (e.g., j*_flc.fits) as necessary. The logic here and the above for loop is an attempt to
+    # avoid downloading too many images which are not needed for processing.
+    flcfiles = []
+    fltfiles = []
+    if flc_flag:
+        flcfiles = aqutils.retrieve_observation(flc_flag, suffix=["FLC"], product_type="pipeline")
+    if flt_flag:
+        fltfiles = aqutils.retrieve_observation(flt_flag, suffix=["FLT"], product_type="pipeline")
+
+    flcfiles.extend(fltfiles)
+
+    # Keep only the files which exist in BOTH lists for processing
+    files_to_process = set(filenames).intersection(set(flcfiles))
+
+    # Identify unwanted files from the download list and remove them from disk
+    files_to_remove = set(filenames).symmetric_difference(set(flcfiles))
+    try:
+        for ftr in files_to_remove:
+            os.remove(ftr)
+    except Exception as x_cept:
+        print("")
+        print("Exception encountered: {}.".format(x_cept))
+        print("The file {} could not be deleted from disk. ".format(ftr))
+        print("Remove files which are not used for processing from disk manually.")
+
+    print("\ngather_data_for_processing. Gathered data: {}".format(files_to_process))
+
+    return list(files_to_process)
+
+
+def gather_output_data(manifest_filename):
+    # Determine the filenames of all the output files from the manifest
+    print(f"\nManifest Filename: {manifest_filename}")
+    files = []
+    with open(manifest_filename, 'r') as fout:
+        for line in fout.readlines():
+            files.append(line.rstrip('\n'))
+    print("\ngather_output_data. Output data files: {}".format(files))
+
+    return files
+
+
+def construct_manifest_filename(filenames):
+    # Construct the output manifest filename from input file keywords
+    inst = fits.getval(filenames[0], "INSTRUME", ext=0).lower()
+    root = fits.getval(filenames[0], "ROOTNAME", ext=0).lower()
+    tokens_tuple = (inst, root[1:4], root[4:6], "manifest.txt")
+    manifest_filename = "_".join(tokens_tuple)
+    print("\nconstruct_manifest_filename. Manifest filename: {}".format(manifest_filename))
+
+    return manifest_filename
+
+
+def test_driver(tmp_path_factory):
+    # Act: Process the input data by executing runsinglehap - a time-consuming activity
+
+    current_dt = datetime.datetime.now()
+    print(str(current_dt))
+
+    # Read the "poller file" and download the input files, as necessary
+    input_names = gather_data_for_processing(tmp_path_factory)
+
+    # Construct the manifest filename for later
+    manifest_filename = construct_manifest_filename(input_names)
+
+    # Run the SVM processing
+    path = os.path.join(os.path.dirname(__file__), POLLER_FILE)
+    try:
+        status = runsinglehap.perform(path, log_level="debug")
+
+        output_files = gather_output_data(manifest_filename)
+
+        # Check that the output primary WCSNAME includes FIT_SVM_GAIA as part of the string value
+        tdp_files = [files for files in output_files if
+                     files.lower().find("total") > -1 and files.lower().endswith(".fits")]
+
+        for tdp in tdp_files:
+            wcsname = fits.getval(tdp, "WCSNAME", ext=1).upper()
+            print("\ntest_svm_wcs. WCSNAME: {} Output file: {}".format(wcsname, tdp))
+            assert WCS_SUB_NAME in wcsname, f"WCSNAME is not as expected for file {tdp}."
+
+    # Catch anything that happens and report it. This is meant to catch unexpected errors and
+    # generate sufficient output exception information so algorithmic problems can be addressed.
+    except Exception as except_details:
+        print(except_details)
+        pytest.fail("\nsvm_setup. Exception Visit: {}\n".format(path))
+
+    current_dt = datetime.datetime.now()
+    print(str(current_dt))
diff --git a/tests/hap/test_svm_hrcsbc.py b/tests/hap/test_svm_hrcsbc.py
index 3e85b97a6..13fc35f01 100644
--- a/tests/hap/test_svm_hrcsbc.py
+++ b/tests/hap/test_svm_hrcsbc.py
@@ -25,11 +25,14 @@
 """
 WCS_SUB_NAME = "FIT_SVM_GAIA"
-POLLER_FILE = "test_acs_hrc_sbc_input.out"
+POLLER_FILE = "acs_hrc_sbc_input.out"
 
 # Gather expected values for pass/fail criteria here
-expected_point_sources = {'hrc': 225, 'sbc': 72}
-expected_seg_sources = {'hrc': 680, 'sbc': 264}
+expected_total_point_sources = {'hrc': 268, 'sbc': 65}
+expected_total_segment_sources = {'hrc': 642, 'sbc': 283}
+expected_filter_point_sources = {'hrc': 269, 'sbc': 208}
+expected_filter_segment_sources = {'hrc': 642, 'sbc': 399}
+tolerance = 0.25
 
 
 @pytest.fixture(scope="module")
@@ -198,32 +201,39 @@ def test_svm_empty_cats(gather_output_data):
     assert len(bad_tables) == 0, f"Catalog file(s) {bad_tables} is/are unexpectedly empty"
 
-@pytest.mark.skip
-def test_svm_point_cats(gather_output_data):
-    # Check that the point catalogs have the expected number of sources
-    cat_files = [files for files in gather_output_data if files.lower().endswith("point-cat.ecsv")]
+# Due to the way the catalogs are filtered, check the size of the total catalog and one of the filter
+# catalogs separately. The total catalog has the row removed for each source where the constituent
+# filter catalogs *ALL* have flag>5 for the source. Rows are NOT removed from the filter table based on
+# flag values.
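+#
+# As an illustration of the tolerance check used below, using the 'hrc'
+# expectation above (268) and tolerance = 0.25: the assertion
+# np.isclose(268, n, atol=0.25 * 268) accepts any measured count n in
+# roughly the range 201..335.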
+def test_svm_point_total_cat(gather_output_data):
+    # Check that the output total point catalogs contain the expected number of sources,
+    # allowing for a broad tolerance
+    print("\ntest_svm_point_total_cat.")
+    tdp_files = [files for files in gather_output_data if files.lower().find("total") > -1 and files.lower().endswith("point-cat.ecsv")]
 
-    num_sources = {cat:len(ascii.read(cat, format="ecsv")) for cat in cat_files}
+    num_sources = {tdp: len(ascii.read(tdp, format="ecsv")) for tdp in tdp_files}
     valid_cats = {}
-    for cat in expected_point_sources.keys():
-        for file in cat_files:
-            if cat in file and "total" in file:
-                valid_cats[cat] = (np.isclose(num_sources[file], expected_point_sources[cat], rtol=0.1), num_sources[file])
+    for tdp in expected_total_point_sources.keys():
+        for file in tdp_files:
+            if tdp in file:
+                tol_limit = tolerance * expected_total_point_sources[tdp]
+                valid_cats[tdp] = (file, np.isclose(expected_total_point_sources[tdp], num_sources[file], atol=tol_limit))
                 break
-    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][0]]
-    assert len(bad_cats) == 0, f"Point Catalog(s) {bad_cats} had {valid_cats} sources, expected {expected_point_sources}"
+    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][1]]
+    assert len(bad_cats) == 0, f"Total Point Catalog(s) {bad_cats} had {valid_cats} sources, expected {expected_total_point_sources}"
 
-def test_svm_segment_cats(gather_output_data):
-    # Check that the point catalogs have the expected number of sources
-    cat_files = [files for files in gather_output_data if files.lower().endswith("segment-cat.ecsv")]
+def test_svm_segment_total_cat(gather_output_data):
+    # Check that the output total segment catalogs contain the expected number of sources,
+    # allowing for a broad tolerance
+    print("\ntest_svm_segment_total_cat.")
+    tdp_files = [files for files in gather_output_data if files.lower().find("total") > -1 and files.lower().endswith("segment-cat.ecsv")]
 
-    num_sources = {cat: len(ascii.read(cat, format="ecsv")) for cat in cat_files}
+    num_sources = {tdp: len(ascii.read(tdp, format="ecsv")) for tdp in tdp_files}
     valid_cats = {}
-    for cat in expected_seg_sources.keys():
-        for file in cat_files:
-            if cat in file and "total" in file:
-                valid_cats[cat] = (np.isclose(num_sources[file], expected_seg_sources[cat], rtol=0.1), num_sources[file])
+    for tdp in expected_total_segment_sources.keys():
+        for file in tdp_files:
+            if tdp in file:
+                tol_limit = tolerance * expected_total_segment_sources[tdp]
+                valid_cats[tdp] = (file, np.isclose(expected_total_segment_sources[tdp], num_sources[file], atol=tol_limit))
                 break
-    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][0]]
-    assert len(bad_cats) == 0, f"Segment Catalog(s) {bad_cats} had {valid_cats} sources, expected {expected_seg_sources}"
+    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][1]]
+    assert len(bad_cats) == 0, f"Total Segment Catalog(s) {bad_cats} had {valid_cats} sources, expected {expected_total_segment_sources}"
diff --git a/tests/hap/test_svm_ibqk07.py b/tests/hap/test_svm_ibqk07.py
index 309b09127..5608c6a92 100644
--- a/tests/hap/test_svm_ibqk07.py
+++ b/tests/hap/test_svm_ibqk07.py
@@ -29,18 +29,15 @@
 """
 POLLER_FILE = "wfc3_bqk_07_input.out"
-IR_WCS_SUB_NAME = "FIT_SVM_GAIA"
-UVIS_WCS_SUB_NAME = "HSC30"
-EXPECTED_POINT_SOURCES = {
-"hst_12557_07_wfc3_ir_f160w_ibqk07_point-cat.ecsv": 15,
+WCS_SUB_NAME = "HSC30"
+expected_total_point_sources = {
 "hst_12557_07_wfc3_ir_total_ibqk07_point-cat.ecsv": 4,
-"hst_12557_07_wfc3_uvis_f555w_ibqk07_point-cat.ecsv": 40,
"hst_12557_07_wfc3_uvis_total_ibqk07_point-cat.ecsv": 14} -EXPECTED_SEG_SOURCES = { -"hst_12557_07_wfc3_ir_f160w_ibqk07_segment-cat.ecsv": 2, +expected_total_segment_sources= { "hst_12557_07_wfc3_ir_total_ibqk07_segment-cat.ecsv": 2, -"hst_12557_07_wfc3_uvis_f555w_ibqk07_segment-cat.ecsv": 26, "hst_12557_07_wfc3_uvis_total_ibqk07_segment-cat.ecsv": 19} +tolerance = 0.25 + MEAN_CAT_MAGAP2_POINT = { "hst_12557_07_wfc3_ir_f160w_ibqk07_point-cat.ecsv": 24.27, "hst_12557_07_wfc3_uvis_f555w_ibqk07_point-cat.ecsv": 25.36} @@ -178,7 +175,6 @@ def test_svm_manifest_name(construct_manifest_filename): # Ensure the manifest file uses the proper naming convention assert(path.is_file()) -@pytest.mark.skip def test_svm_wcs_ir(gather_output_data): print("\ntest_svm_wcs_ir.") # Get the TDP for this detector @@ -187,7 +183,7 @@ def test_svm_wcs_ir(gather_output_data): # Check the WCS solution is as expected wcsname = fits.getval(tdp_files[0], "WCSNAME", ext=1).upper() print("\ntest_svm_wcs_ir. WCSNAME: {} Output file: {}".format(wcsname, tdp_files[0])) - assert IR_WCS_SUB_NAME in wcsname, f"WCSNAME is not as expected for file {tdp_files[0]}." + assert WCS_SUB_NAME in wcsname, f"WCSNAME is not as expected for file {tdp_files[0]}." def test_svm_wcs_ir_all(gather_output_data): @@ -207,7 +203,7 @@ def test_svm_wcs_uvis(gather_output_data): # Check the WCS solution is as expected wcsname = fits.getval(tdp_files[0], "WCSNAME", ext=1).upper() print("\ntest_svm_wcs_uvis. WCSNAME: {} Output file: {}".format(wcsname, tdp_files[0])) - assert UVIS_WCS_SUB_NAME in wcsname, f"WCSNAME is not as expected for file {tdp_files[0]}." + assert WCS_SUB_NAME in wcsname, f"WCSNAME is not as expected for file {tdp_files[0]}." def test_svm_wcs_uvis_all(gather_output_data): @@ -218,38 +214,43 @@ def test_svm_wcs_uvis_all(gather_output_data): wcsnames = [fits.getval(uvis, "WCSNAME", ext=1).upper() for uvis in uvis_files] assert len(set(wcsnames)) == 1, f"WCSNAMES are not all the same for the UVIS detector: {wcsnames}" -@pytest.mark.skip -def test_svm_point_cat_numsources(gather_output_data): - # Check that the point catalogs have the expected number of sources - print("\ntest_svm_point_cat_numsources.") - cat_files = [files for files in gather_output_data if files.lower().endswith("point-cat.ecsv")] - num_sources = {cat:len(ascii.read(cat, format="ecsv")) for cat in cat_files} +# Due to the way the catalogs are filtered, check the size of the total catalog and one of the filter +# catalogs separately. The total catalog has the row removed for each source where the constituent +# filter catalogs *ALL* have flag>5 for the source. Rows are NOT removed from the filter table based on +# flag values. +def test_svm_point_total_cat(gather_output_data): + # Check the output catalogs should contain the correct number of sources -- allows for a broad tolerance + print("\ntest_svm_point_total_cat.") + tdp_files = [files for files in gather_output_data if files.lower().find("total") > -1 and files.lower().endswith("point-cat.ecsv")] + + num_sources = {tdp:len(ascii.read(tdp, format="ecsv")) for tdp in tdp_files} valid_cats = {} - for cat in EXPECTED_POINT_SOURCES.keys(): - for file in cat_files: - if cat == file: - print("Point numsources. 
-                valid_cats[cat] = (np.isclose(num_sources[file], EXPECTED_POINT_SOURCES[cat], rtol=0.1), num_sources[file])
+    for tdp in expected_total_point_sources.keys():
+        for file in tdp_files:
+            if tdp in file:
+                tol_limit = tolerance * expected_total_point_sources[tdp]
+                valid_cats[tdp] = (file, np.isclose(expected_total_point_sources[tdp], num_sources[file], atol=tol_limit))
                 break
-    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][0]]
-    assert len(bad_cats) == 0, f"Point Catalog(s) {bad_cats} had {valid_cats} sources, expected {EXPECTED_POINT_SOURCES}"
+    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][1]]
+    assert len(bad_cats) == 0, f"Total Point Catalog(s) {bad_cats} had {valid_cats} sources, expected {expected_total_point_sources}"
 
-def test_svm_segment_cat_numsources(gather_output_data):
-    print("\ntest_svm_segment_cat_numsources.")
-    # Check that the point catalogs have the expected number of sources
-    cat_files = [files for files in gather_output_data if files.lower().endswith("segment-cat.ecsv")]
+def test_svm_segment_total_cat(gather_output_data):
+    # Check that the output total segment catalogs contain the expected number of sources,
+    # allowing for a broad tolerance
+    print("\ntest_svm_segment_total_cat.")
+    tdp_files = [files for files in gather_output_data if files.lower().find("total") > -1 and files.lower().endswith("segment-cat.ecsv")]
 
-    num_sources = {cat: len(ascii.read(cat, format="ecsv")) for cat in cat_files}
+    num_sources = {tdp: len(ascii.read(tdp, format="ecsv")) for tdp in tdp_files}
     valid_cats = {}
-    for cat in EXPECTED_SEG_SOURCES.keys():
-        for file in cat_files:
-            if cat == file:
-                valid_cats[cat] = (np.isclose(num_sources[file], EXPECTED_SEG_SOURCES[cat], rtol=0.1), num_sources[file])
+    for tdp in expected_total_segment_sources.keys():
+        for file in tdp_files:
+            if tdp in file:
+                tol_limit = tolerance * expected_total_segment_sources[tdp]
+                valid_cats[tdp] = (file, np.isclose(expected_total_segment_sources[tdp], num_sources[file], atol=tol_limit))
                 break
-    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][0]]
-    assert len(bad_cats) == 0, f"Segment Catalog(s) {bad_cats} had {valid_cats} sources, expected {EXPECTED_SEG_SOURCES}"
+    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][1]]
+    assert len(bad_cats) == 0, f"Total Segment Catalog(s) {bad_cats} had {valid_cats} sources, expected {expected_total_segment_sources}"
 
 def test_svm_point_cat_meanmag(gather_output_data):
diff --git a/tests/hap/test_svm_ibyt50.py b/tests/hap/test_svm_ibyt50.py
index 4d53d4a31..5ca66ff3c 100644
--- a/tests/hap/test_svm_ibyt50.py
+++ b/tests/hap/test_svm_ibyt50.py
@@ -29,17 +29,16 @@
 """
 POLLER_FILE = "wfc3_byt_50_input.out"
-WCS_SUB_NAME = "FIT_SVM_GAIA"
-EXPECTED_POINT_SOURCES = {
-"hst_13023_50_wfc3_ir_f140w_ibyt50_point-cat.ecsv": 295,
-"hst_13023_50_wfc3_ir_total_ibyt50_point-cat.ecsv": 124,
-"hst_13023_50_wfc3_uvis_f606w_ibyt50_point-cat.ecsv": 538,
-"hst_13023_50_wfc3_uvis_total_ibyt50_point-cat.ecsv": 106}
-EXPECTED_SEG_SOURCES = {
-"hst_13023_50_wfc3_ir_f140w_ibyt50_segment-cat.ecsv": 123,
-"hst_13023_50_wfc3_ir_total_ibyt50_segment-cat.ecsv": 113,
-"hst_13023_50_wfc3_uvis_f606w_ibyt50_segment-cat.ecsv": 2176,
-"hst_13023_50_wfc3_uvis_total_ibyt50_segment-cat.ecsv": 399}
+WCS_UVIS_SUB_NAME = "FIT_SVM_GAIA"
+WCS_IR_SUB_NAME = "FIT_SVM_GSC242"
+expected_total_point_sources = {
+"hst_13023_50_wfc3_ir_total_ibyt50_point-cat.ecsv": 122,
+"hst_13023_50_wfc3_uvis_total_ibyt50_point-cat.ecsv": 105}
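+# The expected segment-source counts below are, like the point counts above,
+# nominal values: the tests accept any measured count n satisfying
+# np.isclose(expected, n, atol=tolerance * expected).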
+expected_total_segment_sources = {
+"hst_13023_50_wfc3_ir_total_ibyt50_segment-cat.ecsv": 107,
+"hst_13023_50_wfc3_uvis_total_ibyt50_segment-cat.ecsv": 415}
+tolerance = 0.25
+
 
 @pytest.fixture(scope="module")
 def read_csv_for_filenames():
@@ -169,7 +168,6 @@ def test_svm_manifest_name(construct_manifest_filename):
     # Ensure the manifest file uses the proper naming convention
     assert(path.is_file())
 
-@pytest.mark.skip
 def test_svm_wcs_ir(gather_output_data):
     print("\ntest_svm_wcs_ir.")
     # Get the TDP for this detector
@@ -178,7 +176,7 @@
     # Check the WCS solution is as expected
     wcsname = fits.getval(tdp_files[0], "WCSNAME", ext=1).upper()
     print("\ntest_svm_wcs_ir. WCSNAME: {} Output file: {}".format(wcsname, tdp_files[0]))
-    assert WCS_SUB_NAME in wcsname, f"WCSNAME is not as expected for file {tdp_files[0]}."
+    assert WCS_IR_SUB_NAME in wcsname, f"WCSNAME is not as expected for file {tdp_files[0]}."
 
 
 def test_svm_wcs_ir_all(gather_output_data):
@@ -198,7 +196,7 @@ def test_svm_wcs_uvis(gather_output_data):
     # Check the WCS solution is as expected
     wcsname = fits.getval(tdp_files[0], "WCSNAME", ext=1).upper()
     print("\ntest_svm_wcs_uvis. WCSNAME: {} Output file: {}".format(wcsname, tdp_files[0]))
-    assert WCS_SUB_NAME in wcsname, f"WCSNAME is not as expected for file {tdp_files[0]}."
+    assert WCS_UVIS_SUB_NAME in wcsname, f"WCSNAME is not as expected for file {tdp_files[0]}."
 
 
 def test_svm_wcs_uvis_all(gather_output_data):
@@ -210,36 +208,39 @@
     assert len(set(wcsnames)) == 1, f"WCSNAMES are not all the same for the UVIS detector: {wcsnames}"
 
-@pytest.mark.skip
-def test_svm_point_cat_numsources(gather_output_data):
-    # Check that the point catalogs have the expected number of sources
-    print("\ntest_svm_point_cat_numsources.")
-    cat_files = [files for files in gather_output_data if files.lower().endswith("point-cat.ecsv")]
-
-    num_sources = {cat:len(ascii.read(cat, format="ecsv")) for cat in cat_files}
+# Due to the way the catalogs are filtered, check the size of the total catalog and one of the filter
+# catalogs separately. The total catalog has the row removed for each source where the constituent
+# filter catalogs *ALL* have flag>5 for the source. Rows are NOT removed from the filter table based on
+# flag values.
+def test_svm_point_total_cat(gather_output_data):
+    # Check that the output total point catalogs contain the expected number of sources,
+    # allowing for a broad tolerance
+    print("\ntest_svm_point_total_cat.")
+    tdp_files = [files for files in gather_output_data if files.lower().find("total") > -1 and files.lower().endswith("point-cat.ecsv")]
+
+    num_sources = {tdp: len(ascii.read(tdp, format="ecsv")) for tdp in tdp_files}
     valid_cats = {}
-    for cat in EXPECTED_POINT_SOURCES.keys():
-        for file in cat_files:
-            if cat == file:
-                print("Point numsources. file: {} num: {}".format(file, EXPECTED_POINT_SOURCES[cat]))
-                valid_cats[cat] = (np.isclose(num_sources[file], EXPECTED_POINT_SOURCES[cat], rtol=0.1), num_sources[file])
+    for tdp in expected_total_point_sources.keys():
+        for file in tdp_files:
+            if tdp in file:
+                tol_limit = tolerance * expected_total_point_sources[tdp]
+                valid_cats[tdp] = (file, np.isclose(expected_total_point_sources[tdp], num_sources[file], atol=tol_limit))
                 break
-    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][0]]
-    assert len(bad_cats) == 0, f"Point Catalog(s) {bad_cats} had {valid_cats} sources, expected {EXPECTED_POINT_SOURCES}"
+    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][1]]
+    assert len(bad_cats) == 0, f"Total Point Catalog(s) {bad_cats} had {valid_cats} sources, expected {expected_total_point_sources}"
 
-@pytest.mark.skip
-def test_svm_segment_cat_numsources(gather_output_data):
-    print("\ntest_svm_segment_cat_numsources.")
-    # Check that the point catalogs have the expected number of sources
-    cat_files = [files for files in gather_output_data if files.lower().endswith("segment-cat.ecsv")]
+def test_svm_segment_total_cat(gather_output_data):
+    # Check that the output total segment catalogs contain the expected number of sources,
+    # allowing for a broad tolerance
+    print("\ntest_svm_segment_total_cat.")
+    tdp_files = [files for files in gather_output_data if files.lower().find("total") > -1 and files.lower().endswith("segment-cat.ecsv")]
 
-    num_sources = {cat: len(ascii.read(cat, format="ecsv")) for cat in cat_files}
+    num_sources = {tdp: len(ascii.read(tdp, format="ecsv")) for tdp in tdp_files}
     valid_cats = {}
-    for cat in EXPECTED_SEG_SOURCES.keys():
-        for file in cat_files:
-            if cat == file:
-                valid_cats[cat] = (np.isclose(num_sources[file], EXPECTED_SEG_SOURCES[cat], rtol=0.1), num_sources[file])
+    for tdp in expected_total_segment_sources.keys():
+        for file in tdp_files:
+            if tdp in file:
+                tol_limit = tolerance * expected_total_segment_sources[tdp]
+                valid_cats[tdp] = (file, np.isclose(expected_total_segment_sources[tdp], num_sources[file], atol=tol_limit))
                 break
-    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][0]]
-    assert len(bad_cats) == 0, f"Segment Catalog(s) {bad_cats} had {valid_cats} sources, expected {EXPECTED_SEG_SOURCES}"
+    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][1]]
+    assert len(bad_cats) == 0, f"Total Segment Catalog(s) {bad_cats} had {valid_cats} sources, expected {expected_total_segment_sources}"
diff --git a/tests/hap/test_svm_wfc3ir.py b/tests/hap/test_svm_wfc3ir.py
index 36787a332..bc7e8ab88 100644
--- a/tests/hap/test_svm_wfc3ir.py
+++ b/tests/hap/test_svm_wfc3ir.py
@@ -27,7 +27,7 @@
 POLLER_FILE = "wfc3_ir_ib6807_input.out"
 
 # Gather all expected values used for determining pass/fail criteria here
-expected_point_sources = {'ir': 358}
+expected_point_sources = {'ir': 315}
 expected_seg_sources = {'ir': 298}
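
Reviewer note (not part of the patch): the cdrizzlebox.c change above is easiest to sanity-check against a small Python model of the accumulation rule in update_data(). Everything below is an illustrative sketch: the names out_data, out_counts, d, and dow are stand-ins for the C output buffers and parameters, not drizzlepac API.

    import numpy as np

    def update_data(out_data, out_counts, ii, jj, d, dow):
        """Model of the fixed update_data(): fold one input pixel with value
        d and weight dow into output pixel (jj, ii)."""
        if dow == 0.0:
            # The fix for #1222: a zero-weight input pixel must leave both the
            # output value and the accumulated weight untouched. The old code
            # fell through to the vc == 0 branch and wrote d anyway.
            return
        vc = out_counts[jj, ii]            # weight accumulated so far
        if vc == 0.0:
            out_data[jj, ii] = d           # first real contribution
        else:
            # weighted running mean; vc + dow > 0 for non-negative weights
            out_data[jj, ii] = (out_data[jj, ii] * vc + dow * d) / (vc + dow)
        out_counts[jj, ii] = vc + dow

    out_data = np.zeros((2, 2))
    out_counts = np.zeros((2, 2))
    update_data(out_data, out_counts, 0, 0, d=7.0, dow=0.0)  # ignored: zero weight
    update_data(out_data, out_counts, 0, 0, d=7.0, dow=0.5)  # first contribution
    assert out_data[0, 0] == 7.0 and out_counts[0, 0] == 0.5

This is exactly the property the new test_zero_input_weight test asserts end-to-end: outsci must stay zero wherever outwht is zero.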