Skip to content

Commit

Permalink
Cherrypick rc6 (#1182)
Browse files Browse the repository at this point in the history
* SVM: Do not create a bare manifest file (#1174)

* Check the manifest_name variable is not set to the bare filename, "manifest.txt"
which indicates something went very wrong in the code.  Only if the manifest_name
variable is set to the agreed upon format instrument_programID_visitID_manifest.txt
(e.g., acs_bny_a1_manifest.txt) will a manifest file be written and documented in
the output log.

* Added extract checks after testing to ensure clean exit.

* In order to write out a manifest file, the manifest_name must not be an
empty string, as well as not be equal to "manifest.txt".

* Fixed variable name typo.

* Only remove rows where all flags are bad (#1175)

* Only remove rows where all flags are bad

* Make comparison with float more robust

* Fix the units of a few variables in the output Point and Segmentation catalogs (#1178)

* Fix the units of a few variables in the output Point and Segmentation
catalogs.

* Larry B. indicated the "background_centroid" (aka background_at_centroid) has
the same units as the input background image, so the units are electrons/s.

* Fixed typo on units for cxx, cxy, cyy (pixels**2 -> pixels**(-2)).

* Moved initialization of grism and ramp lists, as well as the found_data (#1181)

indicator before the call to poller_utils.  Moving the list initialization
ensures no exception will be generated upon exit.
Do not add the "no_data_trl" (no data being processed trailer file) to the
product_list.
Only append the astrodrizzle trailer file to the total trailer file if
data_found = True.
Updated comments as necessary.

Co-authored-by: mdlpstsci <[email protected]>
  • Loading branch information
stsci-hack and mdlpstsci authored Nov 4, 2021
1 parent 5d96633 commit e1e59e9
Show file tree
Hide file tree
Showing 2 changed files with 42 additions and 43 deletions.
69 changes: 34 additions & 35 deletions drizzlepac/hapsequencer.py
Original file line number Diff line number Diff line change
Expand Up @@ -266,18 +266,6 @@ def create_catalog_products(total_obj_list, log_level, diagnostic_mode=False, ph
# all catalogs.
# This requires collating results for each type of catalog from all filter products.
for cat_type in filter_product_catalogs.catalogs.keys():
catalog_mask = filter_product_catalogs.catalogs[cat_type].source_cat['Flags'] > flag_trim_value
if source_mask[cat_type] is None:
source_mask[cat_type] = catalog_mask
else:
# Combine masks for all filters for this catalog type
source_mask[cat_type] = np.bitwise_or(source_mask[cat_type], catalog_mask)

# Trim based on user-specified/default flag limit 'flag_trim_value' specified in parameter file
trimmed_rows = np.where(source_mask[cat_type])[0].tolist()
filter_product_catalogs.catalogs[cat_type].source_cat.remove_rows(trimmed_rows)
filter_product_catalogs.catalogs[cat_type].subset_filter_source_cat.remove_rows(trimmed_rows)

subset_columns_dict[cat_type] = {}
subset_columns_dict[cat_type]['subset'] = \
filter_product_catalogs.catalogs[cat_type].subset_filter_source_cat
Expand All @@ -292,7 +280,6 @@ def create_catalog_products(total_obj_list, log_level, diagnostic_mode=False, ph
# rows which contain empty strings (masked values) for *all* measurements for *all*
# of the filter catalogs.
for cat_type in total_product_catalogs.catalogs.keys():
good_rows_index = []
if cat_type == 'aperture':
all_columns = total_product_catalogs.catalogs[cat_type].sources.colnames
table_filled = total_product_catalogs.catalogs[cat_type].sources.filled(-9999.9)
Expand All @@ -301,11 +288,15 @@ def create_catalog_products(total_obj_list, log_level, diagnostic_mode=False, ph
table_filled = total_product_catalogs.catalogs[cat_type].source_cat.filled(-9999.9)
flag_columns = [colname for colname in all_columns if "Flags_" in colname]
filled_flag_columns = table_filled[flag_columns]
for i, trow in enumerate(filled_flag_columns):
for tcol in trow:
if tcol != -9999:
good_rows_index.append(i)
break

# work out what rows have flag values > flag_limit in ALL flag columns
flag_bitmasks = [np.logical_or(filled_flag_columns[col] > flag_trim_value,
np.isclose(filled_flag_columns[col], -9999.9))
for col in filled_flag_columns.colnames]
flag_mask = np.logical_and.reduce(flag_bitmasks)
# Get indices of all good rows
good_rows_index = np.where(flag_mask == False)[0]

if cat_type == 'aperture':
total_product_catalogs.catalogs[cat_type].sources = total_product_catalogs.catalogs[cat_type].sources[good_rows_index]
else:
Expand Down Expand Up @@ -511,8 +502,12 @@ def run_hap_processing(input_filename, diagnostic_mode=False, input_custom_pars_
# Start by reading in any environment variable related to catalog generation that has been set
cat_switches = {sw: _get_envvar_switch(sw, default=envvar_cat_svm[sw]) for sw in envvar_cat_svm}

# Since these are used in the finally block, make sure they are initialized
total_obj_list = []
grism_product_list = []
ramp_product_list = []
manifest_name = ""
found_data = False
try:
# Parse the poller file and generate the the obs_info_dict, as well as the total detection
# product lists which contain the ExposureProduct, FilterProduct, and TotalProduct objects
Expand All @@ -537,19 +532,21 @@ def run_hap_processing(input_filename, diagnostic_mode=False, input_custom_pars_

# It is possible the total_obj_list output from the poller_utils contains only Grism/Prism
# data and no direct images, so no further processing should be done. If this is the case,
# there is actually nothing to be done for the visit, except write out a manifest and
# a log file. Check every item in the total data product list.
found_data = False
# there is actually nothing to be done for the visit, except write out an empty manifest file.
# Check every item in the total data product list.
for total_item in total_obj_list:
# Indicator of viable (e.g., not spectroscopic, etc.) direct images for processing
if total_item.edp_list and not found_data:
found_data = True
# Indicator of only Grism/Prism data found with no direct images, so no processing done.
# Leaving the no_data_trl variable and just not adding it to the product_list in case
# the requirement changes again to write out a trailer file in this instance.
elif total_item.grism_edp_list:
no_data_trl = total_item.trl_filename
if not found_data:
log.warning("")
log.warning("There are no viable direct images in any Total Data Product for this visit. No processing can be done.")
log.warning("No SVM processing is done for the Grism/Prism data - no SVM output products are generated.")
product_list += [no_data_trl]
sys.exit(0)

# Update all of the product objects with their associated configuration information.
Expand Down Expand Up @@ -584,7 +581,6 @@ def run_hap_processing(input_filename, diagnostic_mode=False, input_custom_pars_

# Need to delete the Ramp filter Exposure objects from the *Product lists as
# these images should not be processed beyond the alignment to Gaia (run_align_to_gaia).
ramp_product_list = []
_ = delete_ramp_exposures(total_item.fdp_list, 'FILTER')
ramp_product_list = delete_ramp_exposures(total_item.edp_list, 'EXPOSURE')
product_list += ramp_product_list
Expand All @@ -594,7 +590,6 @@ def run_hap_processing(input_filename, diagnostic_mode=False, input_custom_pars_
# appropriate to be an 'a priori' or the pipeline default (fallback) solution. Note: there
# is no need to delete the Grism/Prism lists after the WCS update as the Grism/Prism
# exposures are stored in an list ignored by a majority of the processing.
grism_product_list = []
if total_item.grism_edp_list and total_item.edp_list:
grism_product_list = update_wcs_in_visit(total_item)
product_list += grism_product_list
Expand Down Expand Up @@ -660,6 +655,7 @@ def run_hap_processing(input_filename, diagnostic_mode=False, input_custom_pars_
# first column of the first row is needed. It is desired to use the contents of the
# FITS header keywords INSTRUME and ROOTNAME to use/parse for necessary information.
# co = close out
h0 = None
if type(input_filename) == str:
co_filename= ascii.read(input_filename, format='no_header')["col1"][0]
h0 = fits.getheader(co_filename)
Expand All @@ -668,21 +664,23 @@ def run_hap_processing(input_filename, diagnostic_mode=False, input_custom_pars_
elif type(input_filename) == list:
h0 = fits.getheader(input_filename[0])

co_inst = h0["INSTRUME"].lower()
co_root = h0["ROOTNAME"].lower()
tokens_tuple = (co_inst, co_root[1:4], co_root[4:6], "manifest.txt")
manifest_name = "_".join(tokens_tuple)
if h0:
co_inst = h0["INSTRUME"].lower()
co_root = h0["ROOTNAME"].lower()
tokens_tuple = (co_inst, co_root[1:4], co_root[4:6], "manifest.txt")
manifest_name = "_".join(tokens_tuple)

# Problem case - just give it the base name
if type(input_filename) != str and type(input_filename) != list:
manifest_name = "manifest.txt"

# Write out manifest file listing all products generated during processing
log.info("Creating manifest file {}.".format(manifest_name))
log.info(" The manifest contains the names of products generated during processing.")
with open(manifest_name, mode='w') as catfile:
if total_obj_list:
[catfile.write("{}\n".format(name)) for name in product_list]
if manifest_name and manifest_name != "manifest.txt":
# Write out manifest file listing all products generated during processing
log.info("Creating manifest file {}.".format(manifest_name))
log.info(" The manifest contains the names of products generated during processing.")
with open(manifest_name, mode='w') as catfile:
if total_obj_list:
[catfile.write("{}\n".format(name)) for name in product_list]

end_dt = datetime.datetime.now()
log.info('Processing completed at {}'.format(str(end_dt)))
Expand All @@ -707,7 +705,8 @@ def run_hap_processing(input_filename, diagnostic_mode=False, input_custom_pars_
# Append total trailer file (from astrodrizzle) to all total log files
if total_obj_list:
for tot_obj in total_obj_list:
proc_utils.append_trl_file(tot_obj.trl_filename, logname, clean=False)
if found_data:
proc_utils.append_trl_file(tot_obj.trl_filename, logname, clean=False)
if tot_obj.edp_list:
# Update DRIZPARS keyword value with new logfile name in ALL drizzle products
tot_obj.update_drizpars()
Expand Down
16 changes: 8 additions & 8 deletions drizzlepac/haputils/catalog_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -1211,8 +1211,8 @@ def measure_sources(self, filter_name):
"MagErrAp1": "Error of MagAp1",
"MagAp2": "ABMAG of source based on the outer (larger) aperture",
"MagErrAp2": "Error of MagAp2",
"MSkyAp2": "ABMAG of sky based on outer (larger) aperture",
"StdevAp2": "Standard deviation of sky measurement in outer (larger) aperture",
"MSkyAp2": "Sky estimate from an annulus outside Aperture 2",
"StdevAp2": "Standard deviation of sky estimate from annulus outside Aperture 2",
"FluxAp2": "Flux of source based on the outer (larger) aperture",
"CI": "Concentration Index",
"Flags": "Numeric encoding for conditions on detected sources"}
Expand All @@ -1222,7 +1222,7 @@ def measure_sources(self, filter_name):
# add units to columns
final_col_units = {"X-Center": "pixels", "Y-Center": "pixels", "RA": "degrees", "DEC": "degrees",
"ID": "unitless", "MagAp1": "ABMAG", "MagErrAp1": "ABMAG", "MagAp2": "ABMAG",
"MagErrAp2": "ABMAG", "MSkyAp2": "ABMAG", "StdevAp2": "ABMAG",
"MagErrAp2": "ABMAG", "MSkyAp2": "electrons/s/pixel", "StdevAp2": "electrons/s/pixel",
"FluxAp2": "electrons/sec", "CI": "ABMAG", "Flags": "unitless"}
for col_title in final_col_units:
output_photometry_table[col_title].unit = final_col_units[col_title]
Expand Down Expand Up @@ -2365,7 +2365,7 @@ def _define_filter_table(self, filter_table):
"MagErrAp2": "Error of MagAp2",
"FluxAp2": "Flux of source based on the outer (larger) aperture",
"FluxErrAp2": "Error of FluxAp2",
"MSkyAp2": "ABMAG of sky based on outer (larger) aperture",
"MSkyAp2": "Sky estimate from an annulus outside Aperture 2",
"FluxSegment": "Sum of unmasked data values in the source segment",
"FluxSegmentErr": "Uncertainty of FluxSegment, propagated from the input error array",
"KronRadius": "The unscaled first-moment Kron radius",
Expand Down Expand Up @@ -2402,17 +2402,17 @@ def _define_filter_table(self, filter_table):
"MagErrAp2": "ABMAG",
"FluxAp2": "electrons/s",
"FluxErrAp2": "electrons/s",
"MSkyAp2": "ABMAG",
"MSkyAp2": "electrons/s/pixel",
"MagSegment": "ABMAG",
"FluxSegment": "electrons/s",
"FluxSegmentErr": "electrons/s",
"KronRadius": "pixels",
"X2": "pixels**2",
"Y2": "pixels**2",
"XY": "pixels**2",
"CXX": "pixels**2",
"CYY": "pixels**2",
"CXY": "pixels**2",
"CXX": "pixels**(-2)",
"CYY": "pixels**(-2)",
"CXY": "pixels**(-2)",
"Xmin": "pixels",
"Ymin": "pixels",
"Xmax": "pixels",
Expand Down

0 comments on commit e1e59e9

Please sign in to comment.