From 7a1df5edefa8cc13f984d509eaf89ceed1b30a42 Mon Sep 17 00:00:00 2001 From: Doug Friedel Date: Fri, 17 Jan 2020 10:38:57 -0600 Subject: [PATCH 1/4] updates for python3 --- bin/add_weight | 2 +- bin/apply_bpm | 2 +- bin/bf_correct | 2 +- bin/bias_correct | 2 +- bin/coadd_nwgint | 2 +- bin/coadd_prepare | 2 +- bin/connect_streaks | 6 + bin/cticheck | 6 + bin/find_flat_normalization | 2 +- bin/fix_cols | 2 +- bin/fix_columns | 2 +- bin/flat_correct | 2 +- bin/flat_correct_cp | 2 +- bin/gain_correct | 2 +- bin/image_diff | 2 +- bin/lightbulb | 6 + bin/linearity_correct | 2 +- bin/make_mask | 2 +- bin/maskStreaks | 207 ++++ bin/mask_saturation | 2 +- bin/mini_compare | 2 +- bin/normalize_flat | 2 +- bin/null_weights | 2 +- bin/nullop | 2 +- bin/override_bpm | 2 +- bin/overscan_correct | 2 +- bin/pca_plot | 4 +- bin/pixcorrect_cp | 2 +- bin/pixcorrect_fp | 2 +- bin/pixcorrect_im | 2 +- bin/row_interp | 2 +- bin/rowinterp_nullweight | 2 +- bin/scale_flat | 2 +- bin/sky_combine | 2 +- bin/sky_compress | 2 +- bin/sky_fit | 2 +- bin/sky_pca | 2 +- bin/sky_subtract | 2 +- bin/sky_template | 2 +- bin/starflat_correct | 2 +- pixcorrect_test.build | 6 +- python/pixcorrect/PixCorrectDriver.py | 114 +-- python/pixcorrect/__init__.py | 2 +- python/pixcorrect/add_weight.py | 36 +- python/pixcorrect/bf_correct.py | 58 +- python/pixcorrect/bfinfo.py | 48 +- python/pixcorrect/bias_correct.py | 19 +- python/pixcorrect/clippedMean.py | 56 +- python/pixcorrect/coadd_nwgint.py | 299 ++++-- python/pixcorrect/coadd_prepare.py | 131 +-- python/pixcorrect/connect_streaks.py | 948 +++++++++++++++++++ python/pixcorrect/corr_util.py | 48 +- python/pixcorrect/cti.py | 92 ++ python/pixcorrect/cti_utils.py | 214 +++++ python/pixcorrect/dbc.py | 64 +- python/pixcorrect/decaminfo.py | 425 +++++---- python/pixcorrect/find_flat_normalization.py | 80 +- python/pixcorrect/fix_columns.py | 144 ++- python/pixcorrect/flat_correct.py | 66 +- python/pixcorrect/flat_correct_cp.py | 68 +- 
python/pixcorrect/gain_correct.py | 46 +- python/pixcorrect/image_diff.py | 21 +- python/pixcorrect/imtypes.py | 6 +- python/pixcorrect/lightbulb.py | 108 +++ python/pixcorrect/lightbulb_utils.py | 263 +++++ python/pixcorrect/linearity_correct.py | 76 +- python/pixcorrect/make_mask.py | 76 +- python/pixcorrect/mask_saturation.py | 13 +- python/pixcorrect/mini_compare.py | 37 +- python/pixcorrect/normalize_flat.py | 111 ++- python/pixcorrect/null_weights.py | 32 +- python/pixcorrect/pixcorrect_cp.py | 71 +- python/pixcorrect/pixcorrect_im.py | 73 +- python/pixcorrect/row_interp.py | 117 ++- python/pixcorrect/row_zipper.py | 71 +- python/pixcorrect/rowinterp_nullweight.py | 33 +- python/pixcorrect/scale_flat.py | 56 +- python/pixcorrect/sky_combine.py | 60 +- python/pixcorrect/sky_compress.py | 49 +- python/pixcorrect/sky_fit.py | 29 +- python/pixcorrect/sky_pca.py | 120 ++- python/pixcorrect/sky_subtract.py | 122 ++- python/pixcorrect/sky_template.py | 242 +++-- python/pixcorrect/sky_template_slow.py | 137 ++- python/pixcorrect/skyinfo.py | 256 +++-- python/pixcorrect/skyplot.py | 50 +- python/pixcorrect/starflat_correct.py | 12 +- setup.py | 21 +- 88 files changed, 3675 insertions(+), 1850 deletions(-) create mode 100755 bin/connect_streaks create mode 100755 bin/cticheck create mode 100755 bin/lightbulb create mode 100755 bin/maskStreaks create mode 100755 python/pixcorrect/connect_streaks.py create mode 100755 python/pixcorrect/cti.py create mode 100755 python/pixcorrect/cti_utils.py create mode 100755 python/pixcorrect/lightbulb.py create mode 100755 python/pixcorrect/lightbulb_utils.py diff --git a/bin/add_weight b/bin/add_weight index 6f7c595..409593d 100755 --- a/bin/add_weight +++ b/bin/add_weight @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.add_weight import add_weight diff --git a/bin/apply_bpm b/bin/apply_bpm index f2b298c..e3644ba 100755 --- a/bin/apply_bpm +++ b/bin/apply_bpm @@ -1,4 +1,4 @@ -#!/usr/bin/env python 
+#!/usr/bin/env python3 from pixcorrect.apply_bpm import apply_bpm diff --git a/bin/bf_correct b/bin/bf_correct index 62aef2e..23c7e9e 100755 --- a/bin/bf_correct +++ b/bin/bf_correct @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.bf_correct import bf_correct diff --git a/bin/bias_correct b/bin/bias_correct index 0af0573..8145d3a 100755 --- a/bin/bias_correct +++ b/bin/bias_correct @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.bias_correct import bias_correct diff --git a/bin/coadd_nwgint b/bin/coadd_nwgint index d031fee..be0c307 100755 --- a/bin/coadd_nwgint +++ b/bin/coadd_nwgint @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.coadd_nwgint import CoaddZipperInterpNullWeight diff --git a/bin/coadd_prepare b/bin/coadd_prepare index 7ed556d..cdb0fa7 100644 --- a/bin/coadd_prepare +++ b/bin/coadd_prepare @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.coadd_prepare import coadd_prepare diff --git a/bin/connect_streaks b/bin/connect_streaks new file mode 100755 index 0000000..b8573f9 --- /dev/null +++ b/bin/connect_streaks @@ -0,0 +1,6 @@ +#!/usr/bin/env python3 + +from pixcorrect.connect_streaks import connect_streaks + +if __name__ == '__main__': + connect_streaks.main() diff --git a/bin/cticheck b/bin/cticheck new file mode 100755 index 0000000..dc3cdd5 --- /dev/null +++ b/bin/cticheck @@ -0,0 +1,6 @@ +#!/usr/bin/env python3 + +from pixcorrect.cti import cticheck + +if __name__ == '__main__': + cticheck.main() diff --git a/bin/find_flat_normalization b/bin/find_flat_normalization index b78fa66..53c954e 100755 --- a/bin/find_flat_normalization +++ b/bin/find_flat_normalization @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.find_flat_normalization import find_flat_normalization diff --git a/bin/fix_cols b/bin/fix_cols index e042fde..3ebb980 100755 --- a/bin/fix_cols +++ b/bin/fix_cols @@ -1,4 +1,4 @@ 
-#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.fix_cols import fix_cols diff --git a/bin/fix_columns b/bin/fix_columns index 7f41c5c..c5f72b7 100755 --- a/bin/fix_columns +++ b/bin/fix_columns @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.fix_columns import fix_columns diff --git a/bin/flat_correct b/bin/flat_correct index 592b33e..fec91bf 100755 --- a/bin/flat_correct +++ b/bin/flat_correct @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.flat_correct import flat_correct diff --git a/bin/flat_correct_cp b/bin/flat_correct_cp index 4fe48bc..03bb932 100755 --- a/bin/flat_correct_cp +++ b/bin/flat_correct_cp @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.flat_correct_cp import flat_correct_cp diff --git a/bin/gain_correct b/bin/gain_correct index d4f1bb6..d5e2a64 100755 --- a/bin/gain_correct +++ b/bin/gain_correct @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.gain_correct import gain_correct diff --git a/bin/image_diff b/bin/image_diff index 6d98d63..08cb0da 100755 --- a/bin/image_diff +++ b/bin/image_diff @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.image_diff import image_diff diff --git a/bin/lightbulb b/bin/lightbulb new file mode 100755 index 0000000..459c0dc --- /dev/null +++ b/bin/lightbulb @@ -0,0 +1,6 @@ +#!/usr/bin/env python3 + +from pixcorrect.lightbulb import lightbulb + +if __name__ == '__main__': + lightbulb.main() diff --git a/bin/linearity_correct b/bin/linearity_correct index ee263a6..a624e5c 100755 --- a/bin/linearity_correct +++ b/bin/linearity_correct @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.linearity_correct import linearity_correct diff --git a/bin/make_mask b/bin/make_mask index 60827aa..edfe205 100755 --- a/bin/make_mask +++ b/bin/make_mask @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.make_mask 
import make_mask diff --git a/bin/maskStreaks b/bin/maskStreaks new file mode 100755 index 0000000..63c2973 --- /dev/null +++ b/bin/maskStreaks @@ -0,0 +1,207 @@ +#!/usr/bin/env python3 + +import argparse +import logging +import sys +import time +from collections import OrderedDict +import fitsio +import numpy as np +import matplotlib.path +from despyastro import wcsutil +from despyastro import astrometry + + +class MaskStreaksError(Exception): + def __init__(self, value): + self.value = value + super().__init__() + def __str__(self): + return repr(self.value) + +def streakMask(streaktab, image_data, image_header, addWidth=0., addLength=100., maxExtrapolate=None): + ''' Produce a list of pixels in the image that should be + masked for streaks in the input table. + streaktab is the output table of new streaks to add + image is a FITS HDU, with header and image data + addWidth is additional number of pixels to add to half-width + addLength is length added to each end of streak (pixels) + Returns: + ypix, xpix: 1d arrays with indices of affected pixels + nStreaks: number of new streaks masked + ''' + + pixscale = astrometry.get_pixelscale(image_header, units='arcsec') / 3600. 
+ shape = image_data.shape + w = wcsutil.WCS(image_header) + + use = np.logical_and(streaktab['expnum'] == image_header['EXPNUM'], + streaktab['ccdnum'] == image_header['CCDNUM']) + logging.info(f"{np.count_nonzero(use):d} streaks found to mask") + + nStreaks = 0 + inside = None + + xpix = np.array(0, dtype=int) + ypix = np.array(0, dtype=int) + + for row in streaktab[use]: + if maxExtrapolate is not None: + if row['extrapolated'] and row['nearest'] > maxExtrapolate: + logging.info('Skipping extrapolated streak') + continue + width = row['width'] + ra = np.array((row['ra1'], row['ra2'])) + dec = np.array((row['dec1'], row['dec2'])) + x, y = w.sky2image(ra, dec) + + + x1, x2, y1, y2 = x[0], x[1], y[0], y[1] + + # Slope of the line, cos/sin form + mx = (x2 - x1) / np.hypot(x2 - x1, y2 - y1) + my = (y2 - y1) / np.hypot(x2 - x1, y2 - y1) + + #displacement for width of streak: + wx = width / pixscale + addWidth + wy = wx * mx + wx = wx * -my + + # grow length + x1 -= addLength * mx + x2 += addLength * mx + y1 -= addLength * my + y2 += addLength * my + + # From Alex's immask routine: mark interior pixels + vertices = [(x1 + wx, y1 + wy), (x2 + wx, y2 + wy), (x2 - wx, y2 - wy), (x1 - wx, y1 - wy)] + vertices.append(vertices[0]) # Close the path + + if inside is None: + # Set up coordinate arrays + yy, xx = np.indices(shape) + points = np.vstack((xx.flatten(), yy.flatten())).T + path = matplotlib.path.Path(vertices) + inside = path.contains_points(points) + else: + # use logical_or for additional streaks + path = matplotlib.path.Path(vertices) + inside = np.logical_or(inside, path.contains_points(points)) + + nStreaks = nStreaks + 1 + + logging.info(f"Masked {nStreaks:d} new streaks") + # Return list of masked pixels + if inside is None: + return np.array(0, dtype=int), np.array(0, dtype=int), nStreaks + ypix, xpix = np.nonzero(inside.reshape(shape)) + return ypix, xpix, nStreaks + +def make_parser(): + # Build the argparse for this + parser = 
argparse.ArgumentParser(description='Mask additional streaks in a DESDM image') + parser.add_argument('--input_file', '-i', type=str, help='Input FITS image file path') + parser.add_argument('--output_file', '-o', type=str, help='Output FITS image file path') + parser.add_argument('--streak_file', '-s', type=str, help='Streak table file path') + parser.add_argument('--logfile', '-l', help='Logfile name', type=str) + parser.add_argument('--bit_mask', type=int, help='Bit(s) to set in MSK extension, no mask change if' + ' absent') + parser.add_argument('--null_weight', action='store_true', help='Null streaks in WGT image') + parser.add_argument('--add_width', type=float, help='Broaden streak width by this value (pixels)', + default=0.) + parser.add_argument('--add_length', type=float, help='Extend streak endpoints by this value (pixels)', + default=100.) + parser.add_argument('--max_extrapolate', type=float, help='Do not use streaks extrapolated more than ' + 'this many degrees') + parser.add_argument('--verbose', '-v', help='Increase logging detail', action='count') + + return parser + +def check_args(args): + if not args.input_file: + logging.error('input_file not given') + raise MaskStreaksError('input_file not given') + if not args.output_file: + logging.error('output_file not given') + raise MaskStreaksError('output_file not given') + if not args.streak_file: + logging.error('streak_file not given') + raise MaskStreaksError('streak_file not given') + +if __name__ == '__main__': + parser = make_parser() + args = parser.parse_args() + + # Set up logfile if there is one + if args.verbose is None or args.verbose == 0: + level = logging.WARNING + elif args.verbose == 1: + level = logging.INFO + elif args.verbose >= 2: + level = logging.DEBUG + if args.logfile is None: + # Logging to screen, set level + logging.basicConfig(level=level) + else: + logging.basicConfig(filename=args.logfile, + filemode='w', + level=level) + + try: + check_args(args) + except 
MaskStreaksError: + sys.exit(1) + + + # Read the streaks table first + try: + tab = fitsio.FITS(args.streak_file) + streaks = tab[1].read() + except Exception: + logging.error(f"Could not read streak file {args.streak_file:s}") + sys.exit(1) + + try: + header = OrderedDict() + hdu = OrderedDict() + data = OrderedDict() + with fitsio.FITS(args.input_file) as fits: + for k in range(len(fits)): + h = fits[k].read_header() + d = fits[k].read() + # Make sure that we can get the EXTNAME + if not h.get('EXTNAME'): + continue + extname = h['EXTNAME'].strip() + if extname == 'COMPRESSED_IMAGE': + continue + header[extname] = h + hdu[extname] = k + data[extname] = d + except Exception: + logging.error(f"Could not read input file {args.input_file:s}") + sys.exit(1) + + ymask, xmask, nStreaks = streakMask(streaks, data['SCI'], header['SCI'], + addWidth=args.add_width, + addLength=args.add_length, + maxExtrapolate=args.max_extrapolate) + + + #print(type(xmask), xmask) + #print(type(ymask), ymask) + if args.bit_mask: + logging.info('Setting bits in MSK image') + data['MSK'][ymask, xmask] |= args.bit_mask + + if args.null_weight: + logging.info('Nulling WGT pixels') + data['WGT'][ymask, xmask] = 0. 
+ #data['SCI'][ymask,xmask] = 1e9 + + header['SCI']['HISTORY'] = time.asctime(time.localtime()) + f" masked {nStreaks:d} streaks from {args.streak_file:s}" + + logging.info('Writing to ' + args.output_file) + ofits = fitsio.FITS(args.output_file, 'rw', clobber=True) + for EXTNAME in ['SCI', 'MSK', 'WGT']: + ofits.write(data[EXTNAME], extname=EXTNAME, header=header[EXTNAME]) diff --git a/bin/mask_saturation b/bin/mask_saturation index f171846..7521e38 100644 --- a/bin/mask_saturation +++ b/bin/mask_saturation @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.mask_saturation import mask_saturation diff --git a/bin/mini_compare b/bin/mini_compare index 4c9044d..2c14aa9 100755 --- a/bin/mini_compare +++ b/bin/mini_compare @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.mini_compare import mini_compare diff --git a/bin/normalize_flat b/bin/normalize_flat index 2cbc15a..931dd8d 100755 --- a/bin/normalize_flat +++ b/bin/normalize_flat @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.normalize_flat import normalize_flat diff --git a/bin/null_weights b/bin/null_weights index 7644501..2e6debe 100644 --- a/bin/null_weights +++ b/bin/null_weights @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.null_weights import null_weights diff --git a/bin/nullop b/bin/nullop index ef0f53e..ed34803 100755 --- a/bin/nullop +++ b/bin/nullop @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.nullop import nullop diff --git a/bin/override_bpm b/bin/override_bpm index 5861103..61501e2 100755 --- a/bin/override_bpm +++ b/bin/override_bpm @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.override_bpm import override_bpm diff --git a/bin/overscan_correct b/bin/overscan_correct index b62edf3..4a28cb9 100755 --- a/bin/overscan_correct +++ b/bin/overscan_correct @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 
from pixcorrect.overscan_correct import apply_overscan diff --git a/bin/pca_plot b/bin/pca_plot index 7f20002..9cecb9d 100755 --- a/bin/pca_plot +++ b/bin/pca_plot @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import sys from pixcorrect.skyplot import pcaReport @@ -14,4 +14,4 @@ if __name__=='__main__': args = parser.parse_args() pcaReport(args.pcafile, args.pdffile) sys.exit(0) - + diff --git a/bin/pixcorrect_cp b/bin/pixcorrect_cp index 2b3bdf4..95d2db4 100755 --- a/bin/pixcorrect_cp +++ b/bin/pixcorrect_cp @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.pixcorrect_cp import PixCorrectCP diff --git a/bin/pixcorrect_fp b/bin/pixcorrect_fp index a1eea4b..0d646c5 100755 --- a/bin/pixcorrect_fp +++ b/bin/pixcorrect_fp @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.pixcorrect_fp import PixCorrectFP diff --git a/bin/pixcorrect_im b/bin/pixcorrect_im index b8f18fc..96e3065 100755 --- a/bin/pixcorrect_im +++ b/bin/pixcorrect_im @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.pixcorrect_im import PixCorrectIm diff --git a/bin/row_interp b/bin/row_interp index b1202d8..8009ca7 100644 --- a/bin/row_interp +++ b/bin/row_interp @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.row_interp import row_interp diff --git a/bin/rowinterp_nullweight b/bin/rowinterp_nullweight index ad09b1e..8fcdd59 100755 --- a/bin/rowinterp_nullweight +++ b/bin/rowinterp_nullweight @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.rowinterp_nullweight import RowInterpNullWeight diff --git a/bin/scale_flat b/bin/scale_flat index 522e2e6..301f963 100755 --- a/bin/scale_flat +++ b/bin/scale_flat @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.scale_flat import scale_flat diff --git a/bin/sky_combine b/bin/sky_combine index 2d7039c..3d29f53 100755 --- a/bin/sky_combine +++ b/bin/sky_combine @@ -1,4 +1,4 @@ 
-#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.sky_combine import sky_combine diff --git a/bin/sky_compress b/bin/sky_compress index fec835f..a3a2c70 100755 --- a/bin/sky_compress +++ b/bin/sky_compress @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.sky_compress import sky_compress diff --git a/bin/sky_fit b/bin/sky_fit index b61d5de..c23cd46 100755 --- a/bin/sky_fit +++ b/bin/sky_fit @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.sky_fit import sky_fit diff --git a/bin/sky_pca b/bin/sky_pca index 338f539..22390e3 100755 --- a/bin/sky_pca +++ b/bin/sky_pca @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.sky_pca import sky_pca diff --git a/bin/sky_subtract b/bin/sky_subtract index 1f90f9b..48992a2 100755 --- a/bin/sky_subtract +++ b/bin/sky_subtract @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.sky_subtract import sky_subtract diff --git a/bin/sky_template b/bin/sky_template index 8e36d1d..be1453f 100644 --- a/bin/sky_template +++ b/bin/sky_template @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.sky_template import sky_template diff --git a/bin/starflat_correct b/bin/starflat_correct index 9bf5f7d..f50d023 100644 --- a/bin/starflat_correct +++ b/bin/starflat_correct @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from pixcorrect.starflat_correct import starflat_correct diff --git a/pixcorrect_test.build b/pixcorrect_test.build index 8c3fe50..043014c 100755 --- a/pixcorrect_test.build +++ b/pixcorrect_test.build @@ -8,12 +8,12 @@ export PRODUCT_DIR=$HOME/build-test/pixcorrect echo "Will Install to: $PRODUCT_DIR" source $EUPS_DIR/desdm_eups_setup.sh -setup -v scipy 0.14.0+7 -setup -v despyfits 0.5.1+2 +setup -v scipy 0.14.0+9 +setup -v despyfits 0.5.3+0 #setup -v -r ~/build-test/despyfits export PYTHONPATH=$PRODUCT_DIR/python:$PYTHONPATH -python setup.py install 
--prefix=$PRODUCT_DIR --install-lib=$PRODUCT_DIR/python +python3 setup.py install --prefix=$PRODUCT_DIR --install-lib=$PRODUCT_DIR/python echo " ----------------------------------------" echo " Make sure you setup pixcorrect by doing:" diff --git a/python/pixcorrect/PixCorrectDriver.py b/python/pixcorrect/PixCorrectDriver.py index 2423478..4c9365e 100644 --- a/python/pixcorrect/PixCorrectDriver.py +++ b/python/pixcorrect/PixCorrectDriver.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """Common code for single steps of pixcorrect-im """ @@ -8,12 +7,13 @@ import logging from os import path -from ConfigParser import SafeConfigParser, NoOptionError +from configparser import SafeConfigParser, NoOptionError from argparse import ArgumentParser import numpy as np from pixcorrect import corr_util + from pixcorrect.corr_util import logger from pixcorrect import proddir from despyfits.DESImage import DESImage, DESBPMImage, DESImageCStruct @@ -24,7 +24,7 @@ # interface functions # classes -class PixCorrectDriver(object): +class PixCorrectDriver: @classmethod def run(cls, config): @@ -34,7 +34,7 @@ def run(cls, config): - `config`: the configuration from which to get other parameters """ - raise NotImplemetedError + raise NotImplementedError @classmethod def add_step_args(cls, parser): @@ -47,22 +47,16 @@ def add_step_args(cls, parser): def common_parser(cls): """Generate a parser for a specific step """ - default_config = path.join(proddir, 'etc', cls.step_name+'.config') + default_config = path.join(proddir, 'etc', cls.step_name + '.config') # default_out_config = path.join(cls.step_name+'-as_run'+'.config') default_out_config = "" # Argument parser parser = ArgumentParser(description=cls.description) - parser.add_argument("config", default=default_config, nargs="?", - help="Configuration file filename") - parser.add_argument('-s', '--saveconfig', - default=default_out_config, - help="output config file") - parser.add_argument('-l', '--log', - default="", - help="the name of the 
logfile") - parser.add_argument('-v', '--verbose', action="count", - help="be verbose") + parser.add_argument("config", default=default_config, nargs="?", help="Configuration file filename") + parser.add_argument('-s', '--saveconfig', default=default_out_config, help="output config file") + parser.add_argument('-l', '--log', default="", help="the name of the logfile") + parser.add_argument('-v', '--verbose', action="count", default=0, help="be verbose") return parser @@ -81,10 +75,10 @@ def config(cls): """Return a configuration object for the step """ args = cls.parser().parse_args() - + # load configuration - config = SafeConfigParser() - config.read(args.config) + config = SafeConfigParser() + config.read(args.config) section = cls.step_name if not config.has_section(section): @@ -93,11 +87,11 @@ for argument, value in args._get_kwargs(): value = getattr(args, argument) if value is not None: - if type(value)==type([]): - value=value[0] + if isinstance(value, list): + value = value[0] config.set(section, argument, str(value)) - - if args.saveconfig is not None and len(args.saveconfig)>0: + + if args.saveconfig is not None and args.saveconfig: with open(args.saveconfig, 'w') as out_config: config.write(out_config) @@ -108,7 +102,8 @@ def main(cls): config, args = cls.config() # start logger - if args.log is not None and len(args.log)>0: + global logger + if args.log is not None and args.log: logging.basicConfig(filename=args.log, format="%(asctime)s %(levelname)s:\t%(message)s", level=logging.WARNING) @@ -119,13 +114,12 @@ logging.basicConfig(format="%(asctime)s %(levelname)s:\t%(message)s", level=logging.WARNING) - global logger logger = logging.getLogger() if args.verbose > 0: verbosity = logging.INFO if args.verbose==1 else logging.DEBUG logger.setLevel(verbosity) - + try: ret_val = cls.run(config) exit_status = 0 if ret_val is None else ret_val @@ -155,7 +149,7 @@ def __call__(cls): raise NotImplemetedError - # The step_run method unpacks 
parameters from config, and + # The step_run method unpacks parameters from config, and # calls __call__ to do the corrections. @classmethod def step_run(cls, config): @@ -183,14 +177,10 @@ def parser(cls): """ parser = cls.common_parser() - parser.add_argument('-i', '--in', - default=None, - help='input image file name') - parser.add_argument('-o', '--out', - default=None, - help='output image file name') + parser.add_argument('-i', '--in', default=None, help='input image file name') + parser.add_argument('-o', '--out', default=None, help='output image file name') """ - parser.add_argument('-n', '--ccdnum', nargs='?', + parser.add_argument('-n', '--ccdnum', nargs='?', type=int, help='input image CCD number') """ @@ -198,7 +188,7 @@ def parser(cls): return parser - # The step_run method unpacks parameters from config, and + # The step_run method unpacks parameters from config, and # calls __call__ to do the corrections. @classmethod def step_run(cls, image, config): @@ -223,12 +213,12 @@ def run(cls, config): except NoOptionError: image = DESImage.load(in_fname) - + ret_code = cls.step_run(image, config) out_fname = config.get(cls.step_name, 'out') image.save(out_fname) - + return ret_code class PixCorrectFPStep(PixCorrectStep): @@ -239,18 +229,14 @@ def parser(cls): """ parser = cls.common_parser() - parser.add_argument('-i', '--in', - default=None, - help='input image file name') - parser.add_argument('-o', '--out', - default=None, - help='output image file name') + parser.add_argument('-i', '--in', default=None, help='input image file name') + parser.add_argument('-o', '--out', default=None, help='output image file name') cls.add_step_args(parser) return parser - # The step_run method unpacks parameters from config, and + # The step_run method unpacks parameters from config, and # calls __call__ to do the corrections. 
@classmethod def step_run(cls, image, config): @@ -276,7 +262,7 @@ def run(cls, config): out_fname_template = config.get(cls.step_name, 'out') images.save(out_fname_template) - + return ret_code class PixCorrectMultistep(PixCorrectDriver): @@ -291,12 +277,8 @@ def parser(cls): """ parser = cls.common_parser() - parser.add_argument('-i', '--in', - default=None, - help='input image file name') - parser.add_argument('-o', '--out', - default=None, - help='output image file name') + parser.add_argument('-i', '--in', default=None, help='input image file name') + parser.add_argument('-o', '--out', default=None, help='output image file name') cls.add_step_args(parser) @@ -304,14 +286,13 @@ def parser(cls): @classmethod def run(cls, config): - config.set(cls.config_section, 'sci', - config.get(cls.config_section, 'in')) + config.set(cls.config_section, 'sci', config.get(cls.config_section, 'in')) pix_corrector = cls(config) ret_value = pix_corrector() return ret_value def image_data(self, fname): - raise NotImplemetedError + raise NotImplementedError def __getattr__(self, image_name): """Create a shortcut to images using object attributes @@ -326,7 +307,7 @@ def clean_im(self, image_name): """ if image_name in self._image_data: del self._image_data[image_name] - + def do_step(self, step_name): if not self.config.has_option(self.config_section, step_name): @@ -342,8 +323,6 @@ def do_step(self, step_name): # the step, and assume we want to perform the step return True - - # internal functions & classes # @@ -358,26 +337,25 @@ def filelist_to_list(input_file_list, column_used=0, delimeter=None, check_files -`delimeter': delimeter for parsing columns (default=None --> whitespace) -`check_files_exist': function will check that files exist before adding to list (default=True) -`append_missing_files': function appends files even if they are missing (default=False) - :Returns: list + :Returns: list """ - list_of_files=[] + list_of_files = [] try: - 
f_listfile=open(input_file_list,'r') + f_listfile = open(input_file_list, 'r') except: raise IOError("File not found. Missing input list %s " % input_file_list) for line in f_listfile: - line=line.strip() - if (delimeter is None): - columns=line.split() + line = line.strip() + if delimeter is None: + columns = line.split() else: - columns=line.split(delimeter) + columns = line.split(delimeter) FileExists=True - if (check_files_exist): - if (not(path.isfile(columns[column_used]))): - FileExists=False - if ((append_missing_files)or(FileExists)): + if check_files_exist: + if not path.isfile(columns[column_used]): + FileExists = False + if append_missing_files or FileExists: list_of_files.append(columns[column_used]) f_listfile.close() return(list_of_files) - diff --git a/python/pixcorrect/__init__.py b/python/pixcorrect/__init__.py index 7ff5f17..2d07cd2 100644 --- a/python/pixcorrect/__init__.py +++ b/python/pixcorrect/__init__.py @@ -1,5 +1,5 @@ __author__ = "Eric Neilsen" -__version__ = '0.5.0' +__version__ = '0.5.5' version = __version__ from os import environ proddir = environ['PIXCORRECT_DIR'] diff --git a/python/pixcorrect/add_weight.py b/python/pixcorrect/add_weight.py index 0f46877..8c5dcd8 100755 --- a/python/pixcorrect/add_weight.py +++ b/python/pixcorrect/add_weight.py @@ -1,15 +1,12 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Gain Correct image (convert pixel values from ADU to electrons) """ -import ctypes -from os import path import numpy as np -from pixcorrect import proddir -from pixcorrect.corr_util import logger, items_must_match -from despyfits.DESImage import DESImage, DESImageCStruct, weight_dtype, section2slice +from despyfits.DESImage import DESImage, weight_dtype, section2slice from pixcorrect.PixCorrectDriver import PixCorrectImStep from pixcorrect import decaminfo +from pixcorrect.corr_util import logger, items_must_match # Which section of the config file to read for this step config_section = 'addweight' @@ -20,14 +17,14 @@ class 
AddWeight(PixCorrectImStep): @classmethod def __call__(cls, image, dome): - """Add a weight plane + """Add a weight plane :Parameters: - - `image`: the DESImage for weight plane to be added + - `image`: the DESImage for weight plane to be added Applies "in place" """ - + logger.info('Adding Weight Image') if image.weight is None: @@ -38,28 +35,27 @@ def __call__(cls, image, dome): except: return 1 # Transform the sky image into a variance image - data=image.data - var = np.array(data, dtype = weight_dtype) + data = image.data + var = np.array(data, dtype=weight_dtype) for amp in decaminfo.amps: - sec = section2slice(image['DATASEC'+amp]) - invgain = (image['FLATMED'+amp]/image['GAIN'+amp]) / dome.data[sec] + sec = section2slice(image['DATASEC' + amp]) + invgain = (image['FLATMED' + amp] / image['GAIN' + amp]) / dome.data[sec] var[sec] += image['RDNOISE'+amp]**2 * invgain var[sec] *= invgain # Add noise from the dome flat shot noise, if present if dome.weight is not None: - var += data * data / (dome.weight*dome.data * dome.data) + var += data * data / (dome.weight * dome.data * dome.data) elif dome.variance is not None: var += data * data * dome.variance / (dome.data * dome.data) - image.weight = 1.0/var + image.weight = 1.0 / var logger.info('Finished building a weight plane') else: logger.info('Weight plane already present... skipping.') - ret_code=0 + ret_code = 0 return ret_code - @classmethod def step_run(cls, image, config): """Customized execution for addition of a weight plane. 
@@ -70,11 +66,11 @@ def step_run(cls, image, config): """ logger.info('Weight will be added to %s' % image) - + flat_fname = config.get(cls.step_name, 'flat') - logger.info('Reading flat correction from %s'% flat_fname) + logger.info('Reading flat correction from %s' % flat_fname) flat = DESImage.load(flat_fname) - ret_code = cls.__call__(image,flat) + ret_code = cls.__call__(image, flat) return ret_code @classmethod diff --git a/python/pixcorrect/bf_correct.py b/python/pixcorrect/bf_correct.py index 2153e3f..0571115 100644 --- a/python/pixcorrect/bf_correct.py +++ b/python/pixcorrect/bf_correct.py @@ -1,12 +1,12 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Apply brighter-fatter correction to an image """ from os import path import numpy as np -from pixcorrect.corr_util import logger, do_once -from despyfits.DESImage import DESImage, section2slice, data_dtype +from despyfits.DESImage import section2slice from despyfits.maskbits import * +from pixcorrect.corr_util import logger, do_once from pixcorrect.PixCorrectDriver import PixCorrectImStep from pixcorrect import decaminfo from pixcorrect.bfinfo import * @@ -19,7 +19,7 @@ class BFCorrect(PixCorrectImStep): step_name = config_section @classmethod - @do_once(1,'DESBFC') + @do_once(1, 'DESBFC') def __call__(cls, image, bffile, bfmask): """ Apply brighter-fatter correction to an image, and set BADPIX_SUSPECT bit @@ -43,16 +43,16 @@ def __call__(cls, image, bffile, bfmask): ignore = np.logical_or(np.isinf(image.data), np.isnan(image.data)) ignore = np.logical_or(ignore, (image.mask & bfmask) != 0) - + # Get a median sky level and replace bad pixels with it when deriving kernel # Also put image into electron units, if not already. 
- data = np.array(image.data) + data = np.array(image.data) for amp in decaminfo.amps: gain = image['GAIN'+amp] sec = section2slice(image['DATASEC'+amp]) if gain != 1: data[sec] *= gain - sky = np.median(data[sec][::4,::4]) + sky = np.median(data[sec][::4, ::4]) data[sec][ignore[sec]] = sky # Convolve data with R kernel to get right-hand pixel shifts @@ -60,54 +60,54 @@ def __call__(cls, image, bffile, bfmask): kernel = bf.kernelR(data.shape) shift = np.fft.irfft2(df * np.fft.rfft2(kernel)) # Multiply by border charge to get amount of charge to move. - charge = 0.5*(data[:,:-1] + data[:,1:])*shift[:,:-1] + charge = 0.5 * (data[:, :-1] + data[:, 1:]) * shift[:, :-1] # Do not shift charge into or out of bad pixels - charge[ignore[:,:-1]] = 0. - charge[ignore[:,1:]] = 0. + charge[ignore[:, :-1]] = 0. + charge[ignore[:, 1:]] = 0. # Adjust data for this shift out = np.array(image.data) for amp in decaminfo.amps: # Redo the temporary gain correction: gain = image['GAIN'+amp] - sec = section2slice(image['DATASEC'+amp]) + sec = section2slice(image['DATASEC' + amp]) if gain != 1: out[sec] *= gain - out[:,1:] -= charge - out[:,:-1] += charge + out[:, 1:] -= charge + out[:, :-1] += charge # Now do the upper-edge pixel shifts ??? Add gain factor here & T? kernel = bf.kernelT(data.shape) shift = np.fft.irfft2(df * np.fft.rfft2(kernel)) # Multiply by border charge to get amount of charge to move. - charge = 0.5*(data[:-1,:] + data[1:,:]) * shift[:-1,:] + charge = 0.5 * (data[:-1, :] + data[1:, :]) * shift[:-1, :] # Do not shift charge into or out of bad pixels - charge[ignore[:-1,:]] = 0. - charge[ignore[1:,:]] = 0. + charge[ignore[:-1, :]] = 0. + charge[ignore[1:, :]] = 0. 
# Adjust data for this shift - out[1:,:] -= charge - out[:-1,:] += charge + out[1:, :] -= charge + out[:-1, :] += charge # Undo the gain correction if we made it originally: for amp in decaminfo.amps: - gain = image['GAIN'+amp] - sec = section2slice(image['DATASEC'+amp]) + gain = image['GAIN' + amp] + sec = section2slice(image['DATASEC' + amp]) if gain != 1.: out[sec] /= gain image.data = out # Set the SUSPECT flag for all pixels that were adjacent to # ignored pixels, as their b/f correction is off - change_mask = np.zeros(image.mask.shape,dtype=bool) - change_mask[:-1,:] |= ignore[1:,:] # mask below - change_mask[1:,:] |= ignore[:-1,:] # mask above - change_mask[:,:-1] |= ignore[:,1:] # mask to left - change_mask[:,1:] |= ignore[:,:-1] # mask to right + change_mask = np.zeros(image.mask.shape, dtype=bool) + change_mask[:-1, :] |= ignore[1:, :] # mask below + change_mask[1:, :] |= ignore[:-1, :] # mask above + change_mask[:, :-1] |= ignore[:, 1:] # mask to left + change_mask[:, 1:] |= ignore[:, :-1] # mask to right change_mask[ignore] = False # Don't mask what's already bad image.mask[change_mask] |= BADPIX_SUSPECT image.write_key('BFCFIL', path.basename(bffile), comment='Brighter/fatter correction file') - + ret_code = 0 return ret_code @@ -125,8 +125,8 @@ def step_run(cls, image, config): else: bfmask = parse_badpix_mask(DEFAULT_BFMASK) - bffile = config.get(cls.step_name,'bffile') - + bffile = config.get(cls.step_name, 'bffile') + ret_code = cls.__call__(image, bffile, bfmask) return ret_code @@ -134,7 +134,7 @@ def step_run(cls, image, config): def add_step_args(cls, parser): """Add arguments specific application of the BPM """ - parser.add_argument('--bffile', + parser.add_argument('--bffile', help='B/F coefficients filename') parser.add_argument('--bfmask', default=DEFAULT_BFMASK, help='Bitmask for pixels to ignore in B/F correction') diff --git a/python/pixcorrect/bfinfo.py b/python/pixcorrect/bfinfo.py index e754d0e..2e1ab61 100644 --- 
a/python/pixcorrect/bfinfo.py +++ b/python/pixcorrect/bfinfo.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """ Data structure used to hold the brighter/fatter corrections """ @@ -21,15 +20,16 @@ class BFError(Exception): """ def __init__(self, value): self.value = value + super().__init__() def __str__(self): return repr(self.value) -class BFKernel(object): +class BFKernel: """ Class that holds the coefficients for brighter/fatter relation """ - + def __init__(self, bffile, detpos): """ Read brighter/fatter coefficients from a saved FITS file. The upper-right @@ -47,9 +47,7 @@ def __init__(self, bffile, detpos): if detpos + 'T' not in ff: raise BFError('Kernel table "' + detpos + 'T' + '" not in BF file ' + bffile) - self.aT = ff[detpos+'T'].read() - - return + self.aT = ff[detpos + 'T'].read() def kernelR(self, shape): """ @@ -58,40 +56,40 @@ def kernelR(self, shape): *right* edge of each pixel in the array. Pixels to left/below of the charge wrap around the array. """ - if shape[0] < 2*self.aR.shape[0]-1 or shape[1] < 2*self.aR.shape[1]+1: + if shape[0] < 2 * self.aR.shape[0] - 1 or shape[1] < 2 * self.aR.shape[1] + 1: raise BFError('Requested shape for kernelR is too small to hold the kernel') kernel = np.zeros(shape, dtype=data_dtype) ny, nx = self.aR.shape #r_kernel will contain the fraction of pixel to shift out of right edge - kernel[:ny,:nx] = self.aR + kernel[:ny, :nx] = self.aR # Replicate the other quadrants accordingly - kernel[-1:-ny:-1,:nx] = self.aR[1:,:] + kernel[-1:-ny:-1, :nx] = self.aR[1:, :] kernel[:ny, -1:-nx-1:-1] = -self.aR - kernel[-1:-ny:-1,-1:-nx-1:-1] = -self.aR[1:,:] + kernel[-1:-ny:-1, -1:-nx-1:-1] = -self.aR[1:, :] return kernel def kernelT(self, shape): """ Return an array of specified shape that has the full 2d kernel filled in. 
""" - if shape[0] < 2*self.aT.shape[0]+1 or shape[1] < 2*self.aT.shape[1]-1: + if shape[0] < 2 * self.aT.shape[0] + 1 or shape[1] < 2 * self.aT.shape[1] - 1: raise BFError('Requested shape for kernelT is too small to hold the kernel') kernel = np.zeros(shape, dtype=data_dtype) ny, nx = self.aT.shape - kernel[:ny,:nx] = self.aT + kernel[:ny, :nx] = self.aT # Replicate the other quadrants accordingly - kernel[:ny, -1:-nx:-1,] = self.aT[:,1:] + kernel[:ny, -1:-nx:-1,] = self.aT[:, 1:] kernel[-1:-ny-1:-1, :nx] = -self.aT - kernel[-1:-ny-1:-1, -1:-nx:-1] = -self.aT[:,1:] + kernel[-1:-ny-1:-1, -1:-nx:-1] = -self.aT[:, 1:] return kernel - + @classmethod def from_gruen(cls, gruenfile, outfile): """ Translate from Daniel's format into FITS format. """ fin = open(gruenfile) - fout = fitsio.FITS(outfile,'rw',clobber=True) + fout = fitsio.FITS(outfile, 'rw', clobber=True) for line in fin: fields = line.split() detpos = fields.pop(0).strip() @@ -99,23 +97,21 @@ def from_gruen(cls, gruenfile, outfile): raise BFError('Did not get "M" in Gruen-format BF file') nx = int(fields.pop(0)) ny = int(fields.pop(0)) - aR = np.zeros( (ny,nx), dtype=data_dtype) + aR = np.zeros((ny, nx), dtype=data_dtype) for ix in range(nx): for iy in range(ny): - aR[iy,ix] = np.float32(fields.pop(0)) + aR[iy, ix] = np.float32(fields.pop(0)) if fields.pop(0) != 'M': raise BFError('Did not get "M" in Gruen-format BF file') nx = int(fields.pop(0)) ny = int(fields.pop(0)) - aT = np.zeros( (ny,nx), dtype=data_dtype) + aT = np.zeros((ny, nx), dtype=data_dtype) for ix in range(nx): for iy in range(ny): - aT[iy,ix] = np.float32(fields.pop(0)) - if len(fields)!=0: + aT[iy, ix] = np.float32(fields.pop(0)) + if fields: raise BFError('Too much info in bf coefficient line for detpos ' + detpos) ## Axis swap...needed according to Daniel: - aR,aT = aT.transpose(), aR.transpose() - fout.write(aR, extname=detpos+'R') - fout.write(aT, extname=detpos+'T') - return - + aR, aT = aT.transpose(), aR.transpose() + fout.write(aR, 
extname=detpos + 'R') + fout.write(aT, extname=detpos + 'T') diff --git a/python/pixcorrect/bias_correct.py b/python/pixcorrect/bias_correct.py index 7a69af9..9f9af06 100755 --- a/python/pixcorrect/bias_correct.py +++ b/python/pixcorrect/bias_correct.py @@ -1,12 +1,10 @@ -#!/usr/bin/env python -"""Apply a bias correction to a raw DES image +#!/usr/bin/env python3 +"""Apply a bias correction to a raw DES image """ from os import path -import numpy as np -from pixcorrect import proddir -from pixcorrect.corr_util import logger, do_once, items_must_match from despyfits.DESImage import DESImage +from pixcorrect.corr_util import logger, do_once, items_must_match from pixcorrect.PixCorrectDriver import PixCorrectImStep from pixcorrect import decaminfo @@ -19,7 +17,7 @@ class BiasCorrect(PixCorrectImStep): step_name = config_section @classmethod - @do_once(1,'DESBIAS') + @do_once(1, 'DESBIAS') def __call__(cls, image, bias_im): """Apply a bias correction to an image @@ -30,7 +28,7 @@ def __call__(cls, image, bias_im): Applies the correction "in place." Also creates BAND and NITE keywords if they are not present. """ - + logger.info('Applying Bias') # Check that bias and data are from same CCD try: @@ -42,7 +40,7 @@ def __call__(cls, image, bias_im): if (image.weight is not None or image.variance is not None): if bias_im.weight is not None: var = image.get_variance() - var += 1./bias_im.weight + var += 1. 
/ bias_im.weight elif bias_im.variance is not None: var = image.get_variance() var += bias_im.variance @@ -60,11 +58,10 @@ def __call__(cls, image, bias_im): image['NITE'] except: image['NITE'] = decaminfo.get_nite(image['DATE-OBS']) - + ret_code = 0 return ret_code - @classmethod def step_run(cls, image, config): """Customized execution for application of the Bias @@ -78,7 +75,7 @@ def step_run(cls, image, config): bias_fname = config.get(cls.step_name, 'bias') logger.info('reading Bias from %s'% bias_fname) bias_im = DESImage.load(bias_fname) - + ret_code = cls.__call__(image, bias_im) return ret_code diff --git a/python/pixcorrect/clippedMean.py b/python/pixcorrect/clippedMean.py index 3a593c0..78f2f27 100644 --- a/python/pixcorrect/clippedMean.py +++ b/python/pixcorrect/clippedMean.py @@ -20,60 +20,60 @@ def clippedMean(a, nSigma, axis=None, sigma=None, sigmaFloor=None, maxSample=Non n = number of unclipped points. Each is scalar if axis==None, or 1 lower dimension than input array. 
""" - if axis==None: + if axis is None: dataLength = len(a.flatten()) else: dataLength = a.shape[axis] - if maxSample==None or dataLength <= maxSample: + if maxSample is None or dataLength <= maxSample: sample = a else: # Subsample the data in deriving clipping limits - step = (dataLength-1)/maxSample + 1 - if axis==None: + step = (dataLength - 1) / maxSample + 1 + if axis is None: sample = a.flatten()[::step] else: s = [] - for i in range(0,axis): + for _ in range(0, axis): s.append(slice(None)) - s.append(slice(None,None,step)) - for i in range(axis+1,len(a.shape)): + s.append(slice(None, None, step)) + for i in range(axis + 1, len(a.shape)): s.append(slice(None)) sample = a[s] - - if sigma==None: + + if sigma is None: # Determine sigma from IQD iqdSigma = 1.349 p75 = np.percentile(sample, 75., axis=axis) p25 = np.percentile(sample, 25., axis=axis) - if sigmaFloor==None: + if sigmaFloor is None: # Use the IQD straight up: - upper = (0.5+nSigma/iqdSigma)*p75 + (0.5-nSigma/iqdSigma)*p25 - lower = (0.5-nSigma/iqdSigma)*p75 + (0.5+nSigma/iqdSigma)*p25 + upper = (0.5 + nSigma / iqdSigma) * p75 + (0.5 - nSigma / iqdSigma) * p25 + lower = (0.5 - nSigma / iqdSigma) * p75 + (0.5 + nSigma / iqdSigma) * p25 else: # set sigma as maximum of IQD and the floor - dev = nSigma*np.maximum( (p75-p25)/iqdSigma, sigmaFloor) - mid = 0.5*(p25+p75) - lower = mid-dev - upper = mid+dev + dev = nSigma * np.maximum((p75 - p25)/iqdSigma, sigmaFloor) + mid = 0.5 * (p25 + p75) + lower = mid - dev + upper = mid + dev else: # use prescribed sigma about median med = np.median(sample, axis=axis) - upper = med + nSigma*sigma - lower = med - nSigma*sigma + upper = med + nSigma * sigma + lower = med - nSigma * sigma - if axis==None: + if axis is None: data = ma.masked_outside(a, lower, upper) else: # Need to broadcast the upper and lower bounds across chosen axis - bshape = a.shape[:axis] + (1,) + a.shape[axis+1:] - mask = np.logical_or(a < lower.reshape(bshape), a>upper.reshape(bshape)) + bshape = 
a.shape[:axis] + (1,) + a.shape[axis + 1:] + mask = np.logical_or(a < lower.reshape(bshape), a > upper.reshape(bshape)) data = ma.masked_where(mask, a, copy=False) - if axis==None: + if axis is None: # Force return of double precision to use this for accumulating mean & avoid roundoff return data.mean(dtype=np.float64), data.var(dtype=np.float64), data.count() - else: - # A problem is that count() appears to return float data if it returns an array. - # Also need to force calculation of the mean and variance in double precision - return data.mean(axis=axis, dtype=np.float64).astype(np.float32), \ - data.var(axis=axis, dtype=np.float64).astype(np.float32), \ - data.count(axis=axis).astype(np.int32) + + # A problem is that count() appears to return float data if it returns an array. + # Also need to force calculation of the mean and variance in double precision + return data.mean(axis=axis, dtype=np.float64).astype(np.float32), \ + data.var(axis=axis, dtype=np.float64).astype(np.float32), \ + data.count(axis=axis).astype(np.int32) diff --git a/python/pixcorrect/coadd_nwgint.py b/python/pixcorrect/coadd_nwgint.py index 85cf1aa..a96c81c 100755 --- a/python/pixcorrect/coadd_nwgint.py +++ b/python/pixcorrect/coadd_nwgint.py @@ -1,4 +1,12 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 + +import time +import os +import sys + +import matplotlib.path +import fitsio +import numpy as np from pixcorrect.null_weights import null_weights from pixcorrect.row_zipper import row_zipper @@ -6,16 +14,15 @@ from pixcorrect.PixCorrectDriver import PixCorrectMultistep from despyastro.CCD_corners import update_DESDM_corners +from despyastro import wcsutil, astrometry import despyfits from despyfits.maskbits import parse_badpix_mask -from despyfits.DESImage import DESImage,update_hdr_compression,insert_eupspipe +from despyfits.DESImage import DESImage, update_hdr_compression, insert_eupspipe +from despyfits import updateWCS + from despymisc.miscutils import elapsed_time -from despyfits 
import updateWCS -import time -import fitsio -import numpy as np class CoaddZipperInterpNullWeight(PixCorrectMultistep): @@ -34,8 +41,8 @@ class CoaddZipperInterpNullWeight(PixCorrectMultistep): # Fix the step_name for passing the command-line arguments to the classes null_weights.__class__.step_name = config_section - row_zipper.__class__.step_name = config_section - + row_zipper.__class__.step_name = config_section + def __call__(self): """ Run row_zipper and null_weights in one step, we run the tasks @@ -44,165 +51,279 @@ def __call__(self): t0 = time.time() # Check if we want special multi-epoch weighting, and which bits we want to 'save' - me_wgt_keepmask = get_safe_boolean('me_wgt_keepmask',self.config,self.config_section) + me_wgt_keepmask = get_safe_boolean('me_wgt_keepmask', self.config, self.config_section) # Get verbose try: - verbose = self.config.get(self.config_section,'verbose') + verbose = self.config.get(self.config_section, 'verbose') except: verbose = False # Get the science image - input_image = self.config.get(self.config_section,'in') + input_image = self.config.get(self.config_section, 'in') self.sci = DESImage.load(input_image) + # In case a streak table is provided -- we proceed with the extra STREAK maskinh + streak_file = self.config.get(self.config_section, 'streak_file') + if os.path.exists(streak_file): + add_width = self.config.getfloat(self.config_section, 'add_width') + add_length = self.config.getfloat(self.config_section, 'add_length') + max_extrapolate = self.config.getfloat(self.config_section, 'max_extrapolate') + self.streakMask(streak_file, + addWidth=add_width, + addLength=add_length, + maxExtrapolate=max_extrapolate) + # Add TILENAME and TILEID to sci header (optional) if required self.update_sci_header(input_image) # Update the header wcs if both headfile and hdupcfg are present (optional) - self.update_wcs_header(input_image,verbose=verbose) - + self.update_wcs_header(input_image, verbose=verbose) + # Check if want to 
create the custon weight for SWArp/SExtractor combination if me_wgt_keepmask: self.custom_weight(input_image) - + # Run null_weights t1 = time.time() - logger.info("Running null_weights on: %s" % input_image) - null_weights.step_run(self.sci,self.config) - logger.info("Time NullWeights : %s" % elapsed_time(t1)) + logger.info("Running null_weights on: %s", input_image) + null_weights.step_run(self.sci, self.config) + logger.info("Time NullWeights : %s", elapsed_time(t1)) # Run row_zipper t2 = time.time() - logger.info("Running row_zipper on: %s" % input_image) - row_zipper.step_run(self.sci,self.config) - logger.info("Time ZipperInterp : %s" % elapsed_time(t2)) + logger.info("Running row_zipper on: %s", input_image) + row_zipper.step_run(self.sci, self.config) + logger.info("Time ZipperInterp : %s", elapsed_time(t2)) # Null the sci image only if null_mask_sci !=0 self.null_sci(input_image) - + output_image = self.config.get(self.config_section, 'out') # Special write out - if me_wgt_keepmask : + if me_wgt_keepmask: self.custom_write(output_image) else: self.sci.save(output_image) - - logger.info("Wrote new file: %s" % output_image) - logger.info("Time Total: %s" % elapsed_time(t0)) + + logger.info("Wrote new file: %s", output_image) + logger.info("Time Total: %s", elapsed_time(t0)) return 0 - def update_wcs_header(cls,input_image,verbose=False): + def update_wcs_header(self, input_image, verbose=False): # Get optional config file, first we try to get them as boolean, then as strings - headfile = get_safe_boolean('headfile',cls.config,cls.config_section) - hdupcfg = get_safe_boolean('hdupcfg',cls.config,cls.config_section) + headfile = get_safe_boolean('headfile', self.config, self.config_section) + hdupcfg = get_safe_boolean('hdupcfg', self.config, self.config_section) # Update the header if both headfile and hdupcfg are present if headfile and hdupcfg: - logger.info("Will update image header with scamp .head file %s" % headfile) - cls.sci = 
updateWCS.run_update(cls.sci,headfile=headfile,hdupcfg=hdupcfg,verbose=verbose) - - def update_sci_header(cls,input_image): - - tilename = get_safe_boolean('tilename',cls.config,cls.config_section) - tileid = get_safe_boolean('tileid',cls.config,cls.config_section) + logger.info("Will update image header with scamp .head file %s", headfile) + self.sci = updateWCS.run_update(self.sci, headfile=headfile, hdupcfg=hdupcfg, verbose=verbose) + + def update_sci_header(self, input_image): + tilename = get_safe_boolean('tilename', self.config, self.config_section) + tileid = get_safe_boolean('tileid', self.config, self.config_section) if tilename: - record={'name':'TILENAME', 'value':tilename, 'comment':'DES Tilename'} - cls.sci.header.add_record(record) + record = {'name': 'TILENAME', 'value': tilename, 'comment': 'DES Tilename'} + self.sci.header.add_record(record) if tileid: - record={'name':'TILEID', 'value':int(tileid), 'comment':'Tile ID for DES Tilename'} - cls.sci.header.add_record(record) + record = {'name': 'TILEID', 'value': int(tileid), 'comment': 'Tile ID for DES Tilename'} + self.sci.header.add_record(record) + + def null_sci(self, input_image): - def null_sci(cls, input_image): - - null_mask_sci = parse_badpix_mask( cls.config.get(cls.config_section, 'null_mask_sci') ) - if null_mask_sci !=0: + null_mask_sci = parse_badpix_mask(self.config.get(self.config_section, 'null_mask_sci')) + if null_mask_sci != 0: logger.info('Nulling science image from null_mask_bits') - kill = np.array(cls.sci.mask & null_mask_sci, dtype=bool) - cls.sci.data[kill] = 0.0 + kill = np.array(self.sci.mask & null_mask_sci, dtype=bool) + self.sci.data[kill] = 0.0 else: logger.info('Science image was not null') - return - - def custom_weight(cls,input_image): + def custom_weight(self, input_image): # Make custom weight, that will not zero STAR maskbit - logger.info("Will perform special weighting for multi-epoch input on %s" % input_image) + logger.info("Will perform special weighting for 
multi-epoch input on %s", input_image) # Make a copy of the original untouched weight - cls.sci.weight_custom = np.copy(cls.sci.weight) - null_mask = cls.config.get(cls.config_section, 'null_mask') - me_wgt_keepmask = cls.config.get(cls.config_section, 'me_wgt_keepmask') + self.sci.weight_custom = np.copy(self.sci.weight) + null_mask = self.config.get(self.config_section, 'null_mask') + me_wgt_keepmask = self.config.get(self.config_section, 'me_wgt_keepmask') # Make python lists of the coma-separated input lists null_list = null_mask.split(',') - keep_list = me_wgt_keepmask.split(',') + keep_list = me_wgt_keepmask.split(',') # Special case we care: - # . we are nulling the TRAIL but want keep where STAR + # . we are nulling the TRAIL but want keep where STAR if 'TRAIL' in null_list and 'STAR' in keep_list and 'TRAIL' not in keep_list: # Remove STAR from the list - if 'STAR' in null_list: null_list.remove('STAR') + if 'STAR' in null_list: + null_list.remove('STAR') null_mask_bits = parse_badpix_mask(','.join(null_list)) # Null each plane at a time. 
First the TRAILS and replace with STAR - kill = np.array(cls.sci.mask & parse_badpix_mask('TRAIL'), dtype=bool) - stars = np.array(cls.sci.mask & parse_badpix_mask('STAR'), dtype=bool) - cls.sci.weight_custom[kill] = 0.0 - cls.sci.weight_custom[stars] = np.copy(cls.sci.weight[stars]) + kill = np.array(self.sci.mask & parse_badpix_mask('TRAIL'), dtype=bool) + stars = np.array(self.sci.mask & parse_badpix_mask('STAR'), dtype=bool) + self.sci.weight_custom[kill] = 0.0 + self.sci.weight_custom[stars] = np.copy(self.sci.weight[stars]) # Loop over the bitplanes, but skipping TRAIL, which we already did null_list.remove('TRAIL') for bitplane in null_list: - kill = np.array(cls.sci.mask & parse_badpix_mask(bitplane), dtype=bool) - cls.sci.weight_custom[kill] = 0.0 + kill = np.array(self.sci.mask & parse_badpix_mask(bitplane), dtype=bool) + self.sci.weight_custom[kill] = 0.0 # We remove tham from the null_list else: for bitplane in me_wgt_keepmask.split(','): - if bitplane in null_list: null_list.remove(bitplane) + if bitplane in null_list: + null_list.remove(bitplane) null_mask_bits = parse_badpix_mask(','.join(null_list)) - kill = np.array( cls.sci.mask & null_mask_bits, dtype=bool) - cls.sci.weight_custom[kill] = 0.0 - - def custom_write(cls,output_image): + kill = np.array(self.sci.mask & null_mask_bits, dtype=bool) + self.sci.weight_custom[kill] = 0.0 + + def custom_write(self, output_image): # Write out the image using fitsio, but skipping the mask as we won't need it. 
- ofits = fitsio.FITS(output_image,'rw',clobber=True) + ofits = fitsio.FITS(output_image, 'rw', clobber=True) # Here we mimick the steps followed by DESImage.save() # SCI logger.info("Creating SCI HDU and relevant FZ*/DES_EXT/EXTNAME keywords") - cls.sci.header = update_hdr_compression(cls.sci.header,'SCI') + self.sci.header = update_hdr_compression(self.sci.header, 'SCI') logger.info("Calculating CCD corners/center/extern keywords for SCI HDU ") - cls.sci.header = update_DESDM_corners(cls.sci.header,get_extent=True, verb=False) + self.sci.header = update_DESDM_corners(self.sci.header, get_extent=True, verb=False) if despyfits.DESImage.pipekeys_write: logger.info("Inserting EUPS PIPEPROD and PIPEVER to SCI HDU") - cls.sci.header = insert_eupspipe(cls.sci.header) - ofits.write(cls.sci.data, extname='SCI', header=cls.sci.header) + self.sci.header = insert_eupspipe(self.sci.header) + ofits.write(self.sci.data, extname='SCI', header=self.sci.header) # WGT logger.info("Creating WGT HDU and relevant FZ*/DES_EXT/EXTNAME keywords") - cls.sci.weight_hdr = update_hdr_compression(cls.sci.weight_hdr,'WGT') - ofits.write(cls.sci.weight,extname='WGT',header=cls.sci.weight_hdr) - # WGT_ME + self.sci.weight_hdr = update_hdr_compression(self.sci.weight_hdr, 'WGT') + ofits.write(self.sci.weight, extname='WGT', header=self.sci.weight_hdr) + # WGT_ME # For WGT_ME we do not need to update the FZ keywords, as we use the same hdr as WGT logger.info("Creating WGT_ME HDU") - ofits.write(cls.sci.weight_custom,extname='WGT_ME',header=cls.sci.weight_hdr) + ofits.write(self.sci.weight_custom, extname='WGT_ME', header=self.sci.weight_hdr) # MSK logger.info("Creating MSK HDU and relevant FZ*/DES_EXT/EXTNAME keywords") - cls.sci.mask_hdr = update_hdr_compression(cls.sci.mask_hdr,'MSK') - ofits.write(cls.sci.mask,extname='MSK',header=cls.sci.mask_hdr) + self.sci.mask_hdr = update_hdr_compression(self.sci.mask_hdr, 'MSK') + ofits.write(self.sci.mask, extname='MSK', header=self.sci.mask_hdr) 
ofits.close() + + def streakMask(self, streak_file, addWidth=0., addLength=100., maxExtrapolate=0): + ''' + Produce a list of pixels in the image that should be masked for + streaks in the input table. streaktab is the output table of new + streaks to add image is a FITS HDU, with header and image data + addWidth is additional number of pixels to add to half-width + addLength is length added to each end of streak (pixels) + + Returns: + ypix, xpix: 1d arrays with indices of affected pixels + nStreaks: number of new streaks masked + ''' + + # Read the streaks table first + try: + tab = fitsio.FITS(streak_file) + streaktab = tab[1].read() + except: + logger.error('Could not read streak file {:s}'.format(streak_file)) + sys.exit(1) + + image_header = self.sci.header + image_data = self.sci.data + # Pixscale in degrees + pixscale = astrometry.get_pixelscale(image_header, units='arcsec') / 3600. + shape = image_data.shape + + # # Due to a bug in fitsio 1.0.0rc1+0, we need to clean up the + # # header before feeding it to wcsutil and remove the 'None' and other problematic items + # for k in image_header: + # # Try to access the item, if failed we hace to remove it + # try: + # item = image_header[k] + # except: + # logger.info("Removing keyword: {:s} from header".format(k)) + # image_header.delete(k) + + w = wcsutil.WCS(image_header) + + # WE NEED TO UPDATE THIS WHEN THE TABLE IS PER EXPNUM + use = np.logical_and(streaktab['expnum'] == image_header['EXPNUM'], + streaktab['ccdnum'] == image_header['CCDNUM']) + logger.info('{:d} streaks found to mask'.format(np.count_nonzero(use))) + + nStreaks = 0 + inside = None + + + for row in streaktab[use]: + if maxExtrapolate > 0: + if row['extrapolated'] and row['nearest'] > maxExtrapolate: + logger.info('Skipping extrapolated streak') + continue + width = row['width'] + ra = np.array((row['ra1'], row['ra2'])) + dec = np.array((row['dec1'], row['dec2'])) + x, y = w.sky2image(ra, dec) + + x1, x2, y1, y2 = x[0], x[1], y[0], y[1] + + 
# Slope of the line, cos/sin form + mx = (x2 - x1) / np.hypot(x2 - x1, y2 -y1) + my = (y2 - y1) / np.hypot(x2 - x1, y2 -y1) + + #displacement for width of streak: + wx = width / pixscale + addWidth + wy = wx * mx + wx = wx * -my + + # grow length + x1 -= addLength * mx + x2 += addLength * mx + y1 -= addLength * my + y2 += addLength * my + + # From Alex's immask routine: mark interior pixels + vertices = [(x1 + wx, y1 + wy), (x2 + wx, y2 + wy), (x2 - wx, y2 - wy), (x1 - wx, y1 - wy)] + vertices.append(vertices[0]) # Close the path + + if inside is None: + # Set up coordinate arrays + yy, xx = np.indices(shape) + points = np.vstack((xx.flatten(), yy.flatten())).T + path = matplotlib.path.Path(vertices) + inside = path.contains_points(points) + else: + # use logical_and for additional streaks + path = matplotlib.path.Path(vertices) + inside = np.logical_or(inside, path.contains_points(points)) + + nStreaks = nStreaks + 1 + + logger.info('Masked {:d} new streaks'.format(nStreaks)) + + # Make the list of masked pixels + if inside is None: + ymask, xmask = np.array(0, dtype=int), np.array(0, dtype=int) + else: + ymask, xmask = np.nonzero(inside.reshape(shape)) + + logger.info('Setting bits in MSK image for STREAK: {:d}'.format(parse_badpix_mask('STREAK'))) + self.sci.mask[ymask, xmask] |= parse_badpix_mask('STREAK') + @classmethod def add_step_args(cls, parser): """Add arguments for null_weights and row_zipper """ null_weights.add_step_args(parser) row_zipper.add_step_args(parser) - #parser.add_argument('--custom_weight', action='store_true',default=cls.DEFAULT_CUSTOM_WEIGHT, + #parser.add_argument('--custom_weight', action='store_true',default=self.DEFAULT_CUSTOM_WEIGHT, # help='Run custom weights for STAR and do not write MSK plane for multi-epoch (me)') - parser.add_argument('--me_wgt_keepmask', action='store',default=cls.DEFAULT_ME_WGT_KEEPMASK, + parser.add_argument('--me_wgt_keepmask', action='store', default=cls.DEFAULT_ME_WGT_KEEPMASK, help='Run custom weight for 
multi-epoch (me) WGT_ME and preserve KEEPMASK') - parser.add_argument('--null_mask_sci', action='store',default=cls.DEFAULT_NULL_MASK_SCI, + parser.add_argument('--null_mask_sci', action='store', default=cls.DEFAULT_NULL_MASK_SCI, help='Names of mask bits to null (or an integer mask) on the SCI plane') parser.add_argument('--headfile', action='store', default=cls.DEFAULT_HEADFILE, help='Headfile (containing most update information)') @@ -211,16 +332,24 @@ def add_step_args(cls, parser): parser.add_argument('--tilename', action='store', default=cls.DEFAULT_TILENAME, help='Add (optional) TILENAME to SCI header') parser.add_argument('--tileid', action='store', type=int, default=cls.DEFAULT_TILEID, - help='Add (optional) TILENAME to SCI header') - return + help='Add (optional) TILEID to SCI header') + # Options for the extra streak maskig + parser.add_argument('--streak_file', action='store', type=str, default='', + help='Streak table file path') + parser.add_argument('--add_width', action='store', type=float, default=0., + help='Broaden streak width by this value (pixels)') + parser.add_argument('--add_length', action='store', type=float, default=100., + help='Extend streak endpoints by this value (pixels)') + parser.add_argument('--max_extrapolate', action='store', default=0.0, type=float, + help='Do not use streaks extrapolated more than this many degrees') -def get_safe_boolean(name,config,config_section): +def get_safe_boolean(name, config, config_section): """ Get boolean first and if fail, get from config""" try: - param = config.getboolean(config_section,name) + param = config.getboolean(config_section, name) except: - param = config.get(config_section,name) + param = config.get(config_section, name) return param if __name__ == '__main__': diff --git a/python/pixcorrect/coadd_prepare.py b/python/pixcorrect/coadd_prepare.py index 4e2dbfc..d159beb 100644 --- a/python/pixcorrect/coadd_prepare.py +++ b/python/pixcorrect/coadd_prepare.py @@ -1,17 +1,15 @@ 
-#!/usr/bin/env python +#!/usr/bin/env python3 """ """ -from os import path import sys -import numpy as np import time +import logging +from argparse import ArgumentParser import fitsio +import numpy as np from pixcorrect.corr_util import logger -from despyfits import maskbits -import logging -from argparse import ArgumentParser class CoaddPrepare: description = "Prepare a coadd tile image for SExtractor by interpolating vertically across" \ @@ -25,68 +23,71 @@ class CoaddPrepare: @classmethod def main(cls): + global logger description = "Prepare coadd file for SExtractor by interpolating across low-weight pixels," \ " and adding a mask plane that flags the interpolated pixels" - + # Get arguments parser = ArgumentParser(description=description) - parser.add_argument('-l', '--log', - default="", - help="the name of the logfile") - parser.add_argument('-v', '--verbose', action="count", - help="be verbose") - parser.add_argument('-i', '--infile', - default=None, - help='input coadd image file name') - parser.add_argument('-o', '--outfile', - default=None, - help='output coadd image file name') + parser.add_argument('-l', '--log', + default="", + help="the name of the logfile") + parser.add_argument('-v', '--verbose', action="count", + help="be verbose") + parser.add_argument('-i', '--infile', + default=None, + help='input coadd image file name') + parser.add_argument('-o', '--outfile', + default=None, + help='output coadd image file name') parser.add_argument('--min_cols', - default=None, - help='minimum height of region to interpolate') + default=None, + help='minimum height of region to interpolate') parser.add_argument('--max_cols', - default=None, - help='maximum height of region to interpolate') + default=None, + help='maximum height of region to interpolate') parser.add_argument('--weight_threshold', - default=CoaddPrepare.DEFAULT_WEIGHT_THRESHOLD, - help='Maximum weight value to interpolate over') + default=CoaddPrepare.DEFAULT_WEIGHT_THRESHOLD, + 
help='Maximum weight value to interpolate over') parser.add_argument('--weight_value', - default=CoaddPrepare.DEFAULT_WEIGHT_VALUE, - help='Weight value assigned to interpolated pixels, <0 to use neighbors') - + default=CoaddPrepare.DEFAULT_WEIGHT_VALUE, + help='Weight value assigned to interpolated pixels, <0 to use neighbors') + args = parser.parse_args() - + # Set up logger - if args.log is not None and len(args.log)>0: + if args.log is not None and args.log: logging.basicConfig(filename=args.log, - format="%(asctime)s %(levelname)s:\t%(message)s", - level=logging.WARNING) + format="%(asctime)s %(levelname)s:\t%(message)s", + level=logging.WARNING) sh = logging.StreamHandler() sh.setFormatter(logging.Formatter("%(asctime)s %(levelname)s:\t%(message)s")) + logger = logging.getLogger() + logger.addHandler(sh) else: logging.basicConfig(format="%(asctime)s %(levelname)s:\t%(message)s", - level=logging.WARNING) + level=logging.WARNING) - logger = logging.getLogger() + logger = logging.getLogger() if args.verbose > 0: - verbosity = logging.INFO if args.verbose==1 else logging.DEBUG + verbosity = logging.INFO if args.verbose == 1 else logging.DEBUG logger.setLevel(verbosity) # Call routine ret_val = coadd_prepare(args.infile, args.outfile, - min_cols=args.min_cols, - max_cols=args.max_cols, - weight_threshold = float(args.weight_threshold), - weight_value = float(args.weight_value)) + min_cols=args.min_cols, + max_cols=args.max_cols, + weight_threshold=float(args.weight_threshold), + weight_value=float(args.weight_value)) sys.exit(ret_val) - + @classmethod def __call__(cls, imageIn, imageOut, min_cols=DEFAULT_MINCOLS, max_cols=DEFAULT_MAXCOLS, - weight_threshold = DEFAULT_WEIGHT_THRESHOLD, - weight_value = DEFAULT_WEIGHT_VALUE): + weight_threshold=DEFAULT_WEIGHT_THRESHOLD, + weight_value=DEFAULT_WEIGHT_VALUE): """ Add a mask plane to imageIn and set bits wherever the weight value is <= given threshold. 
Then interpolate the data plane along columns to replace masked pixels. @@ -101,71 +102,71 @@ def __call__(cls, imageIn, imageOut, - `weight_threshold`: Upper bound for weight values to mark as bad - `weight_value`: New weight value for bad pixels, enter <0 to use neighbor weights """ - + logger.info('Preparing coadd {:s} for SExtractor'.format(imageIn)) # Read weight plane and science plane - sci,scihdr = fitsio.read(imageIn, ext=0, header=True) - wt,wthdr = fitsio.read(imageIn, ext=1, header=True) + sci, scihdr = fitsio.read(imageIn, ext=0, header=True) + wt, wthdr = fitsio.read(imageIn, ext=1, header=True) # Make mask plane mask = wt <= float(weight_threshold) # Identify column runs to interpolate, start by marking beginnings of runs work = np.array(mask) - work[1:,:] = np.logical_and(mask[1:,:], ~mask[:-1,:]) - xstart,ystart = np.where(work.T) + work[1:, :] = np.logical_and(mask[1:, :], ~mask[:-1, :]) + xstart, ystart = np.where(work.T) # Now ends of runs work = np.array(mask) - work[:-1,:] = np.logical_and(mask[:-1,:], ~mask[1:,:]) + work[:-1, :] = np.logical_and(mask[:-1, :], ~mask[1:, :]) xend, yend = np.where(work.T) yend = yend + 1 # Make the value one-past-end # If we've done this correctly, every run has a start and an end, on same col - if not np.all(xstart==xend): + if not np.all(xstart == xend): logger.error("Logic problem, xstart and xend not equal.") - print xstart,xend ### + print(xstart, xend)### return 1 # Narrow our list to runs of the desired length range and # not touching the edges - use = yend-ystart >= min_cols + use = yend - ystart >= min_cols if max_cols is not None: - use = np.logical_and(yend-ystart<=max_cols, use) - use = np.logical_and(ystart>0, use) - use = np.logical_and(yend 0, use) + use = np.logical_and(yend < mask.shape[0], use) ystart = ystart[use] yend = yend[use] xstart = xstart[use] # Assign mean of top and bottom to runs, and fill in weight plane for run in range(len(xstart)): - sci[ystart[run]:yend[run],xstart[run]] = \ - 
0.5*(sci[ystart[run]-1,xstart[run]] + - sci[yend[run],xstart[run]]) - if weight_value<0: - fill_weight = 0.5*(wt[ystart[run]-1,xstart[run]] \ - + wt[yend[run],xstart[run]]) + sci[ystart[run]:yend[run], xstart[run]] = \ + 0.5 * (sci[ystart[run] - 1, xstart[run]] + + sci[yend[run], xstart[run]]) + if weight_value < 0: + fill_weight = 0.5 * (wt[ystart[run] - 1, xstart[run]] \ + + wt[yend[run], xstart[run]]) else: fill_weight = weight_value - wt[ystart[run]:yend[run],xstart[run]] = fill_weight + wt[ystart[run]:yend[run], xstart[run]] = fill_weight # Add to image history - scihdr['HISTORY'] =time.asctime(time.localtime()) + \ + scihdr['HISTORY'] = time.asctime(time.localtime()) + \ ' coadd_prepare with weight threshold {:f}'.format(weight_threshold) - + # Write out all three planes - mask = np.array(mask, dtype=np.int16)*cls.MASK_VALUE + mask = np.array(mask, dtype=np.int16) * cls.MASK_VALUE logger.debug('Writing output images') with fitsio.FITS(imageOut, mode=fitsio.READWRITE, clobber=True) as ff: ff.write(sci, extname='SCI', header=scihdr, clobber=True) ff.write(mask, extname='MSK') ff.write(wt, extname='WGT', header=wthdr) - + logger.debug('Finished coadd_prepare') - ret_code=0 + ret_code = 0 return ret_code diff --git a/python/pixcorrect/connect_streaks.py b/python/pixcorrect/connect_streaks.py new file mode 100755 index 0000000..6bc4ce0 --- /dev/null +++ b/python/pixcorrect/connect_streaks.py @@ -0,0 +1,948 @@ +#!/usr/bin/env python3 + +# $Id: connect_streaks.py 47132 2018-06-13 19:05:15Z rgruendl $ +# $Rev:: 47132 $: # Revision of last commit. +# $LastChangedBy:: rgruendl $: # Author of last commit. +# $LastChangedDate:: 2018-06-13 14:05:15 #$: # Date of last commit. +# +# Note original code was developed by Gary Bernstein... 
+# + +""" +Read streak tables, determine candidate missed streaks, add them to masks +""" + +import re +import time +import shutil +import numpy as np +from scipy.optimize import newton +import matplotlib +matplotlib.use('pdf') +import matplotlib.pyplot as pl + +import fitsio +from despyastro import wcsutil +from despyfits.maskbits import parse_badpix_mask +from despyfits.DESImage import DESImage +from pixcorrect.PixCorrectDriver import PixCorrectDriver, filelist_to_list +from pixcorrect.corr_util import logger + +# Which section of the config file to read for this step +config_section = 'connect_streaks' + +# Utility routines for this task: +def gnomonic(ra, dec, ra0, dec0): + # ra, dec are the target points + # ra0, dec0 are the projection centers + # All are given in degrees + # returns x, y also in degrees, with + # x to east and y to north + dtor = np.pi / 180. + cd = np.cos(dec * dtor) + cd0 = np.cos(dec0 * dtor) + sd = np.sin(dec * dtor) + sd0 = np.sin(dec0 * dtor) + cr = np.cos((ra - ra0) * dtor) + sr = np.sin((ra - ra0) * dtor) + x = (cd * sr) / (sd0 * sd + cd0 * cd * cr) / dtor + y = (cd0 * sd - sd0 * cd * cr) / (sd0 * sd + cd0 * cd * cr) / dtor + return x, y + +# And its inverse +def gnomonicInverse(x, y, ra0, dec0): + # take location (x,y) in + # gnomonic projection about ra0,dec0 + # and return ra,dec. + # Everything in degrees. + dtor = np.pi / 180. + rho = np.hypot(x, y) * dtor + c = np.arctan(rho) + cd0 = np.cos(dec0 * dtor) + sd0 = np.sin(dec0 * dtor) + dec = np.arcsin(np.cos(c) * sd0 + y * dtor * np.sin(c) * cd0 / rho) / dtor + ra = np.arctan(x * dtor * np.sin(c) / (rho * cd0 * np.cos(c) - y * dtor * sd0 * np.sin(c))) / dtor + ra0 + return ra, dec + +def boxCross(x0, y0, mx_in, my_in, xmin, xmax, ymin, ymax): + # Function that determines what (if any) parts of the + # lines defined by (x-x0)*my = (y-y0)*mx cross + # the rectangle bounded by xmin0. If mx==0, + # special handling needed. 
+ ss = np.sign(mx_in) + mx = mx_in * ss + my = np.where(ss < 0, -my_in, my_in) + + # Find the two points where line crosses x=xmin,x=xmax + xcross1 = np.where(ss == 0, x0, xmin) + ycross1 = np.where(ss == 0, ymin, y0 + my * (xmin - x0) / mx) + xcross2 = np.where(ss == 0, x0, xmax) + ycross2 = np.where(ss == 0, ymax, y0 + my * (xmax - x0) / mx) + + # No intersection if y does not enter range at all + miss = np.logical_or(np.logical_and(ycross1 < ymin, ycross2 < ymin), + np.logical_and(ycross1 > ymax, ycross2 > ymax)) + + # or if it's a vertical line and out of x range + miss = np.logical_or(miss, np.logical_and(ss == 0, x0 < xmin)) + miss = np.logical_or(miss, np.logical_and(ss == 0, x0 > xmax)) + # or if it's a horizontal line and out of y range + miss = np.logical_or(miss, np.logical_and(my == 0, y0 < ymin)) + miss = np.logical_or(miss, np.logical_and(my == 0, y0 > ymax)) + + # Pull crossing points to be within [ymin,ymax] + fix = ycross1 > ymax + xcross1 = np.where(fix, x0 + (ymax - y0) * mx / my, xcross1) + ycross1 = np.where(fix, ymax, ycross1) + fix = ycross2 > ymax + xcross2 = np.where(fix, x0 + (ymax - y0) * mx / my, xcross2) + ycross2 = np.where(fix, ymax, ycross2) + fix = ycross1 < ymin + xcross1 = np.where(fix, x0 + (ymin - y0) * mx / my, xcross1) + ycross1 = np.where(fix, ymin, ycross1) + fix = ycross2 < ymin + xcross2 = np.where(fix, x0 + (ymin - y0) * mx / my, xcross2) + ycross2 = np.where(fix, ymin, ycross2) + + # Backfill non-crossers + xcross1[miss] = 0 + xcross2[miss] = -1. + ycross1[miss] = 0 + ycross2[miss] = 0 + + # Special case for valid horizontal lines?? (my==0) + fix = np.logical_and(my == 0, y0 > ymin) + fix = np.logical_and(fix, y0 < ymax) + xcross1 = np.where(fix, xmin, xcross1) + xcross2 = np.where(fix, xmax, xcross2) + ycross1 = np.where(fix, y0, ycross1) + ycross2 = np.where(fix, y0, ycross2) + return xcross1, xcross2, ycross1, ycross2 + +class Line: + ''' + Class representing the line connecting two points. 
+ Used to establish a new (t,u) coordinate system + where t is along the line and u is perpendicular distance. + Inputs to all functions can be scalars or numpy arrays. + ''' + def __init__(self, x1, y1, x2, y2): + # Define line connecting (x1,y1) to (x2,y2) + self.x0 = 0.5 * (x1 + x2) + self.y0 = 0.5 * (y1 + y2) + dx = x2 - x1 + dy = y2 - y1 + self.mx = dx / np.hypot(dx, dy) + self.my = dy / np.hypot(dx, dy) + + def xy2tu(self, x, y): + # Transform x,y coordinates to the (t,u) system defined by line + dx = x - self.x0 + dy = y - self.y0 + return self.mx * dx + self.my * dy, -self.my * dx + self.mx * dy + + def tu2xy(self, t, u): + # Transform t,u coordinates in the line's system into x,y. + dx = self.mx * t - self.my * u + dy = self.my * t + self.mx * u + return dx + self.x0, dy + self.y0 + + def ux2t(self, u, x): + # Solve for the t value where u and x are given + return (self.my * u + (x - self.x0)) / self.mx + + def uy2t(self, u, y): + # Solve for the t value where u and y are given + return (-self.mx * u + (y - self.y0)) / self.my + +def boxTrack(line, w, xmin, xmax, ymin, ymax): + ''' Determine the corners of the minimal rectangle centered + on the line and having width 2w that contains all possible points + rectangle bounded by [xy][min|max]. + Return value will be 4x2 array of these corners' xy coords. They + will all be zero if there is no intersection. + ''' + xbox = np.array([xmin, xmax, xmax, xmin, xmin]) + ybox = np.array([ymin, ymin, ymax, ymax, ymin]) + xVaries = (True, False, True, False) # which coord varies along edge? + fixedValue = (ymin, xmax, ymax, xmin) # what is value of fixed x or y? 
+ t, u = line.xy2tu(xbox, ybox) + hw = 0.5 * w + # Make an array which is +1,0,-1 as the corner is + # above, inside, or below the track of width w + corner_state = np.where(u > hw, 1, 0) + corner_state = np.where(u < hw, -1, corner_state) + + # Analyze each corner and edge of the box + # to find t values of all crossings of track + # with box edges, or of box corners in the track + + # All corners within track are possible extrema + t_extremes = t[corner_state == 0].tolist() + + for corner in range(4): + state1 = corner_state[corner] + state2 = corner_state[corner + 1] + if (state1 > 0 and state2 <= 0) or (state2 > 0 and state1 <= 0): + # There should be an upper crossing on this segment + if xVaries[corner]: + t_extremes.append(line.uy2t(+hw, fixedValue[corner])) + else: + t_extremes.append(line.ux2t(+hw, fixedValue[corner])) + if (state1 < 0 and state2 >= 0) or (state2 < 0 and state1 >= 0): + # There should be a lower crossing on this segment + if xVaries[corner]: + t_extremes.append(line.uy2t(-hw, fixedValue[corner])) + else: + t_extremes.append(line.ux2t(-hw, fixedValue[corner])) + # Now the limits of t for the rectangle will be min and max of box + out = np.zeros((4, 2), dtype=float) + if t_extremes: + # Only do this if there is any overlap + tmin = np.min(t_extremes) + tmax = np.max(t_extremes) + rectangle_t = np.array([tmin, tmin, tmax, tmax]) + rectangle_u = np.array([-hw, +hw, +hw, -hw]) # ?? backwards ?? + rectangle_x, rectangle_y = line.tu2xy(rectangle_t, rectangle_u) + out[:, 0] = rectangle_x + out[:, 1] = rectangle_y + return out + +def friends(xc1, xc2, yc1, yc2, mx, my, ccdnum, i, j, max_sine=0.02): + # Determine whether the two streaks + # pass within tolerance of each other's centers + + # Same-CCD streaks cannot be friends + # (Though they may get linked if both friends of + # a streak on another CCD.) 
+ if ccdnum[i] == ccdnum[j]: + return False + + xi = (xc1[i] + xc2[i]) * 0.5 + xj = (xc1[j] + xc2[j]) * 0.5 + yi = (yc1[i] + yc2[i]) * 0.5 + yj = (yc1[j] + yc2[j]) * 0.5 + + di = (xi - xj) * my[j] - (yi - yj) * mx[j] + dj = (xj - xi) * my[i] - (yj - yi) * mx[i] + + # This criterion is that the sine of angle between the + # center-to-center line and each streak is < max_sine + dij = np.hypot(xi - xj, yi - yj) + return max(np.abs(di), np.abs(dj)) < max_sine * dij + + +class ConnectStreaks(PixCorrectDriver): + description = "Predict missed streak detections and mask them" + step_name = config_section + + DEFAULT_STREAK_NAME_IN = 'streak' + DEFAULT_STREAK_NAME_OUT = 'streak2' + DEFAULT_IMAGE_NAME_IN = 'hmmasked' + DEFAULT_IMAGE_NAME_OUT = 'immasked' + DEFAULT_ADD_WIDTH = 0 + DEFAULT_MAX_EXTRAPOLATE = 1.1 * np.hypot(2048., 4096.) * 0.263 / 3600 # A bit more than 1 CCD diagonal + + @classmethod + def __call__(cls, streak_list, image_list, + streak_name_in, streak_name_out, + image_name_in, image_name_out, + add_width, max_extrapolate, + plotfile=None): + + """ + Read input list of streak detections and predict where a streak + crossed a CCD but was missed. Then create new copies of images, + altering masks to set STREAK bit in new streaks. 
+ + :Parameters: + - `streak_list`: list of input streak file names + - `image_list`: list of names of image files to be updated + - `streak_name_in`: string to replace in input streak filenames + - `streak_name_out`: replacement string for output streak filenames + - `image_name_in`: string to replace in input image filenames + - `image_name_out`: replacement string for output image filenames + - `add_width`: number of pixels to grow (or shrink) streak width + - `max_extrapolate`: farthest to start a new streak from endpoint of an existing one (degrees) + - `plotfile`: if given, a diagram of streaks is drawn into this file + """ + + logger.info('Reading {:d} streak files'.format(len(streak_list))) + + # Read in all the streak RA/Dec, into a dictionary keyed by CCDNUM, + # which should be in the primary header. Also save a dictionary of + # the file names for these + streak_corners = {} + streak_names = {} + for streakfile in streak_list: + try: + with fitsio.FITS(streakfile, 'r') as fits: + ccdnum = fits[0].read_header()['CCDNUM'] + streak_names[ccdnum] = streakfile + tab = fits[1].read() + if tab: + streak_corners[ccdnum] = fits[1].read()['CORNERS_WCS'] + except: + logger.error('Failure reading streak file <{:s}>'.format(streakfile)) + return 1 + + logger.info('Reading WCS from {:d} CCDs'.format(len(image_list))) + + # Read in the WCS for each CCD for which we have an image, + # also put into dicts keyed by CCDNUM + # Will get these directly from FITS instead of using DESImage in order + # to save reading all of the data. 
+ wcs = {} + crval1 = [] + crval2 = [] + for imgfile in image_list: + try: + hdr = fitsio.read_header(imgfile, 0) + ccd = hdr['CCDNUM'] + crval1.append(hdr['CRVAL1']) + crval2.append(hdr['CRVAL2']) + # Due to a bug in fitsio 1.0.0rc1+0, we need to clean up the + # header before feeding it to wcsutil and remove the 'None' and other problematic items + for k in hdr: + # Try to access the item, if failed we have to remove it + if not k: + hdr.delete(k) + continue + try: + _ = hdr[k] + except: + logger.info("Removing keyword: {:s} from header".format(k)) + hdr.delete(k) + wcs[ccd] = wcsutil.WCS(hdr) + except Exception as e: + print(e) ### + logger.error('Failure reading WCS from {:s}'.format(imgfile)) + return 1 + + # Determine a center for local gnomonic projection + ra0 = np.median(crval1) + dec0 = np.median(crval2) + + # Calculate upper and lower bounds of each CCD in the local + # gnomonic system. + ccd_x1 = np.zeros(63, dtype=float) + ccd_x2 = np.zeros(63, dtype=float) + ccd_y1 = np.zeros(63, dtype=float) + ccd_y2 = np.zeros(63, dtype=float) + + ccd_xmin = 1. + ccd_xmax = 2048. + ccd_ymin = 1. + ccd_ymax = 4096. 
ccd_corners_xpix = np.array([ccd_xmin, ccd_xmin, ccd_xmax, ccd_xmax]) + ccd_corners_ypix = np.array([ccd_ymin, ccd_ymax, ccd_ymax, ccd_ymin]) + for ccd, w in wcs.items(): + ra, dec = w.image2sky(ccd_corners_xpix, ccd_corners_ypix) + x_corners, y_corners = gnomonic(ra, dec, ra0, dec0) + ccd_x1[ccd] = np.min(x_corners) + ccd_y1[ccd] = np.min(y_corners) + ccd_x2[ccd] = np.max(x_corners) + ccd_y2[ccd] = np.max(y_corners) + + # Now collect information on all of the streak segments that we have + ccdnum = [] + ra_corner = [] + dec_corner = [] + + for ccd, streaks in streak_corners.items(): + if ccd not in wcs: + # Skip segments on CCDs that have no WCS + logger.warning('No WCS found for streaks on CCD {:d}'.format(ccd)) + continue + n1, _, _ = streaks.shape + for i in range(n1): + ccdnum.append(ccd) + ra_corner.append(streaks[i, :, 0]) + dec_corner.append(streaks[i, :, 1]) + # Put streak corners into gnomonic system for this exposure + x1, y1 = gnomonic(np.array([r[0] for r in ra_corner], dtype=float), + np.array([d[0] for d in dec_corner], dtype=float), + ra0, dec0) + x2, y2 = gnomonic(np.array([r[1] for r in ra_corner], dtype=float), + np.array([d[1] for d in dec_corner], dtype=float), + ra0, dec0) + x3, y3 = gnomonic(np.array([r[2] for r in ra_corner], dtype=float), + np.array([d[2] for d in dec_corner], dtype=float), + ra0, dec0) + x4, y4 = gnomonic(np.array([r[3] for r in ra_corner], dtype=float), + np.array([d[3] for d in dec_corner], dtype=float), + ra0, dec0) + ccdnum = np.array(ccdnum, dtype=int) + + # Describe each segment by two endpoints at the midpoints of short sides + # Will need to decide which is the short side + d12 = np.hypot(x2 - x1, y2 - y1) + d23 = np.hypot(x3 - x2, y3 - y2) + xleft = np.where(d12 < d23, 0.5 * (x1 + x2), 0.5 * (x2 + x3)) + yleft = np.where(d12 < d23, 0.5 * (y1 + y2), 0.5 * (y2 + y3)) + xright = np.where(d12 < d23, 0.5 * (x3 + x4), 0.5 * (x4 + x1)) + yright = np.where(d12 < d23, 0.5 * (y3 + y4), 0.5 * (y4 + y1)) + dx = xright -
xleft + dy = yright - yleft + # Calculate a width as 2x the + # largest perp distance from a vertex to this line + w1 = np.abs(dx * (y1 - yleft) - dy * (x1 - xleft)) / np.hypot(dx, dy) + w2 = np.abs(dx * (y2 - yleft) - dy * (x2 - xleft)) / np.hypot(dx, dy) + w3 = np.abs(dx * (y3 - yleft) - dy * (x3 - xleft)) / np.hypot(dx, dy) + w4 = np.abs(dx * (y4 - yleft) - dy * (x4 - xleft)) / np.hypot(dx, dy) + wmax = np.maximum(w1, w2) + wmax = np.maximum(wmax, w3) + wmax = np.maximum(wmax, w4) + wmax = 2 * wmax + + # Rearrange so that xleft <= xright + swapit = xright < xleft + tmp = np.where(swapit, xleft, xright) + xleft = np.where(swapit, xright, xleft) + xright = np.array(tmp) + tmp = np.where(swapit, yleft, yright) + yleft = np.where(swapit, yright, yleft) + yright = np.array(tmp) + + # Get the crossing points of the lines into CCDs + xc1, xc2, yc1, yc2 = boxCross(xleft, yleft, dx, dy, + ccd_x1[ccdnum], ccd_x2[ccdnum], ccd_y1[ccdnum], ccd_y2[ccdnum]) + + # Get rid of segments that appear to miss their host CCDs + miss = xc2 < xc1 + + # Take 1st crossing point instead of left point if it has higher x, or vertical + # with higher y, i.e. truncate the track segment at the edge of the CCD. + replace = np.where(dx == 0, yc1 > yleft, xc1 > xleft) + xc1 = np.where(replace, xc1, xleft) + yc1 = np.where(replace, yc1, yleft) + # Likewise truncate segment at right-hand crossing + replace = np.where(dx == 0, yc2 < yright, xc2 < xright) + xc2 = np.where(replace, xc2, xright) + yc2 = np.where(replace, yc2, yright) + + # Backfill the non-intersections again - note that above + # maneuvers will leave xc2 np.median(np.abs(my[ids])) + if not xOrder: + xx, yy = yy, xx + + # Record limits of detected tracks' independent variable + xxmin = np.min(xx) + xxmax = np.max(xx) + + # Fit a quadratic to the points, or + # linear if only one streak + # Allow up to nclip points to clip + RESID_TOLERANCE = 6. / 3600. 
# Clip >6" deviants + nclip = 2 + for i in range(nclip+1): + if len(xx) > 2: + A = np.vstack((np.ones_like(xx), xx, xx * xx)) + else: + A = np.vstack((np.ones_like(xx), xx)) + coeffs = np.linalg.lstsq(A.T, yy)[0] + resid = yy - np.dot(A.T, coeffs) + j = np.argmax(np.abs(resid)) + if i == nclip or np.abs(resid[j]) < RESID_TOLERANCE: + break + xx = np.delete(xx, j) + yy = np.delete(yy, j) + + # Calculate the y(x1),y(x2) where tracks + # cross the left/right of every CCD, then + # find the ones that will cross CCD's y. + + # These are CCD bounds, with xx being the quadratic's argument + if xOrder: + xx1 = ccd_x1 + xx2 = ccd_x2 + yy1 = ccd_y1 + yy2 = ccd_y2 + else: + xx1 = ccd_y1 + xx2 = ccd_y2 + yy1 = ccd_x1 + yy2 = ccd_x2 + + if len(coeffs) == 2: + A2 = np.vstack((np.ones_like(xx2), xx2)).T + A1 = np.vstack((np.ones_like(xx1), xx1)).T + else: + A2 = np.vstack((np.ones_like(xx2), xx2, xx2 * xx2)).T + A1 = np.vstack((np.ones_like(xx1), xx1, xx1 * xx1)).T + + # yyc[12] are the dependent coordinate at crossings of xx[12] bounds + yyc1 = np.dot(A1, coeffs) + yyc2 = np.dot(A2, coeffs) + # Now we ask whether the y value of streak at either edge crossing + # is in the y range of a CCD + missed = np.logical_or(np.maximum(yyc1, yyc2) < yy1, np.minimum(yyc1, yyc2) > yy2) + # Also skip any CCD where we already have a streak + for iccd in ccdnum[ids]: + missed[iccd] = True + missed[0] = True # There is no CCD0 + missed[61] = True # Never use this one either, it's always dead + + # Now find intersection of new streaks with edges of their CCDs + # Define a function for the streak path that we'll use for solving + def poly(x, coeffs, ysolve): + y = coeffs[0] + x * coeffs[1] + if len(coeffs) > 2: + y += coeffs[2] * x * x + return y - ysolve + + EDGE_TOLERANCE = 0.2 / 3600. 
# Find x/y of edge to this accuracy (0.2 arcsec) + for iccd in np.where(~missed)[0]: + # This is a loop over every CCD that the track crosses but has no detected segment + # Determine an (xx,yy) pair for its entry and exit from the CCD + new_yy1 = yyc1[iccd] + new_yy2 = yyc2[iccd] + new_xx1 = xx1[iccd] + new_xx2 = xx2[iccd] + # left side: + if new_yy1 < yy1[iccd]: + new_xx1 = newton(poly, new_xx1, args=(coeffs, yy1[iccd]), tol=EDGE_TOLERANCE) + elif new_yy1 > yy2[iccd]: + new_xx1 = newton(poly, new_xx1, args=(coeffs, yy2[iccd]), tol=EDGE_TOLERANCE) + new_yy1 = poly(new_xx1, coeffs, 0.) + # right side + if new_yy2 < yy1[iccd]: + new_xx2 = newton(poly, new_xx2, args=(coeffs, yy1[iccd]), tol=EDGE_TOLERANCE) + elif new_yy2 > yy2[iccd]: + new_xx2 = newton(poly, new_xx2, args=(coeffs, yy2[iccd]), tol=EDGE_TOLERANCE) + new_yy2 = poly(new_xx2, coeffs, 0.) + # Does the solution lie outside the input streaks? + extrapolated = new_xx1 < xxmin or new_xx2 > xxmax + width = np.median(wmax[ids]) + + # Calculate distance to nearest unclipped streak member + nearest = min(np.min(np.hypot(xx - new_xx1, yy - new_yy1)), + np.min(np.hypot(xx - new_xx2, yy - new_yy2))) + + if not xOrder: + # swap xx,yy back if we had y as the independent variable + new_xx1, new_yy1 = new_yy1, new_xx1 + new_xx2, new_yy2 = new_yy2, new_xx2 + + # Project the coordinates back to RA, Dec + ra1, dec1 = gnomonicInverse(new_xx1, new_yy1, ra0, dec0) + ra2, dec2 = gnomonicInverse(new_xx2, new_yy2, ra0, dec0) + + # Append this streak to list of new ones + new_ccdnum.append(iccd) + new_xc1.append(new_xx1) + new_xc2.append(new_xx2) + new_yc1.append(new_yy1) + new_yc2.append(new_yy2) + new_ra1.append(ra1) + new_ra2.append(ra2) + new_dec1.append(dec1) + new_dec2.append(dec2) + new_width.append(width) + new_extrapolated.append(extrapolated) + new_nearest.append(nearest) + + # Make all lists into arrays + new_ccdnum = np.array(new_ccdnum, dtype=int) + new_xc1 = np.array(new_xc1, dtype=float) + new_xc2 = 
np.array(new_xc2, dtype=float) + new_yc1 = np.array(new_yc1, dtype=float) + new_yc2 = np.array(new_yc2, dtype=float) + new_ra1 = np.array(new_ra1, dtype=float) + new_ra2 = np.array(new_ra2, dtype=float) + new_dec1 = np.array(new_dec1, dtype=float) + new_dec2 = np.array(new_dec2, dtype=float) + new_width = np.array(new_width, dtype=float) + new_extrapolated = np.array(new_extrapolated, dtype=bool) + new_nearest = np.array(new_nearest, dtype=float) + + # Decide which new segments will be masked + maskit = np.logical_or(~new_extrapolated, new_nearest <= max_extrapolate) + + logger.info('Identified {:d} missing streak segments for masking'.format(\ + np.count_nonzero(maskit))) + + # Make the diagnostic plot if desired + if plotfile is not None: + pl.figure(figsize=(6, 6)) + pl.xlim(-1.1, 1.1) + pl.ylim(-1.1, 1.1) + pl.gca().set_aspect('equal') + + # Draw CCD outlines and numbers + for ccd, w in wcs.items(): + ra, dec = w.image2sky(ccd_corners_xpix, ccd_corners_ypix) + x_corners, y_corners = gnomonic(ra, dec, ra0, dec0) + x = x_corners.tolist() + y = y_corners.tolist() + x.append(x[0]) + y.append(y[0]) + pl.plot(x, y, 'k-', label=None) + x = np.mean(x_corners) + y = np.mean(y_corners) + pl.text(x, y, str(ccd), horizontalalignment='center', + verticalalignment='center', fontsize=14) + + + + # Draw input streaks marked as edge + labelled = False + for i in np.where(nearedge)[0]: + x = (xc1[i], xc2[i]) + y = (yc1[i], yc2[i]) + if not labelled: + pl.plot(x, y, 'm-', lw=2, label='edge') + labelled = True + else: + pl.plot(x, y, 'm-', lw=2, label=None) + + # Draw linked tracks + s = set() + for t in tracks: + if len(t) > 1: + s = s.union(set(t)) + labelled = False + for i in s: + x = (xc1[i], xc2[i]) + y = (yc1[i], yc2[i]) + if not labelled: + pl.plot(x, y, 'b-', lw=2, label='connected') + labelled = True + else: + pl.plot(x, y, 'b-', lw=2, label=None) + + # Draw singleton tracks as those that are neither edge nor connected + s = s.union(set(np.where(nearedge)[0])) + single = 
set(range(len(xc1))) + single = single.difference(s) + labelled = False + for i in single: + x = (xc1[i], xc2[i]) + y = (yc1[i], yc2[i]) + if not labelled: + pl.plot(x, y, 'c-', lw=2, label='unconnected') + labelled = True + else: + pl.plot(x, y, 'c-', lw=2, label=None) + + # Draw missed tracks that will be masked + labelled = False + for i in np.where(maskit)[0]: + x = (new_xc1[i], new_xc2[i]) + y = (new_yc1[i], new_yc2[i]) + if not labelled: + pl.plot(x, y, 'r-', lw=2, label='new masked') + labelled = True + else: + pl.plot(x, y, 'r-', lw=2, label=None) + + + # Draw missed tracks that will not be masked + labelled = False + for i in np.where(~maskit)[0]: + x = (new_xc1[i], new_xc2[i]) + y = (new_yc1[i], new_yc2[i]) + if not labelled: + pl.plot(x, y, 'r:', lw=2, label='new skipped') + labelled = True + else: + pl.plot(x, y, 'r:', lw=2, label=None) + + # legend + pl.legend(framealpha=0.3, fontsize='small') + pl.savefig(plotfile) + + # Now accumulate pixel coordinates of corners of all new streaks to mask + added_streak_ccds = [] + added_streak_corners = [] + + for id, ccd in enumerate(new_ccdnum): + ccd = new_ccdnum[id] + if not maskit[id]: + continue # Only proceed with the ones to be masked + # Get a pixel scale from the WCS, in arcsec/pix + xmid = np.mean(ccd_corners_xpix) + ymid = np.mean(ccd_corners_ypix) + ra, dec = wcs[ccd].image2sky(xmid, ymid) + ra2, dec2 = wcs[ccd].image2sky(xmid + 1, ymid) + pixscale = np.hypot(np.cos(dec * np.pi / 180.) 
* (ra - ra2), dec - dec2) + + # width of streak, in pixels + w = new_width[id] / pixscale + add_width + if w <= 0.: + continue # Don't mask streaks of zero width + # Make RA/Dec of track endpoints + x = np.array([new_xc1[id], new_xc2[id]]) + y = np.array([new_yc1[id], new_yc2[id]]) + ra, dec = gnomonicInverse(x, y, ra0, dec0) + # Convert to pixel coordinates + x, y = wcs[ccd].sky2image(ra, dec) + line = Line(x[0], y[0], x[1], y[1]) + # Create bounding rectangle of track + corners_pix = boxTrack(line, w, ccd_xmin, ccd_xmax, ccd_ymin, ccd_ymax) + added_streak_ccds.append(ccd) + added_streak_corners.append(np.array(corners_pix)) + + added_streak_ccds = np.array(added_streak_ccds) + + # Make new copies of streak files, adding new ones + logger.debug('Rewriting streak files') + + for ccd, streakfile_in in streak_names.items(): + nmatch = len(re.findall(streak_name_in, streakfile_in)) + if nmatch != 1: + logger.error('Could not update streak file named <' + streakfile_in + '>') + return 1 + streakfile_out = re.sub(streak_name_in, streak_name_out, streakfile_in) + # Use file system to make fresh copy of table's FITS file + shutil.copy2(streakfile_in, streakfile_out) + + # Find new streaks for this ccd + add_ids = np.where(added_streak_ccds == ccd)[0] + if add_ids: + # Open the table and add new streaks' info + try: + fits = fitsio.FITS(streakfile_out, 'rw') + addit = np.recarray(len(add_ids), + dtype=[('LABEL', '>i4'), + ('CORNERS', '>f8', (4, 2)), + ('CORNERS_WCS', '>f8', (4, 2))]) + if fits[1]['LABEL'][:]: + first_label = np.max(fits[1]['LABEL'][:]) + 1 + else: + first_label = 1 + addit.LABEL = np.arange(first_label, first_label + len(addit)) + + for i, id in enumerate(add_ids): + corners_pix = added_streak_corners[id] + addit.CORNERS[i] = corners_pix + ra, dec = wcs[ccd].image2sky(corners_pix[:, 0], corners_pix[:, 1]) + addit.CORNERS_WCS[i] = np.vstack((ra, dec)).T + + fits[1].append(addit) + fits.close() + except Exception as e: + print(e) + logger.error('Failure 
updating streak file <{:s}>'.format(streakfile_out)) + return 1 + + logger.debug('Remasking images') + + for imgfile_in in image_list: + # Make the name needed for output + nmatch = len(re.findall(image_name_in, imgfile_in)) + if nmatch != 1: + logger.error('Could not create output name for image file named <' + imgfile_in + '>') + return 1 + imgfile_out = re.sub(image_name_in, image_name_out, imgfile_in) + + sci = DESImage.load(imgfile_in) + ccd = sci.header['CCDNUM'] + + # Find added streaks for this ccd + add_ids = np.where(added_streak_ccds == ccd)[0] + if add_ids: + shape = sci.mask.shape + yy, xx = np.indices(shape) + points = np.vstack((xx.flatten(), yy.flatten())).T + inside = None + + for id in add_ids: + # From Alex's immask routine: mark interior pixels + # for each added streak + v = added_streak_corners[id] + vertices = [(v[0, 0], v[0, 1]), + (v[1, 0], v[1, 1]), + (v[2, 0], v[2, 1]), + (v[3, 0], v[3, 1]), + (v[0, 0], v[0, 1])] + path = matplotlib.path.Path(vertices) + + if inside is None: + inside = path.contains_points(points) + else: + inside = np.logical_or(inside, path.contains_points(points)) + + # Make the list of masked pixels + if inside is None: + ymask, xmask = np.array(0, dtype=int), np.array(0, dtype=int) + else: + ymask, xmask = np.nonzero(inside.reshape(shape)) + + sci.mask[ymask, xmask] |= parse_badpix_mask('STREAK') + + # Write something into the image header + + sci['DESCNCTS'] = time.asctime(time.localtime()) + \ + ' Mask {:d} new streaks'.format(len(add_ids)) +# sci['HISTORY'] = time.asctime(time.localtime()) + \ +# ' Mask {:d} new streaks'.format(len(add_ids)) + sci.save(imgfile_out) + + logger.info('Finished connecting streaks') + ret_code = 0 + return ret_code + + @classmethod + def add_step_args(cls, parser): + """Add arguments specific to streak connecting + """ + parser.add_argument('--streak_file', type=str, + help='File holding list of input streak file names') + parser.add_argument('--image_file', type=str, + help='File 
holding list of input image file names') + parser.add_argument('--streak_name_in', type=str, default=cls.DEFAULT_STREAK_NAME_IN, + help='String to replace in input streak filenames') + parser.add_argument('--streak_name_out', type=str, default=cls.DEFAULT_STREAK_NAME_OUT, + help='Replacement string for output streak filenames') + parser.add_argument('--image_name_in', type=str, default=cls.DEFAULT_IMAGE_NAME_IN, + help='String to replace in input image filenames') + parser.add_argument('--image_name_out', type=str, default=cls.DEFAULT_IMAGE_NAME_OUT, + help='Replacement string for output image filenames') + parser.add_argument('--add_width', type=float, default=cls.DEFAULT_ADD_WIDTH, + help='number of pixels to grow (or shrink) streak width') + parser.add_argument('--max_extrapolate', type=float, default=cls.DEFAULT_MAX_EXTRAPOLATE, + help='farthest to start a new streak from endpoint of an existing one (degrees)') + parser.add_argument('--plotfile', type=str, + help='filename for diagnostic plot, if desired') + + @classmethod + def run(cls, config): + """Customized execution for streak connection. No single input or output images. 
+ + :Parameters: + - `config`: the configuration from which to get other parameters + + """ + + streak_name_in = config.get(cls.step_name, 'streak_name_in') + streak_name_out = config.get(cls.step_name, 'streak_name_out') + image_name_in = config.get(cls.step_name, 'image_name_in') + image_name_out = config.get(cls.step_name, 'image_name_out') + add_width = config.getfloat(cls.step_name, 'add_width') + max_extrapolate = config.getfloat(cls.step_name, 'max_extrapolate') + + if config.has_option(cls.step_name, 'plotfile'): + plotfile = config.get(cls.step_name, 'plotfile') + else: + plotfile = None + + try: + streak_list = filelist_to_list(config.get(cls.step_name, 'streak_file')) + except: + logger.error('Failure reading streak file names from {:s}'.format(streak_list)) + return 1 + + try: + image_list = filelist_to_list(config.get(cls.step_name, 'image_file')) + except: + logger.error('Failure reading image file names from {:s}'.format(image_list)) + return 1 + + ret_code = cls.__call__(streak_list=streak_list, + image_list=image_list, + streak_name_in=streak_name_in, + streak_name_out=streak_name_out, + image_name_in=image_name_in, + image_name_out=image_name_out, + add_width=add_width, + max_extrapolate=max_extrapolate, + plotfile=plotfile) + return ret_code + + +connect_streaks = ConnectStreaks() + +# internal functions & classes + +if __name__ == '__main__': + connect_streaks.main() diff --git a/python/pixcorrect/corr_util.py b/python/pixcorrect/corr_util.py index ad2500e..aefd628 100644 --- a/python/pixcorrect/corr_util.py +++ b/python/pixcorrect/corr_util.py @@ -2,21 +2,11 @@ # imports from functools import wraps -from ConfigParser import SafeConfigParser -from argparse import ArgumentParser import logging -from contextlib import closing -from os import path import time import platform import ctypes -import pyfits -import numpy as np - - -from pixcorrect import proddir - # constants global logger logger = logging.getLogger('pixcorrect') @@ -31,11 +21,13 @@ 
class ArrayShapeException(Exception): class LibraryException(Exception): def __init__(self, value): self.value = value + super().__init__() class MismatchError(Exception): # Exception class for mismatch between properties of images def __init__(self, value): self.value = value + super().__init__() def __str__(self): return repr(self.value) @@ -51,10 +43,10 @@ def load_shlib(shlib_name): try: shlib = ctypes.CDLL(fname) except KeyError: - raise RuntimeError, ("Unknown platform: " + platform.system()) - + raise RuntimeError("Unknown platform: " + platform.system()) + return shlib - + # A decorator that uses a FITS keyword to determine whether a step # has already been performed, and skips it if it has. @@ -69,7 +61,7 @@ def f_wrapper(*args, **kwargs): done = False if not done: result = f(*args, **kwargs) - hdu.header[fits_keyword]=time.asctime(time.localtime()) + hdu.header[fits_keyword] = time.asctime(time.localtime()) else: result = 0 logger.warning("Skipping " + f.__name__ + " (" + fits_keyword + " already set)") @@ -80,7 +72,7 @@ def f_wrapper(*args, **kwargs): # contract conditions def match_array_shapes(arglist, func_args=None, func_kwargs=None, func_result=None): - """A dbc contract condition that forces multiple arguments to be the same shape + """A dbc contract condition that forces multiple arguments to be the same shape A function suitable for passing as the first argument to the precondition, postcondition, or invariantcondition decorators in @@ -96,7 +88,7 @@ def match_array_shapes(arglist, func_args=None, func_kwargs=None, func_result=No ref_shape = func_args[arglist[0]].shape for i in arglist[1:]: if func_args[arglist[i]].shape != ref_shape: - raise ImageShapeException() + raise ArrayShapeException() def no_lib_error(func_args=None, func_kwargs=None, func_result=None): """A dbc contract condition that checks that the returned value is 0 @@ -105,18 +97,18 @@ def no_lib_error(func_args=None, func_kwargs=None, func_result=None): precondition, 
postcondition, or invariantcondition decorators in the dbc module. - Raises a LibraryException if the wrapped function returns a + Raises a LibraryException if the wrapped function returns a non-zero value. This is useful when wrapping C libraries that - follow the converntion that returning a non-zero value + follow the converntion that returning a non-zero value indicates an error. """ if func_result != 0: - raise LibraryException(func_result) + raise LibraryException(func_result) # classes # internal functions & classes -def items_must_match(d1,d2,*args): +def items_must_match(d1, d2, *args): """ Check that items in 2 headers (or any indexable objects) d1 and d2 are equal. Each of the args is used as an index. Logs an error and throws an exception @@ -128,19 +120,15 @@ def items_must_match(d1,d2,*args): v1 = d1[k] v2 = d2[k] except: - msg = 'Items missing that must match for key [{:s}]'.format(k) + msg = f"Items missing that must match for key [{k:s}]" logging.error(msg) raise MismatchError(msg) - if type(v1)==str: + if isinstance(v1, str): v1 = v1.strip() - if type(v2)==str: + if isinstance(v2, str): v2 = v2.strip() - if not v1==v2: - msg = 'Mismatch for key [{:s}]: '.format(k) + str(v1) + ' vs ' + str(v2) - print msg ### + if v1 != v2: + msg = f"Mismatch for key [{k:s}]: " + str(v1) + ' vs ' + str(v2) + print(msg) logging.error(msg) raise MismatchError(msg) - return - - - diff --git a/python/pixcorrect/cti.py b/python/pixcorrect/cti.py new file mode 100755 index 0000000..1985067 --- /dev/null +++ b/python/pixcorrect/cti.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python3 + +# $Id: cti.py 47952 2019-01-03 21:04:53Z rgruendl $ +# $Rev:: 47952 $: # Revision of last commit. +# $LastChangedBy:: rgruendl $: # Author of last commit. +# $LastChangedDate:: 2019-01-03 15:04:53 #$: # Date of last commit. 
+ +"""Perform CTI Search and Mask on image +""" + +from pixcorrect import cti_utils as cti +from pixcorrect import lightbulb_utils as lb +#from pixcorrect.corr_util import logger, do_once +from pixcorrect.corr_util import logger +from pixcorrect.PixCorrectDriver import PixCorrectImStep +#from despyfits.DESImage import DESImage, DESImageCStruct, section2slice, data_dtype + +# Which section of the config file to read for this step +config_section = 'cticheck' + +class CTIcheck(PixCorrectImStep): + description = "Search for indication that a known CTI (Charge Transfer Inefficiency) is active and mask" + step_name = config_section + + @classmethod + def __call__(cls, image): + """ + This is currently written with instance of CTI (Charge Transfer Inefficiency) in mind (that occuring + for CCD=41 during DES Y6). It may be generalizable if further cases occur (but not all the parts have + yet been written with a generalized case in mind). When CTI is detected the entire amplifier in + question will be masked with BADPIX_BADAMP + """ +# A simple dictionary with parameters for the only known case of CTI +# Currently explist is set to encompass DES Y6 (20180815 and beyond (expnum>765533) +# This could be tightened to a range as no CTI has been detected after November 2018 but +# it has not yet been systematicall watched for. + CTI = {41: {'amp': 'B', 'explist': '765533-'}} + + if image['CCDNUM'] in CTI: + +# +# Currently can re-use the function developed for lightbulb checking +# + check_for_light = lb.check_lightbulb_explist(image['EXPNUM'], CTI[image['CCDNUM']]['explist']) + if check_for_light: + logger.info(' CTI: Expnum={:d}, CCDNUM={:d}, in proscribed range checking for CTI'.format( + image['EXPNUM'], image['CCDNUM'])) + ctiDict = cti.check_cti(image, CTI[image['CCDNUM']], verbose=1) +# +# Current criterion: +# Looks for horizontal striping in image (with large deficits in counts that are not +# associated with an edge-bleed. 
+# Examines auto-correlation for lags in the x-direction at 5, 7, and 15 pixel offsets +# and compares to lags obtained from measurments in the diaganol direction. +# Looks for evidence of excessive power in the ratio between x-direction and diagnol sets +# that indicative that charge is bleeding in the x-direction. +# + if ctiDict['isCTI']: + image = cti.mask_cti(image, CTI[image['CCDNUM']], ctiDict, verbose=1) + logger.info(' CTI: Detected CTI for Exp={:d}, CCD={:d}, Amp={:s}'.format(image['EXPNUM'], image['CCDNUM'], CTI[image['CCDNUM']]['amp'])) + image.write_key('DES_CTI', 'Masked DATASEC{:s}'.format(CTI[image['CCDNUM']]['amp'])) + + logger.debug('Finished checking and applying mask CTI') + ret_code = 0 + return ret_code + + + @classmethod + def step_run(cls, image, config): + """Customized execution for check and masking of CTI + + :Parameters: + - `image`: the DESImage on which to operate +# - `config`: the configuration from which to get other parameters + + """ + logger.info('CTI check %s' % image) + + ret_code = cls.__call__(image) + return ret_code + + @classmethod + def add_step_args(cls, parser): + """Add arguments specific application of the gain correction + """ + +cticheck = CTIcheck() + +# internal functions & classes + +if __name__ == '__main__': + cticheck.main() diff --git a/python/pixcorrect/cti_utils.py b/python/pixcorrect/cti_utils.py new file mode 100755 index 0000000..ada8599 --- /dev/null +++ b/python/pixcorrect/cti_utils.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python3 + +# $Id: cti_utils.py 47952 2019-01-03 21:04:53Z rgruendl $ +# $Rev:: 47952 $: # Revision of last commit. +# $LastChangedBy:: rgruendl $: # Author of last commit. +# $LastChangedDate:: 2019-01-03 15:04:53 #$: # Date of last commit. 
+ +"""CTI masking functions +""" + +import numpy as np +from despyfits.DESImage import section2slice +from despyfits.maskbits import * +from pixcorrect import lightbulb_utils as lb +from pixcorrect.corr_util import logger +from pixcorrect import decaminfo + + +########################################### +def check_cti(image, CTI, verbose=0): + """Function to check for presence of CTI""" + +# +# Initialize ctiDict +# + ctiDict = {'isCTI': False} + ctiDict['expnum'] = image['EXPNUM'] + + # Also create the BAND and NITE keywords if they are not present + try: + image['BAND'] + except: + image['BAND'] = decaminfo.get_band(image['FILTER']) + try: + image['NITE'] + except: + image['NITE'] = decaminfo.get_nite(image['DATE-OBS']) + + band = image['BAND'].strip() + sec = section2slice(image['DATASEC' + CTI['amp']]) +# +# This could become useful if it is necessary to start examining the opposite amplifier in +# conjunction with the amplifier that is having a problem +# +# if (CTI['amp']=="A"): +# osec = section2slice(image['DATASEC'+'B']) +# else: +# osec = section2slice(image['DATASEC'+'A']) + + maxiter = 10 + converge_num = 0.0001 + clipsig = 3.0 + + clip_avg, clip_med, clip_std = lb.medclip(image.data[sec], clipsig, maxiter, converge_num, verbose=0) + logger.info(' CTI: Global(clipped): median = {:.3f}, stddev = {:.3f} '.format(clip_med, clip_std)) + ctiDict['cmed'] = float(clip_med) + ctiDict['cstd'] = float(clip_std) + clow = clip_med - (3.0 * clip_std) + ctiDict['clow'] = float(clow) + +# oclip_avg,oclip_med,oclip_std=medclip(image.data[osec],clipsig,maxiter,converge_num,verbose) +# print(" Global(oclipped): median = {:.3f}, stddev = {:.3f} ".format(oclip_med,oclip_std)) +# oclow=oclip_med-(3.0*oclip_std) + +# +# Obtain row-by-row median to look for horizontal striping (also needed to check/reject edgebleeds) +# + row_med = np.median(image.data[sec], axis=1) + wsm = np.where(row_med < clow) + nrow_low = row_med[wsm].size +# +# Hacky attempt to check for edge-bleed +# 
+ iedge = [4, 4091] + while row_med[iedge[0]] < clow: + iedge[0] = iedge[0] + 1 + while row_med[iedge[1]] < clow: + iedge[1] = iedge[1] - 1 + if iedge[0] == 4: + iedge[0] = 0 + if iedge[1] == 4091: + iedge[1] = 4095 + nrow_edge = 4096 - (iedge[1] - iedge[0] + 1) + logger.info(' CTI: Number of low rows: {:d} (nrow_edge={:d}) '.format(nrow_low, nrow_edge)) + +# +# Blank out pixels that are below the 3-sigma level with respect to median +# This removes power from vertical stripes +# + wsm = np.where(image.data[sec] < clow) + npix_low = image.data[sec][wsm].size + logger.info(' CTI: Number of low pixels: {:d} '.format(npix_low)) + u = image.data[sec] - clip_med + u[wsm] = 0.0 +# +# Harder cut currently not needed. If used this would get rid of all pixels below the median level +# (effectively this reduces the amount that noise suppresses contrast of the auto-correlation signal from CTI) +# +# wsm=np.where(u<0.) +# npix_zero=u[wsm].size +# logger.info(' CTI: Number of sub-zero pixels: {:d} '.format(npix_zero)) +# u[wsm]=0.0 + +# +# Calculate a set of auto-correlations by sampling lags in the x-direction and +# then two diaganol sets at PA=+/-45 degrees +# Note: y-direction lags would be succeptible to both bad columns and bleeds. +# These are normalized by the auto-correlation with lag 0 (defined as 'a' below). +# Take a maximum lag that will be calculated and use that to trim the image. 
+# Note: This both gets rid of most edge-effects automatically but also removes the need to calculate an effective normalization for higher lags +# + maxlag = 100 + lagList = [0, 1, 3, 5, 7, 11, 15, 19, 23, 31, 37, 45] + + a = np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag:-maxlag, maxlag:-maxlag]) +# b=np.sum(v[maxlag:-maxlag,maxlag:-maxlag]*v[maxlag:-maxlag,maxlag:-maxlag]) + x = [1.0] + d1 = [1.0] + d2 = [1.0] +# vx=[1.0] +# vd1=[1.0] +# vd2=[1.0] +# +# More lags than those sampled are needed because the diagonal (PA=+/-45) measures will need to be interpolated +# for comaparison to lags in the x-direction. +# + + for lag in lagList: + if lag != 0: + x.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag:-maxlag, maxlag - lag:-maxlag - lag]) / a) + d1.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag-lag:-maxlag - lag, maxlag - lag:-maxlag - lag]) / a) + d2.append(np.sum(u[maxlag:-maxlag, maxlag:-maxlag] * u[maxlag-lag:-maxlag - lag, maxlag + lag:-maxlag + lag]) / a) +# vx.append(np.sum(v[maxlag:-maxlag,maxlag:-maxlag]*v[maxlag:-maxlag,maxlag-lag:-maxlag-lag])/b) +# vd1.append(np.sum(v[maxlag:-maxlag,maxlag:-maxlag]*v[maxlag-lag:-maxlag-lag,maxlag-lag:-maxlag-lag])/b) +# vd2.append(np.sum(v[maxlag:-maxlag,maxlag:-maxlag]*v[maxlag-lag:-maxlag-lag,maxlag+lag:-maxlag+lag])/b) + + data = {'lag': np.array(lagList), + 'x': np.array(x), + 'd1': np.array(d1), + 'd2': np.array(d2) +# 'vx':np.array(vx), +# 'vd1':np.array(vd1), +# 'vd2':np.array(vd2) + } + + r2 = np.sqrt(2.0) + l1 = data['lag'] + l2 = data['lag'] * r2 + x1 = data['x'] + d1i = np.interp(data['lag'], l2, data['d1']) + d2i = np.interp(data['lag'], l2, data['d2']) + rd1 = data['x'] / d1i + rd2 = data['x'] / d2i + +# vx1=data['vx'] +# vd1i=np.interp(data['lag'],l2,data['vd1']) +# vd2i=np.interp(data['lag'],l2,data['vd2']) +# vrd1=data['vx']/vd1i +# vrd2=data['vx']/vd2i +## vdx=data['x']/data['vx'] +# vdx=(rd1+rd2)/(vrd1+vrd2) + + logger.info(' CTI: lags {:8.2f} {:8.2f} {:8.2f} {:8.2f} 
{:8.2f} '.format(l1[3], l1[4], l1[6], l1[8], l1[10])) + logger.info(' CTI: lx {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(x1[3], x1[4], x1[6], x1[8], x1[10])) + logger.info(' CTI: d1i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(d1i[3], d1i[4], d1i[6], d1i[8], d1i[10])) + logger.info(' CTI: d2i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(d2i[3], d2i[4], d2i[6], d2i[8], d2i[10])) + logger.info(' CTI: ld1 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(rd1[3], rd1[4], rd1[6], rd1[8], rd1[10])) + logger.info(' CTI: ld2 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(rd2[3], rd2[4], rd2[6], rd2[8], rd2[10])) +# logger.info(' CTI: lvx {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vx1[3],vx1[4],vx1[6],vx1[8],vx1[10])) +# logger.info(' CTI:vd1i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vd1i[3],vd1i[4],vd1i[6],vd1i[8],vd1i[10])) +# logger.info(' CTI:vd2i {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vd2i[3],vd2i[4],vd2i[6],vd2i[8],vd2i[10])) +# logger.info(' CTI:vld1 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vrd1[3],vrd1[4],vrd1[6],vrd1[8],vrd1[10])) +# logger.info(' CTI:vld2 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vrd2[3],vrd2[4],vrd2[6],vrd2[8],vrd2[10])) +# logger.info(' CTI:vdx0 {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} '.format(vdx[3],vdx[4],vdx[6],vdx[8],vdx[10])) + +# +# Set band dependent thresholds... +# Note the criteria used are based on an empirical study of the one example we currently have (CCD=41, Y6) +# + nrow_lim = 5 + if band != "Y": + cclim = 0.9 + else: + cclim = 1.15 +# +# Now check and set flag based on empirical critera. +# First are the horizontal streaks that can appear... 
+# Second are the comparison of the auto-correlation in the x and average of the diaganol directrions +# + flag_cti = False + if nrow_low - nrow_edge >= nrow_lim: + flag_cti = True + + avg_rd = (rd1 + rd2) / 2.0 + if avg_rd[3] > cclim and avg_rd[4] > cclim and avg_rd[6] > cclim: + flag_cti = True + + if flag_cti: + ctiDict['isCTI'] = True + + return ctiDict + + +########################################### +def mask_cti(image, CTI, ctiDict, verbose=0): + """Function to mask the region affected by a lightbulb""" + + if ctiDict['isCTI']: + sec = section2slice(image['DATASEC' + CTI['amp']]) + image.mask[sec] |= BADPIX_BADAMP + + logger.info(' CTI: mask applied to image') + + return image diff --git a/python/pixcorrect/dbc.py b/python/pixcorrect/dbc.py index 97c178d..ffcf293 100644 --- a/python/pixcorrect/dbc.py +++ b/python/pixcorrect/dbc.py @@ -1,20 +1,20 @@ """Simple, limited Design by Contract (DbC) module for python Design by Contract (DbC) is a methodology for software developement in -which each component has well defined expectations and obligations, +which each component has well defined expectations and obligations, enforced through testing preconditions, postconditions, and invariants. To use the module, define python functions that enforce any assertions required. Each enforcement function must accept the following keyword arguments: -func_args +func_args A python list listing the arguments being supplied to the function whose contract is being enforced. func_kwargs A python dictionary with the keyword arguments being supplied to the - function whose contract is being enforced. Be sure to handle the case + function whose contract is being enforced. Be sure to handle the case when the keyword arguments are not supplied. func_result @@ -24,27 +24,27 @@ that specify that certain arguments, keyword values, and returns fall in a certain range of values: ->>> def arg_in_range(min_value, max_value, arg_num, +>>> def arg_in_range(min_value, max_value, arg_num, ... 
func_args=None, func_kwargs=None, func_result=None): -... msg_template = "Argument %d not in range (%f to %f)" +... msg_template = "Argument %d not in range (%f to %f)" ... msg = msg_template % (arg_num, min_value, max_value) ... assert min_value < func_args[arg_num] < max_value, msg -... +... >>> def kw_in_range(min_value, max_value, kw, ... func_args=None, func_kwargs=None, func_result=None): ... if kw in func_kwargs: -... msg_template = "Value of keyword %s not in range (%f to %f)" +... msg_template = "Value of keyword %s not in range (%f to %f)" ... msg = msg_template % (kw, min_value, max_value) ... assert min_value < func_kwargs[kw] < max_value, msg -... ->>> +... +>>> >>> def result_in_range(min_value, max_value, ... func_args=None, func_kwargs=None, func_result=None): ... msg_template = "Result not in range (%f to %f)" ... msg = msg_template % (min_value, max_value) ... assert min_value < func_result < max_value, msg -... ->>> +... +>>> I can then use the precondition and postcondition python decorators supplied by this module to apply them to my function, and providing @@ -56,8 +56,8 @@ ... @postcondition(result_in_range, 2, 9) ... def foo(x, dx=5): ... return x+dx -... ->>> +... 
+>>> If all contract conditions are met, the decorators allow the function to be called normally: @@ -76,41 +76,41 @@ check_f(*check_args, **check_kwargs) File "", line 6, in arg_in_range AssertionError: Argument 0 not in range (0.000000 to 10.000000) ->>> +>>> >>> print foo(8, dx=0) Traceback (most recent call last): File "", line 1, in File "dbc.py", line 11, in func_wrapper - result = f(*args, **kwargs) + result = f(*args, **kwargs) File "dbc.py", line 10, in func_wrapper check_f(*check_args, **check_kwargs) File "", line 7, in kw_in_range AssertionError: Value of keyword dx not in range (1.000000 to 12.000000) ->>> +>>> >>> print foo(8, dx=4) Traceback (most recent call last): File "", line 1, in File "dbc.py", line 11, in func_wrapper - result = f(*args, **kwargs) + result = f(*args, **kwargs) File "dbc.py", line 11, in func_wrapper - result = f(*args, **kwargs) + result = f(*args, **kwargs) File "dbc.py", line 14, in func_wrapper check_f(*check_args, **check_kwargs) File "", line 5, in result_in_range AssertionError: Result not in range (2.000000 to 9.000000) ->>> ->>> +>>> +>>> The processess for enforcing invariants is similar: >>> def keep_arg_len(argnum, func_args=None, func_kwargs=None): ... n = len( func_args[argnum] ) ... return n -... +... >>> def keep_arg_max(argnum, func_args=None, func_kwargs=None): ... m = max( func_args[argnum] ) ... return m -... +... >>> @invariant_condition(keep_arg_len, "arg length changed", 0) ... @invariant_condition(keep_arg_max, "max changed", 0) ... def foo(x, n, y): @@ -118,26 +118,26 @@ ... x.append(y) ... else: ... x[n]=7 -... +... 
>>> foo( [1, 9, 3, 2], 2, 5 ) ->>> +>>> >>> foo( [1, 9, 3, 2], 1, 5 ) Traceback (most recent call last): File "", line 1, in File "dbc.py", line 134, in func_wrapper - result = f(*args, **kwargs) + result = f(*args, **kwargs) File "dbc.py", line 136, in func_wrapper assert after==before, message AssertionError: max changed ->>> +>>> >>> foo( [1, 9, 3, 2], 6, 5 ) Traceback (most recent call last): File "", line 1, in File "dbc.py", line 136, in func_wrapper assert after==before, message AssertionError: arg length changed ->>> ->>> +>>> +>>> """ @@ -152,7 +152,7 @@ def func_wrapper(*args, **kwargs): check_kwargs['func_kwargs'] = kwargs if pre: check_f(*check_args, **check_kwargs) - result = f(*args, **kwargs) + result = f(*args, **kwargs) if post: check_kwargs['func_result'] = result check_f(*check_args, **check_kwargs) @@ -167,16 +167,16 @@ def invariant_condition(check_f, message, *check_args, **check_kwargs): # The message parameter is included here but not in pre or post condiditons, # because there is no good way to set the message from within check_f in the # case of invariants, but it is the most covenvient way for pre and post-conditions. - + def make_func_wrapper(f): @wraps(f) def func_wrapper(*args, **kwargs): check_kwargs['func_args'] = args check_kwargs['func_kwargs'] = kwargs before = check_f(*check_args, **check_kwargs) - result = f(*args, **kwargs) + result = f(*args, **kwargs) after = check_f(*check_args, **check_kwargs) - assert after==before, message + assert after == before, message return result return func_wrapper return make_func_wrapper diff --git a/python/pixcorrect/decaminfo.py b/python/pixcorrect/decaminfo.py index 6f76d98..21b4685 100644 --- a/python/pixcorrect/decaminfo.py +++ b/python/pixcorrect/decaminfo.py @@ -1,19 +1,32 @@ +# $Id: decaminfo.py 47951 2019-01-03 20:58:17Z rgruendl $ +# $Rev:: 47951 $: # Revision of last commit. +# $LastChangedBy:: rgruendl $: # Author of last commit. 
+# $LastChangedDate:: 2019-01-03 14:58:17 #$: # Date of last commit. + """ Compendium of information on the geometry of the DECam CCDs """ import calendar -amps = ('A','B') # Possible amplifier choices +amps = ('A', 'B') # Possible amplifier choices -shape = (4096,2048) # Shape of the science array, in numpy format +shape = (4096, 2048) # Shape of the science array, in numpy format def get_band(filter): # Function to return a short BAND string given the FILTER string band = filter.strip()[0] - if band=='V': + if band == 'V': band = 'VR' - elif band not in ('u','g','r','i','z','Y'): + elif band == 'N': +# Currently the only known narrowband filter is N964 + if filter.strip()[0:4] == "N964": + band = 'N964' + else: + band = 'X' + elif band not in ('u', 'g', 'r', 'i', 'z', 'Y'): band = 'X' + if band == "X": + print(f"Warning: get_band given an unrecognized filter pattern. FILTER keyword given as: {filter.strip():s}") return band def get_nite(date_obs): @@ -22,90 +35,90 @@ def get_nite(date_obs): v = date_obs.split(':') hh = int(v[0].split('-')[2][-2:]) if hh > 14: - nite = v[0][:-3].replace('-','') + nite = v[0][:-3].replace('-', '') else: y = int(v[0][0:4]) m = int(v[0][5:7]) d = int(v[0][8:10])-1 - if d==0: - m = m - 1 - if m==0: + if d == 0: + m -= 1 + if m == 0: m = 12 - y = y - 1 - d = calendar.monthrange(y,m)[1] - nite = str(y).zfill(4)+str(m).zfill(2)+str(d).zfill(2) - return nite + y -= 1 + d = calendar.monthrange(y, m)[1] + nite = str(y).zfill(4)+str(m).zfill(2) + str(d).zfill(2) + return nite # ccdnums is a dictionary mapping the DETPOS values into their CCDNUM values -ccdnums = {'S29': 1, - 'S30': 2, - 'S31': 3, - 'S25': 4, - 'S26': 5, - 'S27': 6, - 'S28': 7, - 'S20': 8, - 'S21': 9, - 'S22': 10, - 'S23': 11, - 'S24': 12, - 'S14': 13, - 'S15': 14, - 'S16': 15, - 'S17': 16, - 'S18': 17, - 'S19': 18, - 'S8': 19, - 'S9': 20, - 'S10': 21, - 'S11': 22, - 'S12': 23, - 'S13': 24, - 'S1': 25, - 'S2': 26, - 'S3': 27, - 'S4': 28, - 'S5': 29, - 'S6': 30, - 'S7': 31, - 
'N1': 32, - 'N2': 33, - 'N3': 34, - 'N4': 35, - 'N5': 36, - 'N6': 37, - 'N7': 38, - 'N8': 39, - 'N9': 40, - 'N10': 41, - 'N11': 42, - 'N12': 43, - 'N13': 44, - 'N14': 45, - 'N15': 46, - 'N16': 47, - 'N17': 48, - 'N18': 49, - 'N19': 50, - 'N20': 51, - 'N21': 52, - 'N22': 53, - 'N23': 54, - 'N24': 55, - 'N25': 56, - 'N26': 57, - 'N27': 58, - 'N28': 59, - 'N29': 60, - 'N30': 61, - 'N31': 62} +ccdnums = {'S29': 1, + 'S30': 2, + 'S31': 3, + 'S25': 4, + 'S26': 5, + 'S27': 6, + 'S28': 7, + 'S20': 8, + 'S21': 9, + 'S22': 10, + 'S23': 11, + 'S24': 12, + 'S14': 13, + 'S15': 14, + 'S16': 15, + 'S17': 16, + 'S18': 17, + 'S19': 18, + 'S8': 19, + 'S9': 20, + 'S10': 21, + 'S11': 22, + 'S12': 23, + 'S13': 24, + 'S1': 25, + 'S2': 26, + 'S3': 27, + 'S4': 28, + 'S5': 29, + 'S6': 30, + 'S7': 31, + 'N1': 32, + 'N2': 33, + 'N3': 34, + 'N4': 35, + 'N5': 36, + 'N6': 37, + 'N7': 38, + 'N8': 39, + 'N9': 40, + 'N10': 41, + 'N11': 42, + 'N12': 43, + 'N13': 44, + 'N14': 45, + 'N15': 46, + 'N16': 47, + 'N17': 48, + 'N18': 49, + 'N19': 50, + 'N20': 51, + 'N21': 52, + 'N22': 53, + 'N23': 54, + 'N24': 55, + 'N25': 56, + 'N26': 57, + 'N27': 58, + 'N28': 59, + 'N29': 60, + 'N30': 61, + 'N31': 62} detpos_dict = {} """ A dictionary of detpos strings keyed on ccdnum values """ -for k,v in ccdnums.items(): +for k, v in ccdnums.items(): detpos_dict[v] = k """ @@ -113,141 +126,143 @@ def get_nite(date_obs): The format here is (x,y), with 1-indexed positions, a la FITS. Note that x coord is offset by 2048 because of guide CCDs. 
""" -ccdCorners = {'N1': ( 14337, 1), - 'N2': ( 14337, 4097), - 'N3': ( 14337, 8193), - 'N4': ( 14337, 12289), - 'N5': ( 14337, 16385), - 'N6': ( 14337, 20481), - 'N7': ( 14337, 24577), - 'N8': ( 16385, 2049), - 'N9': ( 16385, 6145), - 'N10': ( 16385, 10241), - 'N11': ( 16385, 14337), - 'N12': ( 16385, 18433), - 'N13': ( 16385, 22529), - 'N14': ( 18433, 2049), - 'N15': ( 18433, 6145), - 'N16': ( 18433, 10241), - 'N17': ( 18433, 14337), - 'N18': ( 18433, 18433), - 'N19': ( 18433, 22529), - 'N20': ( 20481, 4097), - 'N21': ( 20481, 8193), - 'N22': ( 20481, 12289), - 'N23': ( 20481, 16385), - 'N24': ( 20481, 20481), - 'N25': ( 22529, 6145), - 'N26': ( 22529, 10241), - 'N27': ( 22529, 14337), - 'N28': ( 22529, 18433), - 'N29': ( 24577, 8193), - 'N30': ( 24577, 12289), ### dead CCD!! - 'N31': ( 24577, 16385), - 'S1': ( 12289, 1), - 'S2': ( 12289, 4097), - 'S3': ( 12289, 8193), - 'S4': ( 12289, 12289), - 'S5': ( 12289, 16385), - 'S6': ( 12289, 20481), - 'S7': ( 12289, 24577), - 'S8': ( 10241, 2049), - 'S9': ( 10241, 6145), - 'S10': ( 10241, 10241), - 'S11': ( 10241, 14337), - 'S12': ( 10241, 18433), - 'S13': ( 10241, 22529), - 'S14': ( 8193, 2049), - 'S15': ( 8193, 6145), - 'S16': ( 8193, 10241), - 'S17': ( 8193, 14337), - 'S18': ( 8193, 18433), - 'S19': ( 8193, 22529), - 'S20': ( 6145, 4097), - 'S21': ( 6145, 8193), - 'S22': ( 6145, 12289), - 'S23': ( 6145, 16385), - 'S24': ( 6145, 20481), - 'S25': ( 4097, 6145), - 'S26': ( 4097, 10241), - 'S27': ( 4097, 14337), - 'S28': ( 4097, 18433), - 'S29': ( 2049, 8193), - 'S30': ( 2049, 12289), - 'S31': ( 2049, 16385) } +ccdCorners = {'N1': (14337, 1), + 'N2': (14337, 4097), + 'N3': (14337, 8193), + 'N4': (14337, 12289), + 'N5': (14337, 16385), + 'N6': (14337, 20481), + 'N7': (14337, 24577), + 'N8': (16385, 2049), + 'N9': (16385, 6145), + 'N10': (16385, 10241), + 'N11': (16385, 14337), + 'N12': (16385, 18433), + 'N13': (16385, 22529), + 'N14': (18433, 2049), + 'N15': (18433, 6145), + 'N16': (18433, 10241), + 'N17': (18433, 14337), 
+ 'N18': (18433, 18433), + 'N19': (18433, 22529), + 'N20': (20481, 4097), + 'N21': (20481, 8193), + 'N22': (20481, 12289), + 'N23': (20481, 16385), + 'N24': (20481, 20481), + 'N25': (22529, 6145), + 'N26': (22529, 10241), + 'N27': (22529, 14337), + 'N28': (22529, 18433), + 'N29': (24577, 8193), + 'N30': (24577, 12289), ### dead CCD!! + 'N31': (24577, 16385), + 'S1': (12289, 1), + 'S2': (12289, 4097), + 'S3': (12289, 8193), + 'S4': (12289, 12289), + 'S5': (12289, 16385), + 'S6': (12289, 20481), + 'S7': (12289, 24577), + 'S8': (10241, 2049), + 'S9': (10241, 6145), + 'S10': (10241, 10241), + 'S11': (10241, 14337), + 'S12': (10241, 18433), + 'S13': (10241, 22529), + 'S14': (8193, 2049), + 'S15': (8193, 6145), + 'S16': (8193, 10241), + 'S17': (8193, 14337), + 'S18': (8193, 18433), + 'S19': (8193, 22529), + 'S20': (6145, 4097), + 'S21': (6145, 8193), + 'S22': (6145, 12289), + 'S23': (6145, 16385), + 'S24': (6145, 20481), + 'S25': (4097, 6145), + 'S26': (4097, 10241), + 'S27': (4097, 14337), + 'S28': (4097, 18433), + 'S29': (2049, 8193), + 'S30': (2049, 12289), + 'S31': (2049, 16385) + } """ A dictionary giving the approx corner positions of each CCD on the sky, in degrees from center of the focal plane. Tuple is (xmin, xmax, ymin, ymax) with x to E and y to N. 
""" ccdBounds = {'N1': (-1.0811, -0.782681, -0.157306, -0.00750506), - 'N2': (-0.771362, -0.472493, -0.157385, -0.00749848), - 'N3': (-0.461205, -0.161464, -0.157448, -0.00749265), - 'N4': (-0.150127, 0.149894, -0.15747, -0.00749085), - 'N5': (0.161033, 0.460796, -0.157638, -0.0074294), - 'N6': (0.472171, 0.771045, -0.157286, -0.00740563), - 'N7': (0.782398, 1.08083, -0.157141, -0.0074798), - 'N8': (-0.92615, -0.627492, -0.321782, -0.172004), - 'N9': (-0.616455, -0.317043, -0.322077, -0.172189), - 'N10': (-0.305679, -0.00571999, -0.322071, -0.17217), - 'N11': (0.00565427, 0.305554, -0.322243, -0.172254), - 'N12': (0.31684, 0.616183, -0.322099, -0.172063), - 'N13': (0.627264, 0.925858, -0.321792, -0.171887), - 'N14': (-0.926057, -0.62726, -0.485961, -0.336213), - 'N15': (-0.616498, -0.317089, -0.486444, -0.336606), - 'N16': (-0.30558, -0.00578257, -0.486753, -0.336864), - 'N17': (0.00532179, 0.305123, -0.486814, -0.33687), - 'N18': (0.316662, 0.616018, -0.486495, -0.336537), - 'N19': (0.62708, 0.92578, -0.485992, -0.336061), - 'N20': (-0.770814, -0.471826, -0.650617, -0.500679), - 'N21': (-0.460777, -0.161224, -0.650817, -0.501097), - 'N22': (-0.149847, 0.149886, -0.650816, -0.501308), - 'N23': (0.161001, 0.460566, -0.650946, -0.501263), - 'N24': (0.47163, 0.770632, -0.650495, -0.500592), - 'N25': (-0.615548, -0.316352, -0.814774, -0.665052), - 'N26': (-0.305399, -0.00591217, -0.814862, -0.665489), - 'N27': (0.00550714, 0.304979, -0.815022, -0.665418), - 'N28': (0.316126, 0.615276, -0.814707, -0.664908), - 'N29': (-0.46018, -0.16101, -0.97887, -0.829315), - 'N31': (0.160884, 0.460147, -0.978775, -0.829426), - 'S1': (-1.08096, -0.782554, 0.00715956, 0.15689), - 'S2': (-0.7713, -0.47242, 0.0074194, 0.157269), - 'S3': (-0.4611, -0.161377, 0.00723009, 0.157192), - 'S4': (-0.149836, 0.150222, 0.00737069, 0.157441), - 'S5': (0.161297, 0.461031, 0.0072399, 0.1572), - 'S6': (0.472537, 0.771441, 0.00728934, 0.157137), - 'S7': (0.782516, 1.08097, 0.00742809, 0.15709), - 'S8': 
(-0.92583, -0.627259, 0.171786, 0.32173), - 'S9': (-0.616329, -0.31694, 0.171889, 0.321823), - 'S10': (-0.305695, -0.00579187, 0.172216, 0.322179), - 'S11': (0.00556739, 0.305472, 0.172237, 0.322278), - 'S12': (0.316973, 0.61631, 0.172015, 0.322057), - 'S13': (0.627389, 0.925972, 0.171749, 0.321672), - 'S14': (-0.925847, -0.627123, 0.335898, 0.48578), - 'S15': (-0.616201, -0.316839, 0.336498, 0.486438), - 'S16': (-0.305558, -0.00574858, 0.336904, 0.486749), - 'S17': (0.00557115, 0.305423, 0.33675, 0.486491), - 'S18': (0.316635, 0.615931, 0.33649, 0.486573), - 'S19': (0.627207, 0.925969, 0.336118, 0.485923), - 'S20': (-0.770675, -0.471718, 0.500411, 0.65042), - 'S21': (-0.46072, -0.161101, 0.501198, 0.650786), - 'S22': (-0.149915, 0.14982, 0.501334, 0.650856), - 'S23': (0.160973, 0.460482, 0.501075, 0.650896), - 'S24': (0.47167, 0.770647, 0.50045, 0.650441), - 'S25': (-0.615564, -0.316325, 0.66501, 0.814674), - 'S26': (-0.30512, -0.0056517, 0.665531, 0.81505), - 'S27': (0.00560886, 0.305082, 0.665509, 0.815022), - 'S28': (0.316158, 0.615391, 0.665058, 0.814732), - 'S29': (-0.46021, -0.160988, 0.829248, 0.978699), - 'S30': (-0.150043, 0.149464, 0.829007, 0.978648), - 'S31': (0.160898, 0.460111, 0.82932, 0.978804) } + 'N2': (-0.771362, -0.472493, -0.157385, -0.00749848), + 'N3': (-0.461205, -0.161464, -0.157448, -0.00749265), + 'N4': (-0.150127, 0.149894, -0.15747, -0.00749085), + 'N5': (0.161033, 0.460796, -0.157638, -0.0074294), + 'N6': (0.472171, 0.771045, -0.157286, -0.00740563), + 'N7': (0.782398, 1.08083, -0.157141, -0.0074798), + 'N8': (-0.92615, -0.627492, -0.321782, -0.172004), + 'N9': (-0.616455, -0.317043, -0.322077, -0.172189), + 'N10': (-0.305679, -0.00571999, -0.322071, -0.17217), + 'N11': (0.00565427, 0.305554, -0.322243, -0.172254), + 'N12': (0.31684, 0.616183, -0.322099, -0.172063), + 'N13': (0.627264, 0.925858, -0.321792, -0.171887), + 'N14': (-0.926057, -0.62726, -0.485961, -0.336213), + 'N15': (-0.616498, -0.317089, -0.486444, -0.336606), + 'N16': 
(-0.30558, -0.00578257, -0.486753, -0.336864), + 'N17': (0.00532179, 0.305123, -0.486814, -0.33687), + 'N18': (0.316662, 0.616018, -0.486495, -0.336537), + 'N19': (0.62708, 0.92578, -0.485992, -0.336061), + 'N20': (-0.770814, -0.471826, -0.650617, -0.500679), + 'N21': (-0.460777, -0.161224, -0.650817, -0.501097), + 'N22': (-0.149847, 0.149886, -0.650816, -0.501308), + 'N23': (0.161001, 0.460566, -0.650946, -0.501263), + 'N24': (0.47163, 0.770632, -0.650495, -0.500592), + 'N25': (-0.615548, -0.316352, -0.814774, -0.665052), + 'N26': (-0.305399, -0.00591217, -0.814862, -0.665489), + 'N27': (0.00550714, 0.304979, -0.815022, -0.665418), + 'N28': (0.316126, 0.615276, -0.814707, -0.664908), + 'N29': (-0.46018, -0.16101, -0.97887, -0.829315), + 'N31': (0.160884, 0.460147, -0.978775, -0.829426), + 'S1': (-1.08096, -0.782554, 0.00715956, 0.15689), + 'S2': (-0.7713, -0.47242, 0.0074194, 0.157269), + 'S3': (-0.4611, -0.161377, 0.00723009, 0.157192), + 'S4': (-0.149836, 0.150222, 0.00737069, 0.157441), + 'S5': (0.161297, 0.461031, 0.0072399, 0.1572), + 'S6': (0.472537, 0.771441, 0.00728934, 0.157137), + 'S7': (0.782516, 1.08097, 0.00742809, 0.15709), + 'S8': (-0.92583, -0.627259, 0.171786, 0.32173), + 'S9': (-0.616329, -0.31694, 0.171889, 0.321823), + 'S10': (-0.305695, -0.00579187, 0.172216, 0.322179), + 'S11': (0.00556739, 0.305472, 0.172237, 0.322278), + 'S12': (0.316973, 0.61631, 0.172015, 0.322057), + 'S13': (0.627389, 0.925972, 0.171749, 0.321672), + 'S14': (-0.925847, -0.627123, 0.335898, 0.48578), + 'S15': (-0.616201, -0.316839, 0.336498, 0.486438), + 'S16': (-0.305558, -0.00574858, 0.336904, 0.486749), + 'S17': (0.00557115, 0.305423, 0.33675, 0.486491), + 'S18': (0.316635, 0.615931, 0.33649, 0.486573), + 'S19': (0.627207, 0.925969, 0.336118, 0.485923), + 'S20': (-0.770675, -0.471718, 0.500411, 0.65042), + 'S21': (-0.46072, -0.161101, 0.501198, 0.650786), + 'S22': (-0.149915, 0.14982, 0.501334, 0.650856), + 'S23': (0.160973, 0.460482, 0.501075, 0.650896), + 'S24': 
(0.47167, 0.770647, 0.50045, 0.650441), + 'S25': (-0.615564, -0.316325, 0.66501, 0.814674), + 'S26': (-0.30512, -0.0056517, 0.665531, 0.81505), + 'S27': (0.00560886, 0.305082, 0.665509, 0.815022), + 'S28': (0.316158, 0.615391, 0.665058, 0.814732), + 'S29': (-0.46021, -0.160988, 0.829248, 0.978699), + 'S30': (-0.150043, 0.149464, 0.829007, 0.978648), + 'S31': (0.160898, 0.460111, 0.82932, 0.978804) + } """ Now build another dictionary that gives the CCD centers on the sky in degree system again. These will be (x,y) tuples. """ ccdCenters = {} -for detpos,bounds in ccdBounds.items(): - ccdCenters[detpos] = ( 0.5*(bounds[0]+bounds[1]), 0.5*(bounds[2]+bounds[3])) +for detpos, bounds in ccdBounds.items(): + ccdCenters[detpos] = (0.5 * (bounds[0] + bounds[1]), 0.5 * (bounds[2] + bounds[3])) def minimalHeader(detpos, h=None): """ @@ -255,7 +270,7 @@ def minimalHeader(detpos, h=None): needed to show the mosaic on DS9, plus DETSEC info that give IRAF-style mosaic display. Plus the DETPOS and CCDNUM values, with the EXTNAME set to the detpos If h is None, a new dict is created. 
- Returns h + Returns h """ if h is None: h = {} @@ -271,9 +286,9 @@ def minimalHeader(detpos, h=None): h['CD2_1'] = -7.286e-5 h['CTYPE1'] = 'RA---TAN' h['CTYPE2'] = 'DEC--TAN' - x,y = ccdCorners[detpos] - h['DETSEC']='[{:d}:{:d},{:d}:{:d}]'.format(x, x+2047, y, y+4095) - h['CRPIX2']=14826.- ((y-1)/2048)*2129.6667 - h['CRPIX1']=13423.2- ((x-1)/2048)*2254.4 - + x, y = ccdCorners[detpos] + h['DETSEC'] = '[{:d}:{:d},{:d}:{:d}]'.format(x, x + 2047, y, y + 4095) + h['CRPIX2'] = 14826.- ((y - 1) / 2048) * 2129.6667 + h['CRPIX1'] = 13423.2- ((x - 1) / 2048) * 2254.4 + return h diff --git a/python/pixcorrect/find_flat_normalization.py b/python/pixcorrect/find_flat_normalization.py index fdb4546..fc6b435 100755 --- a/python/pixcorrect/find_flat_normalization.py +++ b/python/pixcorrect/find_flat_normalization.py @@ -1,18 +1,14 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Find a normalization for a set of flat field images """ -import ctypes -import sys import os #from os import path import fitsio import numpy as np -from pixcorrect import proddir -from pixcorrect.corr_util import logger, load_shlib -from despyfits.DESImage import DESImage, DESImageCStruct, scan_fits_section, data_dtype + +from pixcorrect.corr_util import logger from pixcorrect.PixCorrectDriver import PixCorrectStep, filelist_to_list -from pixcorrect import decaminfo # Which section of the config file to read for this step config_section = 'findflatnorm' @@ -32,48 +28,46 @@ def __call__(cls, in_filenames, ccdnorm, outnorm): """Apply a flat field correction to an image :Parameters: - - `in_filenames`: list of input DESImage(s) to use to determine the normalization factor - - `ccdnorm`: -1-->normalize to median of all files, or to image with CCDNUM=ccdnorm + - `in_filenames`: list of input DESImage(s) to use to determine the normalization factor + - `ccdnorm`: -1-->normalize to median of all files, or to image with CCDNUM=ccdnorm - `outnorm`: output file name to write the normalization factor Applies the 
correction to each input and writes a separate output file. """ - + logger.info('Initial Read of Flat Field Headers') # - norm_list=[] - scalmean_list=[] - normval=None + norm_list = [] + scalmean_list = [] + normval = None # for filename in in_filenames: - if (os.path.isfile(filename)): - tmp_dict={} - tmp_dict['fname']=filename - if (tmp_dict['fname'][-2:] == "fz"): - sci_hdu=1 # for .fz + if os.path.isfile(filename): + tmp_dict = {} + tmp_dict['fname'] = filename + if tmp_dict['fname'].endswith("fz"): + sci_hdu = 1 # for .fz else: - sci_hdu=0 # for .fits (or .gz) - temp_fits=fitsio.FITS(tmp_dict['fname'],'r') - temp_head=temp_fits[sci_hdu].read_header() + sci_hdu = 0 # for .fits (or .gz) + temp_fits = fitsio.FITS(tmp_dict['fname'], 'r') + temp_head = temp_fits[sci_hdu].read_header() # # Get the CCD number # try: - tmp_dict['ccdnum']=int(temp_head['CCDNUM']) + tmp_dict['ccdnum'] = int(temp_head['CCDNUM']) except: - if (ccdnorm < 1): - tmp_dict['ccdnum']=-1 - pass + if ccdnorm < 1: + tmp_dict['ccdnum'] = -1 else: - print("Warning: image {:s} did not have a CCDNUM keyword!".format(tmp_dict['fname'])) - pass + print(f"Warning: image {tmp_dict['fname']:s} did not have a CCDNUM keyword!") # # Get the SCALMEAN value # try: - tmp_dict['scalmean']=float(temp_head['SCALMEAN']) + tmp_dict['scalmean'] = float(temp_head['SCALMEAN']) except: - raise ValueError("Image %s did not have a SCALMEAN keyword. Aborting!" % tmp_dict['fname']) + raise ValueError(f"Image {tmp_dict['fname']:s} did not have a SCALMEAN keyword. Aborting!") # # Finished first header census # Save file info and scalmean's to a list @@ -84,28 +78,28 @@ def __call__(cls, in_filenames, ccdnorm, outnorm): # # All information is now present. Determine the value that will be used in normalization. 
# - if (ccdnorm > 1): + if ccdnorm > 1: for tmp_rec in norm_list: - if (normval is None): - if (tmp_rec['ccdnum']==ccdnorm): - normval=tmp_rec['ccdnum'] + if normval is None: + if tmp_rec['ccdnum'] == ccdnorm: + normval = tmp_rec['ccdnum'] else: - if (tmp_rec['ccdnum']==ccdnorm): + if tmp_rec['ccdnum'] == ccdnorm: print("Warning: More than one image with CCDNUM={:d} identified") - if (normval is None): - raise ValueError("No image with CCDNUM=%d found among input list. Aborting!" % ccdnorm) - logger.info('Normaliztion: %.2f set based on value from CCD %d ' % (normval,ccdnorm)) + if normval is None: + raise ValueError(f"No image with CCDNUM={ccdnorm:d} found among input list. Aborting!") + logger.info('Normaliztion: %.2f set based on value from CCD %d ', normval, ccdnorm) else: - a_scalmean=np.array(scalmean_list) - normval=np.median(a_scalmean) - logger.info('Normaliztion: %.2f set based on median value of the ensemble ' % normval ) + a_scalmean = np.array(scalmean_list) + normval = np.median(a_scalmean) + logger.info('Normaliztion: %.2f set based on median value of the ensemble ', normval) # # Write out the normalization factor # - fout=open(outnorm,'w') + fout = open(outnorm, 'w') fout.write("{:.2f}\n".format(normval)) fout.close() - ret_code=0 + ret_code = 0 return ret_code @@ -120,7 +114,7 @@ def step_run(cls, config): """ flat_inlist = config.get(cls.step_name, 'inlist') - in_filenames=filelist_to_list(flat_inlist) + in_filenames = filelist_to_list(flat_inlist) ccdnorm = config.getint(cls.step_name, 'ccdnorm') outnorm = config.get(cls.step_name, 'outnorm') diff --git a/python/pixcorrect/fix_columns.py b/python/pixcorrect/fix_columns.py index f93fbdb..9943ce7 100644 --- a/python/pixcorrect/fix_columns.py +++ b/python/pixcorrect/fix_columns.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Implement John Marriner's correctable-column fixer. 
I have made a few changes to the C fixCols() function: @@ -12,12 +12,10 @@ from os import path import numpy as np -from ConfigParser import SafeConfigParser, NoOptionError -from pixcorrect import proddir -from pixcorrect.corr_util import logger, do_once, items_must_match -from despyfits.DESImage import DESDataImage, DESImage, DESBPMImage from pixcorrect.PixCorrectDriver import PixCorrectImStep +from pixcorrect.corr_util import logger, do_once, items_must_match +from despyfits.DESImage import DESBPMImage from despyfits import maskbits # Which section of the config file to read for this step @@ -29,6 +27,7 @@ class FixColumnsError(Exception): """ def __init__(self, value): self.value = value + super().__init__() def __str__(self): return repr(self.value) @@ -39,18 +38,18 @@ class FixColumns(PixCorrectImStep): CORR = maskbits.BPMDEF_CORR # These BPM flags define pixels that are not correctable BPMBAD = maskbits.BPMDEF_FLAT_MIN | \ - maskbits.BPMDEF_FLAT_MAX | \ - maskbits.BPMDEF_FLAT_MASK | \ - maskbits.BPMDEF_BIAS_HOT | \ - maskbits.BPMDEF_BIAS_WARM | \ - maskbits.BPMDEF_BIAS_MASK | \ - maskbits.BPMDEF_WACKY_PIX - + maskbits.BPMDEF_FLAT_MAX | \ + maskbits.BPMDEF_FLAT_MASK | \ + maskbits.BPMDEF_BIAS_HOT | \ + maskbits.BPMDEF_BIAS_WARM | \ + maskbits.BPMDEF_BIAS_MASK | \ + maskbits.BPMDEF_WACKY_PIX + MINIMUM_PIXELS = 100 # Smallest number of pixels in column to correct CLIP_SIGMA = 4 # Rejection threshold for mean statistics @classmethod - def _clippedLine(cls,y,z,doslope,nSigma): + def _clippedLine(cls, y, z, doslope, nSigma): """ Perform a straight line fit to data, iteratively clipping outliers > nSigma It is implicitly assumed that the slope is small. The variance is computed @@ -65,11 +64,11 @@ def _clippedLine(cls,y,z,doslope,nSigma): iqdSigma = 1.349 p25 = np.percentile(z, 25.) p50 = np.percentile(z, 50.) 
- sigma = (p50-p25)/iqdSigma - err = nSigma*sigma + sigma = (p50 - p25) / iqdSigma + err = nSigma * sigma lower = p50 - err upper = p50 + err - mask = np.bitwise_or(zupper) + mask = np.bitwise_or(z < lower, z > upper) #nrej is number of points rejected in the current pass. #Set to arbitrary number>0 for first pass nrej = 100 @@ -77,22 +76,22 @@ def _clippedLine(cls,y,z,doslope,nSigma): yp = y[:][~mask] zp = z[:][~mask] n = np.size(yp) - if n err nrej = np.sum(rej & ~mask) mask |= rej - + gres = res[:][~mask] - var = np.sqrt(np.sum(gres*gres)/n) + var = np.sqrt(np.sum(gres * gres) / n) return mean, slope, var, n @classmethod @@ -101,13 +100,13 @@ def _valid_pix(cls, image, bpm, icol): Return boolean array saying which pixels in column icol are useful for sky stats """ #Allow NEAREDGE columns for reference - use = (image.mask[:,icol] & ~maskbits.BADPIX_NEAREDGE)==0 - use &= ~np.isinf(image.data[:,icol]) - use &= ~np.isnan(image.data[:,icol]) + use = (image.mask[:, icol] & ~maskbits.BADPIX_NEAREDGE) == 0 + use &= ~np.isinf(image.data[:, icol]) + use &= ~np.isnan(image.data[:, icol]) return use - + @classmethod - @do_once(1,'DESFIXC') + @do_once(1, 'DESFIXC') def __call__(cls, image, bpm): """ Find and fix correctable columns in the image as indicated by the BPMDEF_CORR @@ -128,7 +127,7 @@ def __call__(cls, image, bpm): #Correct all correctable pixels, but use only "good" pixels to compute correction logger.info('Fixing columns') - + NEIGHBORS = 6 # Number of comparison columns to seek RANGE = 12 # Farthest away to look for comparison columns # Largest allowable fractional difference in variance between the fixable column @@ -139,7 +138,7 @@ def __call__(cls, image, bpm): #If the pixel values of a column vary in a bi-stable way, the high pixels may be #interpreted as "objects" and the high variance may not be noticed. 
COUNT_TOL = 0.85 - + if image.mask is None: raise FixColumnsError('Input image does not have mask') # Check that dome and data are from same CCD @@ -149,8 +148,8 @@ def __call__(cls, image, bpm): return 1 # A "fixable" column will have CORR flag set at either start or end of column - fixable = np.where(np.logical_or(bpm.mask[0,:] & cls.CORR, - bpm.mask[-1,:] & cls.CORR))[0] + fixable = np.where(np.logical_or(bpm.mask[0, :] & cls.CORR, + bpm.mask[-1, :] & cls.CORR))[0] #Just an array that gives the ordinal number of each row in the column (for fitting the slope) colord = np.arange(4096) #Don't use slope in clipLine @@ -163,13 +162,13 @@ def __call__(cls, image, bpm): # Which pixels in the column are fixable? # They need to have the CORR flag set (other BPM bits specified by BPMOK are allowed) # Checking for valid NAN's or INF's should no longer be necessary, but is harmless - # Also, we do not use any bad pixels. - coldata = image.data[:,icol] - colbpm = bpm.mask[:,icol] + # Also, we do not use any bad pixels. 
+ coldata = image.data[:, icol] + colbpm = bpm.mask[:, icol] ignore = np.logical_or(colbpm & cls.BPMBAD, np.isinf(coldata)) ignore |= np.isnan(coldata) corr_rows = np.logical_and(colbpm & cls.CORR, ~ignore) - ignore |= image.mask[:,icol] & ~maskbits.BADPIX_BPM + ignore |= image.mask[:, icol] & ~maskbits.BADPIX_BPM use_rows = np.logical_and(colbpm & cls.CORR, ~ignore) if np.count_nonzero(use_rows) < cls.MINIMUM_PIXELS: @@ -179,21 +178,21 @@ def __call__(cls, image, bpm): # Get a robust estimate of mean level and slope in target column y = colord[use_rows] z = coldata[use_rows] - col_mean, col_slope, col_var, col_n = cls._clippedLine(y,z,doslope,cls.CLIP_SIGMA) + col_mean, _, col_var, col_n = cls._clippedLine(y, z, doslope, cls.CLIP_SIGMA) if col_var <= 0.0: logger.info("Error in clipped line fit for column {:d}".format(icol)) continue - + # Now want to collect stats on up to NEIGHBORS nearby columns norm_stats = [] ilow = icol ihigh = icol - low_limit = max(icol - RANGE,0) - high_limit = min(icol + RANGE, image.data.shape[1]-1) - while len(norm_stats) < NEIGHBORS and (ilow>low_limit or ihighlow_limit: + low_limit = max(icol - RANGE, 0) + high_limit = min(icol + RANGE, image.data.shape[1] - 1) + while len(norm_stats) < NEIGHBORS and (ilow > low_limit or ihigh < high_limit): + while ilow > low_limit: # get stats from next useful column to left: - ilow-=1 + ilow -= 1 if ilow in fixable: continue use = cls._valid_pix(image, bpm, ilow) @@ -201,15 +200,16 @@ def __call__(cls, image, bpm): if np.count_nonzero(use) < cls.MINIMUM_PIXELS: continue y = colord[use] - z = image.data[:,ilow][use] - ref_col,ref_slope,ref_var, ref_n = cls._clippedLine(y,z,doslope,cls.CLIP_SIGMA) - if ref_var<=0.0: continue - norm_stats.append([ref_col,ref_slope,ref_var,ref_n]) + z = image.data[:, ilow][use] + ref_col, ref_slope, ref_var, ref_n = cls._clippedLine(y, z, doslope, cls.CLIP_SIGMA) + if ref_var <= 0.0: + continue + norm_stats.append([ref_col, ref_slope, ref_var, ref_n]) break - - while 
ihigh VAR_TOLERANCE * norm_var: logger.info('Too much variance to fix column {:d}'.format(icol)) continue #Check that number of target column sky pixels is not much less than #the average of the reference columns - norm_n = np.sum(nc)/np.size(nc) - if col_n < COUNT_TOL*norm_n: + norm_n = np.sum(nc) / np.size(nc) + if col_n < COUNT_TOL * norm_n: logger.info('Too few sky pixels to fix column {:d}'.format(icol)) continue - + #Valid correction. Calculate correction & error estimate - norm_mean = np.sum(mean*wt)/np.sum(wt) + norm_mean = np.sum(mean * wt) / np.sum(wt) correction = norm_mean - col_mean - correction_var = 1./np.sum(wt) + col_var/col_n + #correction_var = 1. / np.sum(wt) + col_var / col_n # Apply correction: - image.data[:,icol][corr_rows] += correction + image.data[:, icol][corr_rows] += correction # Promote the corrected pixels from useless to just imperfect: - image.mask[:,icol][corr_rows] &= ~maskbits.BADPIX_BPM - image.mask[:,icol][corr_rows] |= maskbits.BADPIX_FIXED - logger.info('Corrected column {:d} by {:f}'.format(icol,float(correction))) + image.mask[:, icol][corr_rows] &= ~maskbits.BADPIX_BPM + image.mask[:, icol][corr_rows] |= maskbits.BADPIX_FIXED + logger.info('Corrected column {:d} by {:f}'.format(icol, float(correction))) if bpm.sourcefile is None: image.write_key('FIXCFIL', 'UNKNOWN', comment='BPM file for fixing columns') @@ -267,9 +268,7 @@ def __call__(cls, image, bpm): logger.debug('Finished fixing columns') - - - ret_code=0 + ret_code = 0 return ret_code @classmethod @@ -284,7 +283,7 @@ def step_run(cls, image, config): bpm_fname = config.get(cls.step_name, 'bpm') logger.info('reading BPM from %s' % bpm_fname) bpm_im = DESBPMImage.load(bpm_fname) - + ret_code = cls.__call__(image, bpm_im) return ret_code @@ -292,10 +291,9 @@ def step_run(cls, image, config): def add_step_args(cls, parser): """Add arguments specific to sky compression """ - parser.add_argument('-b', '--bpm', nargs=1, - default=None, + parser.add_argument('-b', '--bpm', 
nargs=1, + default=None, help='bad pixel mask filename') - return fix_columns = FixColumns() diff --git a/python/pixcorrect/flat_correct.py b/python/pixcorrect/flat_correct.py index 10392a9..6572f85 100755 --- a/python/pixcorrect/flat_correct.py +++ b/python/pixcorrect/flat_correct.py @@ -1,16 +1,16 @@ -#!/usr/bin/env python -"""Apply a flat correction to a raw DES image +#!/usr/bin/env python3 +"""Apply a flat correction to a raw DES image """ from os import path import numpy as np -from pixcorrect import proddir + +from pixcorrect.PixCorrectDriver import PixCorrectImStep +from pixcorrect import decaminfo from pixcorrect.corr_util import logger, do_once, items_must_match #from despyfits.DESImage import DESImage -from despyfits.DESImage import DESImage, section2slice +from despyfits.DESImage import DESImage, section2slice from despyfits import maskbits -from pixcorrect.PixCorrectDriver import PixCorrectImStep -from pixcorrect import decaminfo # Which section of the config file to read for this step config_section = 'flat' @@ -31,7 +31,7 @@ def _doit(cls, image, flat_im): Applies the correction "in place" """ logger.info('Applying Flat') - + # Check that flat and data are from same CCD and filter try: image['BAND'] @@ -39,18 +39,18 @@ def _doit(cls, image, flat_im): # Give image a BAND from its FILTER if it's not there image['BAND'] = decaminfo.get_band(image['FILTER']) try: - items_must_match(image, flat_im, 'CCDNUM','BAND') + items_must_match(image, flat_im, 'CCDNUM', 'BAND') except: return 1 - + # Apply flat to the data image.data /= flat_im.data # Update variance or weight image if it exists if image.weight is not None: - image.weight *= flat_im.data*flat_im.data + image.weight *= flat_im.data * flat_im.data if image.variance is not None: - image.variance /= flat_im.data*flat_im.data + image.variance /= flat_im.data * flat_im.data # If mask image exists, mark as BADPIX_BPM any pixels that have # non-positive flat and are not already flagged. 
@@ -58,13 +58,13 @@ def _doit(cls, image, flat_im): # Find flat-field pixels that are invalid but not already bad for # one of these reasons: badmask = maskbits.BADPIX_BPM +\ - maskbits.BADPIX_BADAMP +\ - maskbits.BADPIX_EDGE - badflat = np.logical_and( flat_im.data <= 0., - image.mask & badmask) + maskbits.BADPIX_BADAMP +\ + maskbits.BADPIX_EDGE + badflat = np.logical_and(flat_im.data <= 0., + image.mask & badmask) mark_these = np.where(badflat.flatten())[0] image.mask.flatten()[mark_these] |= maskbits.BADPIX_BPM - + # If a weight or variance image already exists, add to it any additional # variance from the flat: if (image.weight is not None or image.variance is not None): @@ -72,47 +72,47 @@ def _doit(cls, image, flat_im): var = image.get_variance() f2 = flat_im.data * flat_im.data var *= f2 - var += image.data*image.data/(flat_im.weight*f2) + var += image.data * image.data / (flat_im.weight * f2) elif flat_im.variance is not None: var = image.get_variance() f2 = flat_im.data * flat_im.data var *= f2 - var += image.data*image.data*flat_im.variance/f2 + var += image.data * image.data * flat_im.variance / f2 # Update header keywords for rescaling saturate = 0. 
scales = [] for amp in decaminfo.amps: # Acquire the typical scaling factor for each amp from the flat - scalekw = 'FLATMED'+amp + scalekw = 'FLATMED' + amp if scalekw in flat_im.header.keys(): # Already stored in the flat's header: scale = flat_im[scalekw] else: # Figure it out ourselves from median of a subsample: # sec = DESImage.section2slice(image['DATASEC'+amp]) - sec = section2slice(image['DATASEC'+amp]) - scale = np.median(flat_im.data[sec][::4,::4]) + sec = section2slice(image['DATASEC' + amp]) + scale = np.median(flat_im.data[sec][::4, ::4]) scales.append(scale) if scalekw in image.header.keys(): # Add current scaling to any previous ones - image[scalekw] = image[scalekw]*scale + image[scalekw] = image[scalekw] * scale else: image[scalekw] = scale - image['GAIN'+amp] = image['GAIN'+amp] * scale - image['SATURAT'+amp] = image['SATURAT'+amp] / scale + image['GAIN' + amp] = image['GAIN' + amp] * scale + image['SATURAT' + amp] = image['SATURAT' + amp] / scale # Scale the SKYVAR if it's already here - kw = 'SKYVAR'+amp + kw = 'SKYVAR' + amp if kw in image.header.keys(): - image[kw] = image[kw] / (scale*scale) - saturate = max(saturate, image['SATURAT'+amp]) + image[kw] = image[kw] / (scale * scale) + saturate = max(saturate, image['SATURAT' + amp]) # The SATURATE keyword is assigned to maximum of the amps' values. 
image['SATURATE'] = saturate - + # Some other keywords that we will adjust crudely with mean rescaling # if they are present: scale = np.mean(scales) - for kw in ('SKYBRITE','SKYSIGMA'): + for kw in ('SKYBRITE', 'SKYSIGMA'): if kw in image.header.keys(): image[kw] = image[kw] / scale @@ -132,14 +132,14 @@ def step_run(cls, image, config): """ flat_fname = config.get(cls.step_name, 'flat') - logger.info('Reading flat correction from %s'% flat_fname) + logger.info('Reading flat correction from %s' % flat_fname) flat_im = DESImage.load(flat_fname) - + ret_code = cls.__call__(image, flat_im) return ret_code @classmethod - @do_once(1,'DESFLAT') + @do_once(1, 'DESFLAT') def __call__(cls, image, flat_im): """Apply a flat field correction to an image @@ -157,7 +157,7 @@ def __call__(cls, image, flat_im): image.write_key('FLATFIL', path.basename(flat_im.sourcefile), comment='Dome flat correction file') return ret_code - + @classmethod def add_step_args(cls, parser): """Add arguments specific application of the flat field correction diff --git a/python/pixcorrect/flat_correct_cp.py b/python/pixcorrect/flat_correct_cp.py index ef28046..4ecf07f 100755 --- a/python/pixcorrect/flat_correct_cp.py +++ b/python/pixcorrect/flat_correct_cp.py @@ -1,16 +1,16 @@ -#!/usr/bin/env python -"""Apply a flat correction to a raw DES image +#!/usr/bin/env python3 +"""Apply a flat correction to a raw DES image """ from os import path import numpy as np -from pixcorrect import proddir + from pixcorrect.corr_util import logger, do_once, items_must_match #from despyfits.DESImage import DESImage -from despyfits.DESImage import DESImage, section2slice -from despyfits import maskbits from pixcorrect.PixCorrectDriver import PixCorrectImStep from pixcorrect import decaminfo +from despyfits.DESImage import DESImage, section2slice +from despyfits import maskbits # Which section of the config file to read for this step config_section = 'flat' @@ -32,7 +32,7 @@ def _doit(cls, image, flat_im, 
rel_gain_for_flat): Applies the correction "in place" """ logger.info('Applying Flat') - + # Check that flat and data are from same CCD and filter try: image['BAND'] @@ -40,26 +40,25 @@ def _doit(cls, image, flat_im, rel_gain_for_flat): # Give image a BAND from its FILTER if it's not there image['BAND'] = decaminfo.get_band(image['FILTER']) try: - items_must_match(image, flat_im, 'CCDNUM','BAND') + items_must_match(image, flat_im, 'CCDNUM', 'BAND') except: logger.info('Warning: Assuming that keywords are not present and proceeding with flat') - pass # If dictionary has been populated with relative gains then apply # to flat prior to apply flat to the image for amp in rel_gain_for_flat: - logger.info('Scaling flat amplifier %s by %.3f' % (amp,rel_gain_for_flat[amp])) - sec = section2slice(image['DATASEC'+amp]) + logger.info('Scaling flat amplifier %s by %.3f' % (amp, rel_gain_for_flat[amp])) + sec = section2slice(image['DATASEC' + amp]) flat_im.data[sec] *= rel_gain_for_flat[amp] - + # Apply flat to the data image.data /= flat_im.data # Update variance or weight image if it exists if image.weight is not None: - image.weight *= flat_im.data*flat_im.data + image.weight *= flat_im.data * flat_im.data if image.variance is not None: - image.variance /= flat_im.data*flat_im.data + image.variance /= flat_im.data * flat_im.data # If mask image exists, mark as BADPIX_BPM any pixels that have # non-positive flat and are not already flagged. 
@@ -69,59 +68,59 @@ def _doit(cls, image, flat_im, rel_gain_for_flat): badmask = maskbits.BADPIX_BPM +\ maskbits.BADPIX_BADAMP +\ maskbits.BADPIX_EDGE - badflat = np.logical_and( flat_im.data <= 0., - image.mask & badmask) + badflat = np.logical_and(flat_im.data <= 0., + image.mask & badmask) mark_these = np.where(badflat.flatten())[0] image.mask.flatten()[mark_these] |= maskbits.BADPIX_BPM - + # If a weight or variance image already exists, add to it any additional # variance from the flat: - if (image.weight is not None or image.variance is not None): + if image.weight is not None or image.variance is not None: if flat_im.weight is not None: var = image.get_variance() f2 = flat_im.data * flat_im.data var *= f2 - var += image.data*image.data/(flat_im.weight*f2) + var += image.data * image.data / (flat_im.weight * f2) elif flat_im.variance is not None: var = image.get_variance() f2 = flat_im.data * flat_im.data var *= f2 - var += image.data*image.data*flat_im.variance/f2 + var += image.data * image.data * flat_im.variance / f2 # Update header keywords for rescaling saturate = 0. 
scales = [] for amp in decaminfo.amps: # Acquire the typical scaling factor for each amp from the flat - scalekw = 'FLATMED'+amp + scalekw = 'FLATMED' + amp if scalekw in flat_im.header.keys(): # Already stored in the flat's header: scale = flat_im[scalekw] else: # Figure it out ourselves from median of a subsample: # sec = DESImage.section2slice(image['DATASEC'+amp]) - sec = section2slice(image['DATASEC'+amp]) - scale = np.median(flat_im.data[sec][::4,::4]) + sec = section2slice(image['DATASEC' + amp]) + scale = np.median(flat_im.data[sec][::4, ::4]) scales.append(scale) if scalekw in image.header.keys(): # Add current scaling to any previous ones - image[scalekw] = image[scalekw]*scale + image[scalekw] = image[scalekw] * scale else: image[scalekw] = scale - image['GAIN'+amp] = image['GAIN'+amp] * scale - image['SATURAT'+amp] = image['SATURAT'+amp] / scale + image['GAIN' + amp] = image['GAIN'+amp] * scale + image['SATURAT' + amp] = image['SATURAT' + amp] / scale # Scale the SKYVAR if it's already here - kw = 'SKYVAR'+amp + kw = 'SKYVAR' + amp if kw in image.header.keys(): - image[kw] = image[kw] / (scale*scale) - saturate = max(saturate, image['SATURAT'+amp]) + image[kw] = image[kw] / (scale * scale) + saturate = max(saturate, image['SATURAT' + amp]) # The SATURATE keyword is assigned to maximum of the amps' values. image['SATURATE'] = saturate - + # Some other keywords that we will adjust crudely with mean rescaling # if they are present: scale = np.mean(scales) - for kw in ('SKYBRITE','SKYSIGMA'): + for kw in ('SKYBRITE', 'SKYSIGMA'): if kw in image.header.keys(): image[kw] = image[kw] / scale @@ -129,7 +128,6 @@ def _doit(cls, image, flat_im, rel_gain_for_flat): ret_code = 0 return ret_code - @classmethod def step_run(cls, image, config): """Customized execution for application of the Flat @@ -147,13 +145,13 @@ def step_run(cls, image, config): # At present the only way to acquire gains is when function is run through # tandem operation with gain_correct. 
In the absence of having relative gains # an empty dictionary is passed here. - rel_gain_for_flat={} - + rel_gain_for_flat = {} + ret_code = cls.__call__(image, flat_im, rel_gain_for_flat) return ret_code @classmethod - @do_once(1,'DESFLAT') + @do_once(1, 'DESFLAT') def __call__(cls, image, flat_im, rel_gain_for_flat): """Apply a flat field correction to an image @@ -172,7 +170,7 @@ def __call__(cls, image, flat_im, rel_gain_for_flat): image.write_key('FLATFIL', path.basename(flat_im.sourcefile), comment='Dome flat correction file') return ret_code - + @classmethod def add_step_args(cls, parser): """Add arguments specific application of the flat field correction diff --git a/python/pixcorrect/gain_correct.py b/python/pixcorrect/gain_correct.py index 26d3a27..2f41d19 100755 --- a/python/pixcorrect/gain_correct.py +++ b/python/pixcorrect/gain_correct.py @@ -1,15 +1,13 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Gain Correct image (convert pixel values from ADU to electrons) """ -import ctypes -from os import path import numpy as np -from pixcorrect import proddir + from pixcorrect.corr_util import logger, do_once -from despyfits.DESImage import DESImage, DESImageCStruct, section2slice, data_dtype from pixcorrect.PixCorrectDriver import PixCorrectImStep from pixcorrect import decaminfo +from despyfits.DESImage import section2slice # Which section of the config file to read for this step config_section = 'gain' @@ -19,7 +17,7 @@ class GainCorrect(PixCorrectImStep): step_name = config_section @classmethod - @do_once(1,'DESGAINC') + @do_once(1, 'DESGAINC') def __call__(cls, image): """Convert pixel values from ADU to electrons, including weight or variance image and critical keywords. @@ -29,33 +27,33 @@ def __call__(cls, image): Applies the correction "in place" """ - + logger.info('Gain Correcting Image') saturate = 0. 
gains = [] for amp in decaminfo.amps: - sec = section2slice( image['DATASEC'+amp]) - gain = image['GAIN'+amp] + sec = section2slice(image['DATASEC' + amp]) + gain = image['GAIN' + amp] gains.append(gain) - image.data[sec]*=gain + image.data[sec] *= gain # Adjust the weight or variance image if present: if image.weight is not None: - image.weight[sec] *= 1./(gain*gain) + image.weight[sec] *= 1. / (gain * gain) if image.variance is not None: - image.variance[sec] *= (gain*gain) + image.variance[sec] *= gain * gain - # Adjust keywords - image['GAIN'+amp] = image['GAIN'+amp] / gain - image['SATURAT'+amp] = image['SATURAT'+amp] * gain - saturate = max(saturate, image['SATURAT'+amp]) + # Adjust keywords + image['GAIN' + amp] = image['GAIN' + amp] / gain + image['SATURAT' + amp] = image['SATURAT' + amp] * gain + saturate = max(saturate, image['SATURAT' + amp]) # Scale the SKYVAR if it's already here - kw = 'SKYVAR'+amp + kw = 'SKYVAR' + amp if kw in image.header.keys(): image[kw] = image[kw] * gain * gain # The FLATMED will keep track of rescalings *after* gain: - image['FLATMED'+amp] = 1. + image['FLATMED' + amp] = 1. # The SATURATE keyword is assigned to maximum of the two amps. 
image['SATURATE'] = saturate @@ -63,15 +61,15 @@ def __call__(cls, image): # Some other keywords that we will adjust crudely with mean gain # if they are present: gain = np.mean(gains) - for kw in ('SKYBRITE','SKYSIGMA'): + for kw in ('SKYBRITE', 'SKYSIGMA'): if kw in image.header.keys(): image[kw] = image[kw] * gain - + # One other keyword to adjust: image['BUNIT'] = 'electrons' - + logger.debug('Finished applying Gain Correction') - ret_code=0 + ret_code = 0 return ret_code @@ -84,8 +82,8 @@ def step_run(cls, image, config): - `config`: the configuration from which to get other parameters """ - logger.info('Gain correction will be applied to %s' % image) - + logger.info('Gain correction will be applied to %s', image) + ret_code = cls.__call__(image) return ret_code diff --git a/python/pixcorrect/image_diff.py b/python/pixcorrect/image_diff.py index 2a0c87e..ace4099 100755 --- a/python/pixcorrect/image_diff.py +++ b/python/pixcorrect/image_diff.py @@ -1,13 +1,10 @@ -#!/usr/bin/env python -"""Apply a bias correction to a raw DES image +#!/usr/bin/env python3 +"""Apply a bias correction to a raw DES image """ -from os import path -import numpy as np -from pixcorrect import proddir -from pixcorrect.corr_util import logger, do_once -from despyfits.DESImage import DESImage +from pixcorrect.corr_util import logger from pixcorrect.PixCorrectDriver import PixCorrectImStep +from despyfits.DESImage import DESImage # Which section of the config file to read for this step config_section = 'imgdiff' @@ -27,14 +24,14 @@ def __call__(cls, image, comp_im): Applies the correction "in place" """ - + logger.info('Taking Difference') image.data -= comp_im.data # If we have two weight images, add variance of the bias to the image's - if (image.weight is not None or image.variance is not None): + if image.weight is not None or image.variance is not None: if comp_im.weight is not None: var = image.get_variance() - var += 1./comp_im.weight + var += 1. 
/ comp_im.weight elif comp_im.variance is not None: var = image.get_variance() var += comp_im.variance @@ -54,9 +51,9 @@ def step_run(cls, image, config): """ comp_fname = config.get(cls.step_name, 'comp') - logger.info('reading Comparison image from %s'% comp_fname) + logger.info('reading Comparison image from %s', comp_fname) comp_im = DESImage.load(comp_fname) - + ret_code = cls.__call__(image, comp_im) return ret_code diff --git a/python/pixcorrect/imtypes.py b/python/pixcorrect/imtypes.py index 369bfe7..0ce6b06 100644 --- a/python/pixcorrect/imtypes.py +++ b/python/pixcorrect/imtypes.py @@ -14,12 +14,14 @@ class ImageTypeException(Exception): class ImageWrongHeader(ImageTypeException): def __init__(self, kw, read_value, expected_value): + super().__init__() self.kw = kw self.read_value = read_value self.expected_value = expected_value class ImageWrongShape(ImageTypeException): def __init__(self, read_shape, expected_shape): + super().__init__() self.read_shape = read_shape self.expected_shape = expected_shape @@ -31,7 +33,7 @@ def __init__(self, read_shape, expected_shape): # for specific image types, while this would be awkward if they were all # instances of the same class. -class ImageTypeChecker(object): +class ImageTypeChecker: shape = None kwdict = {} keywords = [] @@ -60,7 +62,7 @@ class Type1ImageChecker(Type0ImageChecker): 'TREE': 'oak'} class Type3ImageChecker(Type0ImageChecker): - keywords = ['BARCOEFF'] + keywords = ['BARCOEFF'] # # Make design-by-contract (dbc) decorators from our checking classes diff --git a/python/pixcorrect/lightbulb.py b/python/pixcorrect/lightbulb.py new file mode 100755 index 0000000..3a34157 --- /dev/null +++ b/python/pixcorrect/lightbulb.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 + +# $Id: lightbulb.py 46993 2018-05-10 20:03:29Z rgruendl $ +# $Rev:: 46993 $: # Revision of last commit. +# $LastChangedBy:: rgruendl $: # Author of last commit. +# $LastChangedDate:: 2018-05-10 15:03:29 #$: # Date of last commit. 
+ +"""Lightbulb Search and Mask on image +""" + +import numpy as np +from pixcorrect import lightbulb_utils as lb +#from pixcorrect.corr_util import logger, do_once +from pixcorrect.corr_util import logger +#from despyfits.DESImage import DESImage, DESImageCStruct, section2slice, data_dtype +from pixcorrect.PixCorrectDriver import PixCorrectImStep + +# Which section of the config file to read for this step +config_section = 'lightbulb' + +class LightBulb(PixCorrectImStep): + description = "Search for indication that a known lightbulb defect is active and mask" + step_name = config_section + + @classmethod + def __call__(cls, image): + """ + This is currently written with a single lightbulb in mind. It may be generalizable if further cases + occur (but not all the parts have yet been written with a generalized case in mind). + Three components are generated depending on the strength of the lightbulb: + 1) circular mask centered on the bulb with a radius sufficient to reach 1-sigma of the noise level + 2) columnar mask that extends toward the read registers (triggered when lightbulb has central brightness > XX + 3) columnar mask that extends away from the read registers (triggered when light bulb is saturated) + """ +# A simple dictionary with parameters for the only known lightbulb +# Currently explist is set to encompass 20170901 and beyond (expnum>674105) +# This could be tightened to 693244 (20171101) or even to 694699 (the earliest weak lighbulb found so far) + LBD = {46: {'explist': '674105-', + 'xc': 795, + 'yc': 2620, + 'rad':500}} + + if image['CCDNUM'] in LBD: + check_for_light = lb.check_lightbulb_explist(image['EXPNUM'], LBD[image['CCDNUM']]['explist']) + if check_for_light: + logger.info('Image CCDNUM=46, in proscribed range checking for lightbulb') + bulbDict = lb.check_lightbulb(image, LBD[image['CCDNUM']], verbose=1) +# +# Current criterion: +# 1) saturate pixels detected, median brightness >100,000 and width above the lower end of range +# 2) 
non-saturated: +# requires successful fit (failed fits set g_wid, g_widerr, g_amp, g_amperr to -1) +# width (FWHM) in range 70-140 and uncertainty less than 35 +# amplitude > 0 and uncertainty positive and less than sqrt(amp) +# + isBulb = False + bulbDict['bulb'] = 'F' + if bulbDict['num_sat'] > 0: + if bulbDict['g_wid'] > 70. and bulbDict['g_amp'] > 100000: + bulbDict['bulb'] = 'S' + isBulb = True + if not isBulb: + if bulbDict['g_wid'] >= 70. and bulbDict['g_wid'] <= 140. and \ + bulbDict['g_widerr'] > 0. and bulbDict['g_widerr'] < 35.: + if bulbDict['g_amp'] >= 0.: + if bulbDict['g_amperr'] > 0. and bulbDict['g_amperr'] < np.sqrt(bulbDict['g_amp']): + bulbDict['bulb'] = 'T' + isBulb = True +# +# If found use masking utility +# + if isBulb: + logger.info(' LIGHTBULB: detected with central brightness: {:.1f}'.format(bulbDict['bulb_sb'])) + image = lb.mask_lightbulb(image, LBD[image['CCDNUM']], bulbDict, verbose=1) + image.write_key('DESBULB', 'Peak SB {:.1f}, Radius {:.1f} '.format(bulbDict['bulb_sb'], + bulbDict['g_wid']), + comment='') + + logger.debug('Finished checking and applying mask for light bulb') + ret_code = 0 + return ret_code + + + @classmethod + def step_run(cls, image, config): + """Customized execution for check and masking of light bulb + + :Parameters: + - `image`: the DESImage on which to operate +# - `config`: the configuration from which to get other parameters + + """ + logger.info('Light bulb check %s', image) + + ret_code = cls.__call__(image) + return ret_code + + @classmethod + def add_step_args(cls, parser): + """Add arguments specific application of the gain correction + """ + +lightbulb = LightBulb() + +# internal functions & classes + +if __name__ == '__main__': + lightbulb.main() diff --git a/python/pixcorrect/lightbulb_utils.py b/python/pixcorrect/lightbulb_utils.py new file mode 100755 index 0000000..1dce6fd --- /dev/null +++ b/python/pixcorrect/lightbulb_utils.py @@ -0,0 +1,263 @@ +# $Id: lightbulb_utils.py 46990 2018-05-10 
19:57:25Z rgruendl $ +# $Rev:: 46990 $: # Revision of last commit. +# $LastChangedBy:: rgruendl $: # Author of last commit. +# $LastChangedDate:: 2018-05-10 14:57:25 #$: # Date of last commit. + +"""Lightbulb masking functions +""" + +import re +import numpy as np +from scipy.optimize import curve_fit +from pixcorrect.corr_util import logger +from despyfits.maskbits import * + +########################################### +def check_lightbulb_explist(expnum, explist): + """Check whether an exposure number falls within a list + :Parameters: + - expnum: expnum to check + - explist: string that converts to a set of exposure numbers + """ + + if explist == "All": + retval = True + else: + tmp_exp_list = explist.split(",") + ExpList = [] + for exp in tmp_exp_list: + if re.search("-", exp) is None: + ExpList.append([int(exp), int(exp)]) + else: + ExpRange = exp.split("-") + if ExpRange[0] == '': + ExpList.append([1, int(ExpRange[1])]) + elif ExpRange[1] == "": + ExpList.append([int(ExpRange[0]), 1000000000]) + else: + ExpList.append([int(ExpRange[0]), int(ExpRange[1])]) + retval = False + for ExpRange in ExpList: + if ExpRange[0] <= expnum <= ExpRange[1]: + retval = True + + return retval + + +########################################### +def medclip(data, clipsig=3.0, maxiter=10, converge_num=0.001, verbose=0): + """ Function to examine data and determine average, median, stddev + using a clipping algorithm + Inputs: data: image array + clipsig: The number of N-sigma to be excluded when clipping + maxiter: Maximum number of iterations to perform + converge_num: Convergence Criteria + + Outputs: avgval: average from clipped distribution + medval: median from clipped distribution + stdval: stddev from clipped distribution + """ + + ct = data.size + iter = 0 + c1 = 1.0 + c2 = 0.0 + + avgval = np.mean(data) + medval = np.median(data) + sig = np.std(data) + wsm = np.where(abs(data-medval) < clipsig * sig) + if 0 < verbose < 4: + logger.debug("iter,avgval,medval,sig") + if 2 < 
verbose < 4: + logger.debug("{:d} {:.2f} {:.2f} {:.2f} ".format(0, avgval, medval, sig)) + if verbose > 3: + logger.debug("iter,avgval,medval,sig") + logger.debug("{:d} {:.2f} {:.2f} {:.2f} {:d} {:d} {:.1f} ".format(0, avgval, medval, sig, ct, c1, c2)) + + while c1 >= c2 and iter < maxiter: + iter += 1 + lastct = ct + avgval = np.mean(data[wsm]) + medval = np.median(data[wsm]) + sig = np.std(data[wsm]) + wsm = np.where(abs(data-medval) < clipsig * sig) + ct = len(wsm[0]) + if ct > 0: + c1 = abs(ct - lastct) + c2 = converge_num * lastct + if 2 < verbose < 4: + logger.debug("{:d} {:.2f} {:.2f} {:.2f} ".format(iter, avgval, medval, sig)) + if verbose > 3: + logger.debug("{:d} {:.2f} {:.2f} {:.2f} {:d} {:d} {:.1f} ".format(iter, avgval, medval, sig, ct, c1, c2)) +# End of while loop + if iter >= maxiter: + logger.info("Warning: medclip had not yet converged after {:d} iterations".format(iter)) + + medval = np.median(data[wsm]) + avgval = np.mean(data[wsm]) + stdval = np.std(data[wsm]) + if verbose > 0: + logger.info("{:d} {:.2f} {:.2f} {:.2f} ".format(iter + 1, avgval, medval, stdval)) + + return avgval, medval, stdval + + +######################################### +def rgauss(r, a, b, c): + return a * np.exp(-(r * r) / (2. 
* b * b)) + c + +########################################### +def check_lightbulb(image, LBD, verbose=0): + """Function to check for presence of light bulb""" + +# +# Get image statistics +# + bulbDict = {} + _, clip_med, clip_std = medclip(image.data, clipsig=3.0, maxiter=20, converge_num=0.001, verbose=0) + logger.info(" LIGHTBULB: Global(clipped): median = {:.3f}, stddev = {:.3f} ".format(clip_med, clip_std)) + bulbDict['cmed'] = float(clip_med) + bulbDict['cstd'] = float(clip_std) + +# +# Looks at central location of bulb and get some basic measure of brightness +# + x0 = LBD['xc'] + y0 = LBD['yc'] + iy1 = y0 - 10 + iy2 = y0 + 10 + ix1 = x0 - 10 + ix2 = x0 + 10 + central_med = np.median(image.data[iy1:iy2, ix1:ix2]) + bulbDict['bulb_sb'] = float(central_med) - bulbDict['cmed'] + logger.info(" LIGHTBULB: Potential Bulb Central SB: median = {:.3f}".format(bulbDict['bulb_sb'])) + + BulbSig = (central_med - clip_med) / clip_std + bulbDict['bulb_sig'] = (float(BulbSig) * 21.) + logger.info(" LIGHTBULB: Bulb Significance = {:.3f} sigma.".format(BulbSig * 21.)) + central_sat = np.sum(image.mask[iy1:iy2, ix1:ix2] & 2)/ 2 + logger.info(" LIGHTBULB: Number of central saturated pixels: {:d} ".format(int(central_sat))) + bulbDict['num_sat'] = int(central_sat) +# +# Form a radial profile and then fit with Gaussian centered at r=0 +# +# image.data[np.isnan(image.data)]=0 + y, x = np.indices(image.data.shape) + r = np.sqrt((x - x0)**2 + (y - y0)**2) + + rbinsize = 10.0 + radbin = np.arange(0., LBD['rad'], rbinsize) + radbin_c = (radbin[0:-1] + radbin[1:]) / 2.0 + medbin = np.zeros(radbin_c.size) + stdbin = np.zeros(radbin_c.size) + for i in range(radbin_c.size): + wsm = np.where(np.logical_and(r >= radbin[i], r < radbin[i + 1])) + if image.data[wsm].size > 0: + medbin[i] = np.median(image.data[wsm]) + stdbin[i] = np.std(image.data[wsm]) + if medbin[i] > bulbDict['cmed']: + stdbin[i] = np.sqrt(medbin[i] - bulbDict['cmed'] + 36.0) + else: + stdbin[i] = np.sqrt(36.0) + else: + 
medbin[i] = 0.0 + stdbin[i] = np.sqrt(36.0) +# +# Make a simple estimate of radius to mask (then confirm with Gaussian fit) +# + bkg_limit = bulbDict['cmed'] + bulbDict['cstd'] + i = 0 + while medbin[i] > bkg_limit and i < radbin_c.size - 1: + i += 1 + logger.info(" LIGHTBULB: Median Radial Profile estimate shows level of {:.2f} at a radius of {:.2f} ".format(medbin[i], radbin_c[i])) + bulbDict['rad'] = float(radbin_c[i]) +# +# Guess for FWHM, Amp, bkg +# + i = 0 + while medbin[i] - bulbDict['cmed'] > 0.5 * bulbDict['bulb_sb'] and i < radbin_c.size - 1: + i += 1 + sig_guess = radbin_c[i] / 2.355 + amp_guess = bulbDict['bulb_sb'] / (sig_guess * 2.355) + r_guess = [amp_guess, sig_guess, bulbDict['cmed']] +# +# Fit radial profile +# + try: + popt, pcov = curve_fit(rgauss, radbin_c, medbin, p0=r_guess, sigma=stdbin, absolute_sigma=True) + perr = np.sqrt(np.diag(pcov)) + logger.info(" LIGHTBULB: Gauss FIT results (amp, sig, bkg): {:.2f} {:.2f} {:.2f} ".format(popt[0], popt[1], popt[2])) + logger.info(" LIGHTBULB: Gauss FIT results perr(covar): {:.2f} {:.2f} {:.2f} ".format(perr[0], perr[1], perr[2])) + #g_fitval = rgauss(radbin_c, *popt) + bulbDict['g_amp'] = popt[0] + bulbDict['g_wid'] = popt[1] * 2.355 + bulbDict['g_bkg'] = popt[2] + bulbDict['g_amperr'] = perr[0] + bulbDict['g_widerr'] = perr[1] * 2.355 + bulbDict['g_bkgerr'] = perr[2] + except: + logger.info(" LIGHTBULB: Gauss FIT FAILED") +# popt=(0.0,1.0,bulbDict['cmed']) + bulbDict['g_amp'] = -1.0 + bulbDict['g_wid'] = -1.0 + bulbDict['g_bkg'] = -1.0 + bulbDict['g_amperr'] = -1.0 + bulbDict['g_widerr'] = -1.0 + bulbDict['g_bkgerr'] = -1.0 + + return bulbDict + + +########################################### +def mask_lightbulb(image, LBD, bulbDict, verbose=0): + """Function to mask the region affected by a lightbulb""" + +# +# Empirically derived relation for the width of the columnar masks +# pw1 --> width of region extending toward the read registers +# pw2 --> width of region extending away from the read 
registers +# + if bulbDict['bulb_sb'] > 0.: + y1 = np.power(bulbDict['bulb_sb'], 0.75) / 10. + y2 = np.power(bulbDict['bulb_sb'], 0.75) / 100. + pw1 = 150. * (np.log10(y1) - 0.5) + pw2 = 150. * (np.log10(y2) - 0.5) + if pw1 < 16.0: + pw1 = 0.0 + if pw2 < 16.0: + pw2 = 0.0 + else: + pw1 = 0.0 + pw2 = 0.0 + + xb0 = LBD['xc'] + yb0 = LBD['yc'] + yb, xb = np.indices(image.mask.shape) + rb = np.sqrt((xb - xb0)**2 + (yb - yb0)**2) + + rlim = 1.66 * bulbDict['rad'] + xb11 = xb0 - pw1 + xb12 = xb0 + pw1 + xb21 = xb0 - pw2 + xb22 = xb0 + pw2 + + if pw2 > 7.: + wsm3 = np.where(np.logical_or(rb < rlim, np.logical_or(np.logical_and(yb >= yb0, np.logical_and(xb > xb11, xb < xb12)), + np.logical_and(yb <= yb0, np.logical_and(xb > xb21, xb < xb22))))) + logger.info(" LIGHTBULB: masking circular region with radius {:.1f} ".format(rlim)) + logger.info(" LIGHTBULB: masking columnar region for y>{:.1f} with width {:.1f} ".format(yb0, pw1 * 2.)) + logger.info(" LIGHTBULB: masking columnar region for y<{:.1f} with width {:.1f} ".format(yb0, pw2 * 2.)) + elif pw1 > 7.: + wsm3 = np.where(np.logical_or(rb < rlim, np.logical_and(yb >= yb0, np.logical_and(xb > xb11, xb < xb12)))) + logger.info(" LIGHTBULB: masking circular region with radius {:.1f} ".format(rlim)) + logger.info(" LIGHTBULB: masking columnar region for y>{:.1f} with width {:.1f} ".format(yb0, pw1 * 2.)) + else: + wsm3 = np.where(rb < rlim) + logger.info(" LIGHTBULB: masking circular region with radius {:.1f} ".format(rlim)) + + image.mask[wsm3] |= BADPIX_BPM + + logger.info(" LIGHTBULB: mask applied to image") + + return image diff --git a/python/pixcorrect/linearity_correct.py b/python/pixcorrect/linearity_correct.py index 0232c8b..6d461b8 100755 --- a/python/pixcorrect/linearity_correct.py +++ b/python/pixcorrect/linearity_correct.py @@ -1,17 +1,15 @@ -#!/usr/bin/env python -"""Apply a linearity correction to a DES image +#!/usr/bin/env python3 +"""Apply a linearity correction to a DES image """ -import ctypes from os 
import path import numpy as np import fitsio from scipy import interpolate -from pixcorrect import proddir from pixcorrect.corr_util import logger, do_once -from despyfits.DESImage import DESImage, section2slice -from despyfits.DESFITSInventory import DESFITSInventory from pixcorrect.PixCorrectDriver import PixCorrectImStep +from despyfits.DESImage import section2slice +from despyfits.DESFITSInventory import DESFITSInventory # Which section of the config file to read for this step config_section = 'lincor' @@ -21,7 +19,7 @@ class LinearityCorrect(PixCorrectImStep): step_name = config_section @classmethod - @do_once(1,'DESLINC') + @do_once(1, 'DESLINC') def __call__(cls, image, fname_lincor): """Apply a linearity correction @@ -36,22 +34,22 @@ def __call__(cls, image, fname_lincor): # Discover the HDU in the linearity correction FITS table that contains data for a specific CCD # fits_inventory = DESFITSInventory(fname_lincor) - lincor_hdu=fits_inventory.ccd_hdus(image['CCDNUM']) - if (len(lincor_hdu) != 1): - if (len(lincor_hdu) == 0): - logger.error('Unable to locate HDU in %s containing linearity correction for CCDNUM %d. Aborting!'.format(fname_lincor,image['CCDNUM'])) + lincor_hdu = fits_inventory.ccd_hdus(image['CCDNUM']) + if len(lincor_hdu) != 1: + if not lincor_hdu: + logger.error('Unable to locate HDU in %s containing linearity correction for CCDNUM %d. Aborting!', fname_lincor, image['CCDNUM']) else: - logger.error('Found multiple HDUs in %s containing linearity correction for CCDNUM %d. Aborting!'.format(fname_lincor,image['CCDNUM'])) - raise - - logger.info('Reading Linearity Correction from %s' % (fname_lincor)) - cat_fits=fitsio.FITS(fname_lincor,'r') - cat_hdu=lincor_hdu[0] - cols_retrieve=["ADU","ADU_LINEAR_A","ADU_LINEAR_B"] - CAT=cat_fits[cat_hdu].read(columns=cols_retrieve) + logger.error('Found multiple HDUs in %s containing linearity correction for CCDNUM %d. 
Aborting!', fname_lincor, image['CCDNUM']) + raise Exception() + + logger.info('Reading Linearity Correction from %s', fname_lincor) + cat_fits = fitsio.FITS(fname_lincor, 'r') + cat_hdu = lincor_hdu[0] + cols_retrieve = ["ADU", "ADU_LINEAR_A", "ADU_LINEAR_B"] + CAT = cat_fits[cat_hdu].read(columns=cols_retrieve) # # If columns do not get put into CAT in a predefined order then these utilities -# may be needed. RAG has them and can implement... left this way for now since it +# may be needed. RAG has them and can implement... left this way for now since it # currently duplicates imcorrect exactly # # CATcol=cat_fits[cat_hdu].get_colnames() @@ -60,16 +58,16 @@ def __call__(cls, image, fname_lincor): # # Define the correction being made. # - nonlinear=[] - linearA=[] - linearB=[] + nonlinear = [] + linearA = [] + linearB = [] for row in CAT: nonlinear.append(row[0]) linearA.append(row[1]) linearB.append(row[2]) - nonlinear=np.array(nonlinear) - linearA=np.array(linearA) - linearB=np.array(linearB) + nonlinear = np.array(nonlinear) + linearA = np.array(linearA) + linearB = np.array(linearB) interpA = interpolate.interp1d(nonlinear, linearA, kind='linear', copy=True) interpB = interpolate.interp1d(nonlinear, linearB, kind='linear', copy=True) logger.info('Applying Linearity Correction') @@ -78,21 +76,21 @@ def __call__(cls, image, fname_lincor): # Slice over the datasecs for each amplifier. 
# Apply the correction # - seca = section2slice( image['DATASECA']) - secb = section2slice( image['DATASECB']) + seca = section2slice(image['DATASECA']) + secb = section2slice(image['DATASECB']) # Only fix pixels that are in the range of the nonlinearity table - in_range=np.logical_and(image.data[seca]>=np.min(nonlinear), - image.data[seca]<=np.max(nonlinear)) - image.data[seca][in_range]=interpA(image.data[seca][in_range]) + in_range = np.logical_and(image.data[seca] >= np.min(nonlinear), + image.data[seca] <= np.max(nonlinear)) + image.data[seca][in_range] = interpA(image.data[seca][in_range]) - in_range=np.logical_and(image.data[secb]>=np.min(nonlinear), - image.data[secb]<=np.max(nonlinear)) - image.data[secb][in_range]=interpB(image.data[secb][in_range]) + in_range = np.logical_and(image.data[secb] >= np.min(nonlinear), + image.data[secb] <= np.max(nonlinear)) + image.data[secb][in_range] = interpB(image.data[secb][in_range]) image.write_key('LINCFIL', path.basename(fname_lincor), comment='Nonlinearity correction file') - - ret_code=0 + + ret_code = 0 return ret_code @@ -106,8 +104,8 @@ def step_run(cls, image, config): """ fname_lincor = config.get(cls.step_name, 'lincor') - logger.info('Linearity correction will be applied to %s' % image) - + logger.info('Linearity correction will be applied to %s', image) + ret_code = cls.__call__(image, fname_lincor) return ret_code @@ -115,7 +113,7 @@ def step_run(cls, image, config): def add_step_args(cls, parser): """Add arguments specific application of the BPM """ - parser.add_argument('--lincor', nargs=1, type=str, default=None, + parser.add_argument('--lincor', nargs=1, type=str, default=None, help='Linearity Correction Table') diff --git a/python/pixcorrect/make_mask.py b/python/pixcorrect/make_mask.py index a313a9d..1aca433 100644 --- a/python/pixcorrect/make_mask.py +++ b/python/pixcorrect/make_mask.py @@ -1,15 +1,15 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Apply BPM to mask plane and/or flag 
saturated pixels """ +import time from os import path import numpy as np -import time from pixcorrect.corr_util import logger, items_must_match -from despyfits.DESImage import DESImage, DESBPMImage, section2slice -from despyfits.maskbits import * from pixcorrect.PixCorrectDriver import PixCorrectImStep from pixcorrect import decaminfo +from despyfits.DESImage import DESBPMImage, section2slice +from despyfits.maskbits import * # Which section of the config file to read for this step config_section = 'mask' @@ -20,7 +20,7 @@ class MakeMask(PixCorrectImStep): DEFAULT_SATURATE = False DEFAULT_CLEAR = False - + @classmethod def __call__(cls, image, bpm_im, saturate, clear): """Create or update the mask plane of an image @@ -48,22 +48,22 @@ def __call__(cls, image, bpm_im, saturate, clear): # Check for header keyword of whether it's been done kw = 'DESSAT' if kw in image.header.keys() and not clear: - logger.warning('Skipping saturation check ('+kw+' already set)') + logger.warning('Skipping saturation check (' + kw + ' already set)') else: logger.info('Flagging saturated pixels') nsat = 0 for amp in decaminfo.amps: - sec = section2slice(image['DATASEC'+amp]) - sat = image['SATURAT'+amp] - satpix = image.data[sec]>=sat + sec = section2slice(image['DATASEC' + amp]) + sat = image['SATURAT' + amp] + satpix = image.data[sec] >= sat image.mask[sec][satpix] |= BADPIX_SATURATE nsat += np.count_nonzero(satpix) image.write_key(kw, time.asctime(time.localtime()), - comment = 'Flag saturated pixels') - image.write_key('NSATPIX',nsat, + comment='Flag saturated pixels') + image.write_key('NSATPIX', nsat, comment='Number of saturated pixels') - + logger.debug('Finished flagging saturated pixels') #Now fill in BPM @@ -71,7 +71,7 @@ def __call__(cls, image, bpm_im, saturate, clear): # Check for header keyword of whether it's been done kw = 'DESBPM' if kw in image.header.keys() and not clear: - logger.warning('Skipping BPM application ('+kw+' already set)') + logger.warning('Skipping BPM 
application (' + kw + ' already set)') else: logger.info('Applying BPM') try: @@ -98,7 +98,7 @@ def __call__(cls, image, bpm_im, saturate, clear): BPMDEF_BIAS_COL | \ BPMDEF_FUNKY_COL | \ BPMDEF_WACKY_PIX - # ERICM Removed BPMDEF_CORR and added FUNKY_COL to the above list + # ERICM Removed BPMDEF_CORR and added FUNKY_COL to the above list mark = (bpm_im.mask & bitmask) != 0 image.mask[mark] |= BADPIX_BPM @@ -134,42 +134,42 @@ def __call__(cls, image, bpm_im, saturate, clear): # For each column find the number of pixels flagged as BIAS_HOT and BIAS_COL N_BIAS_HOT = np.sum((bpm_im.mask & BPMDEF_BIAS_HOT) > 0, axis=0) N_BIAS_COL = np.sum((bpm_im.mask & BPMDEF_BIAS_COL) > 0, axis=0) - maskwidth=bpm_im.mask.shape[1] + maskwidth = bpm_im.mask.shape[1] # First do columns with N_BIAS_COL set for 1 or more pixels - biascols=np.arange(maskwidth)[(N_BIAS_COL > 0)] + biascols = np.arange(maskwidth)[(N_BIAS_COL > 0)] for icol in biascols: - #Clear FUNKY_COL bit if set for all pixels in this column - #The reason for clearing the bit is that the FUNKY_COL detection is - #sensitive to hot bias pixels and may flag those columns by "mistake" - #First clear BAD BPM bit if set because of funky column - image.mask[:,icol][bpm_im.mask[:,icol]==BPMDEF_FUNKY_COL] &= ~BADPIX_BPM - bpm_im.mask[:,icol] -= (bpm_im.mask[:,icol] & BPMDEF_FUNKY_COL ) - #Correctable columns have exactly 1 BIAS_HOT pixel - if N_BIAS_HOT[icol] == 1: - #Correctable pixels have BIAS_COL bit set - bpm_im.mask[:,icol][(bpm_im.mask[:,icol]&BPMDEF_BIAS_COL)>0] |= BPMDEF_CORR - logger.info('Column '+str(icol)+' has 1 hot pixel and is correctable.') - else: - logger.info('Column '+str(icol)+' has '+str(N_BIAS_HOT[icol])+' hot pixels and is NOT correctable.') + #Clear FUNKY_COL bit if set for all pixels in this column + #The reason for clearing the bit is that the FUNKY_COL detection is + #sensitive to hot bias pixels and may flag those columns by "mistake" + #First clear BAD BPM bit if set because of funky column + 
image.mask[:, icol][bpm_im.mask[:, icol] == BPMDEF_FUNKY_COL] &= ~BADPIX_BPM + bpm_im.mask[:, icol] -= (bpm_im.mask[:, icol] & BPMDEF_FUNKY_COL) + #Correctable columns have exactly 1 BIAS_HOT pixel + if N_BIAS_HOT[icol] == 1: + #Correctable pixels have BIAS_COL bit set + bpm_im.mask[:, icol][(bpm_im.mask[:, icol] & BPMDEF_BIAS_COL) > 0] |= BPMDEF_CORR + logger.info('Column ' + str(icol) + ' has 1 hot pixel and is correctable.') + else: + logger.info('Column ' + str(icol) + ' has ' + str(N_BIAS_HOT[icol]) + ' hot pixels and is NOT correctable.') #Now do columns with FUNKY_COL set. Note that the FUNKY_COL bits have been cleared above #for hot bias columns N_FUNKY_COL = np.sum((bpm_im.mask & BPMDEF_FUNKY_COL) > 0, axis=0) - funkycols=np.arange(maskwidth)[(N_FUNKY_COL > 0)] + funkycols = np.arange(maskwidth)[(N_FUNKY_COL > 0)] for icol in funkycols: - #Correctable pixels have FUNKY_COL bit set - bpm_im.mask[:,icol][(bpm_im.mask[:,icol]&BPMDEF_FUNKY_COL)>0] |= BPMDEF_CORR - logger.info('Column '+str(icol)+' is funky and correctable.') + #Correctable pixels have FUNKY_COL bit set + bpm_im.mask[:, icol][(bpm_im.mask[:, icol] & BPMDEF_FUNKY_COL) > 0] |= BPMDEF_CORR + logger.info('Column ' + str(icol) + ' is funky and correctable.') + - image[kw] = time.asctime(time.localtime()) image.write_key(kw, time.asctime(time.localtime()), - comment = 'Construct mask from BPM') + comment='Construct mask from BPM') if bpm_im.sourcefile is None: image.write_key('BPMFIL', 'UNKNOWN', comment='BPM file used to build mask') else: image.write_key('BPMFIL', path.basename(bpm_im.sourcefile), comment='BPM file used to build mask') - + logger.debug('Finished applying BPM') return ret_code @@ -199,7 +199,7 @@ def step_run(cls, image, config): clear = config.getboolean(cls.step_name, 'clear') else: clear = DEFAULT_CLEAR - + ret_code = cls.__call__(image, bpm_im, saturate, clear) return ret_code @@ -207,7 +207,7 @@ def step_run(cls, image, config): def add_step_args(cls, parser): """Add arguments 
specific application of the BPM """ - parser.add_argument('-b', '--bpm', + parser.add_argument('-b', '--bpm', help='bad pixel mask filename (optional)') parser.add_argument('--saturate', action='store_true', help='Flag saturated pixels') diff --git a/python/pixcorrect/mask_saturation.py b/python/pixcorrect/mask_saturation.py index 584e4d4..7a73a23 100755 --- a/python/pixcorrect/mask_saturation.py +++ b/python/pixcorrect/mask_saturation.py @@ -1,17 +1,14 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Mask saturated pixels """ # imports import ctypes -from os import path -import numpy as np from pixcorrect.dbc import postcondition -from pixcorrect import proddir from pixcorrect.corr_util import logger, load_shlib from pixcorrect.corr_util import do_once, no_lib_error -from despyfits.DESImage import DESImage, DESImageCStruct from pixcorrect.PixCorrectDriver import PixCorrectImStep +from despyfits.DESImage import DESImageCStruct # constants @@ -33,11 +30,11 @@ class MaskSaturation(PixCorrectImStep): step_name = config_section @classmethod - @do_once(1,'DESSAT') + @do_once(1, 'DESSAT') @postcondition(no_lib_error) def __call__(cls, image): """Mark saturated pixels in the mask of an image - + :Parameters: - `image`: the DESImage in which to mask saturated pixels @@ -46,7 +43,7 @@ def __call__(cls, image): logger.info('Masking saturated pixels') num_saturated = ctypes.c_int() c_call_status = mask_saturation_c(image.cstruct, num_saturated) - logger.info('Masked %d pixels as saturated' % num_saturated.value) + logger.info('Masked %d pixels as saturated', num_saturated.value) return c_call_status mask_saturation = MaskSaturation() diff --git a/python/pixcorrect/mini_compare.py b/python/pixcorrect/mini_compare.py index 2c31412..8fcb2cb 100644 --- a/python/pixcorrect/mini_compare.py +++ b/python/pixcorrect/mini_compare.py @@ -1,15 +1,11 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Compare two compressed DES images, report size of deviations """ -from os import 
path import numpy as np -from ConfigParser import SafeConfigParser, NoOptionError -from pixcorrect import proddir from pixcorrect.corr_util import logger -from despyfits.DESImage import DESDataImage, DESImage from pixcorrect.PixCorrectDriver import PixCorrectImStep from pixcorrect import skyinfo @@ -22,7 +18,7 @@ class MiniCompare(PixCorrectImStep): " and maximum fractional deviations, and a keyword FACTOR giving the overall flux factor" \ " used to normalize them." step_name = config_section - + @classmethod def __call__(cls, in_filename, ref_filename, out_filename, edge=None): """ @@ -37,7 +33,7 @@ def __call__(cls, in_filename, ref_filename, out_filename, edge=None): giving the overall flux factor used to normalize them. - 'edge': number of compressed pixels along each CCD edge to ignore in calculating stats """ - + logger.info('Comparing compressed image' + in_filename + " to reference " + ref_filename) indata = skyinfo.MiniDecam.load(in_filename) @@ -47,8 +43,8 @@ def __call__(cls, in_filename, ref_filename, out_filename, edge=None): raise skyinfo.SkyError("Input and reference are not matching compressions of DECam") resid = indata.vector() / ref.vector() - if edge is not None and edge>0: - stats = resid[indata.edges(edge).vector()==0] + if edge is not None and edge > 0: + stats = resid[indata.edges(edge).vector() == 0] else: stats = np.array(resid) factor = np.median(stats) @@ -62,17 +58,17 @@ def __call__(cls, in_filename, ref_filename, out_filename, edge=None): indata.header['FACTOR'] = factor indata.header['RMS'] = rms indata.header['WORST'] = worst - if edge is not None and edge>0: + if edge is not None and edge > 0: indata.header['EDGE'] = edge indata.save(out_filename) - logger.info('Normalization factor: %f' % factor) - logger.info('RMS deviation: %f' % rms) - logger.info('Worst deviation: %f' % worst) + logger.info('Normalization factor: %f', factor) + logger.info('RMS deviation: %f', rms) + logger.info('Worst deviation: %f', worst) # Create a 
one-line binary fits table to hold the coefficients logger.debug('Finished image comparison') - ret_code=0 + ret_code = 0 return ret_code @classmethod @@ -87,23 +83,22 @@ def step_run(cls, config): in_filename = config.get(cls.step_name, 'in') out_filename = config.get(cls.step_name, 'out') ref_filename = config.get(cls.step_name, 'ref') - if config.has_option(cls.step_name,'edge'): - edge = config.getint(cls.step_name,'edge') + if config.has_option(cls.step_name, 'edge'): + edge = config.getint(cls.step_name, 'edge') else: edge = None - - ret_code = cls.__call__(in_filename, ref_filename, out_filename,edge) + + ret_code = cls.__call__(in_filename, ref_filename, out_filename, edge) return ret_code @classmethod def add_step_args(cls, parser): """Add arguments specific to sky compression """ - parser.add_argument('-r','--ref',type=str, + parser.add_argument('-r', '--ref', type=str, help='Filename for reference compressed image') - parser.add_argument('--edge',type=int, + parser.add_argument('--edge', type=int, help='Number of compressed pixels to ignore at CCD edges') - return @classmethod def run(cls, config): diff --git a/python/pixcorrect/normalize_flat.py b/python/pixcorrect/normalize_flat.py index c807950..62d4fbe 100755 --- a/python/pixcorrect/normalize_flat.py +++ b/python/pixcorrect/normalize_flat.py @@ -1,18 +1,15 @@ -#!/usr/bin/env python -"""Apply a flat correction to a raw DES image +#!/usr/bin/env python3 +"""Apply a flat correction to a raw DES image """ -import ctypes -import sys import os #from os import path import fitsio import numpy as np -from pixcorrect import proddir -from pixcorrect.corr_util import logger, load_shlib -from despyfits.DESImage import DESImage, DESImageCStruct, scan_fits_section, data_dtype +from pixcorrect.corr_util import logger from pixcorrect.PixCorrectDriver import PixCorrectImStep from pixcorrect import decaminfo +from despyfits.DESImage import DESImage, scan_fits_section # Which section of the config file to read for this 
step config_section = 'normflat' @@ -37,46 +34,46 @@ def __call__(cls, inlist, ccdnorm, ampborder): Applies the correction to each input and writes a separate output file. """ - + logger.info('Initial Read of Flat Field Headers') # - norm_list=[] - scalmean_list=[] - normval=None + norm_list = [] + scalmean_list = [] + normval = None # try: - f1=open(inlist,'r') + f1 = open(inlist, 'r') for line in f1: - line=line.strip() - columns=line.split() - if (os.path.isfile(columns[0])): - tmp_dict={} - tmp_dict['fname']=columns[0] - tmp_dict['oname']=columns[1] - if (tmp_dict['fname'][-2:] == "fz"): - sci_hdu=1 # for .fz + line = line.strip() + columns = line.split() + if os.path.isfile(columns[0]): + tmp_dict = {} + tmp_dict['fname'] = columns[0] + tmp_dict['oname'] = columns[1] + if tmp_dict['fname'][-2:] == "fz": + sci_hdu = 1 # for .fz else: - sci_hdu=0 # for .fits (or .gz) - temp_fits=fitsio.FITS(tmp_dict['fname'],'r') - temp_head=temp_fits[sci_hdu].read_header() + sci_hdu = 0 # for .fits (or .gz) + temp_fits = fitsio.FITS(tmp_dict['fname'], 'r') + temp_head = temp_fits[sci_hdu].read_header() # # Get the CCD number # try: - tmp_dict['ccdnum']=int(temp_head['CCDNUM']) + tmp_dict['ccdnum'] = int(temp_head['CCDNUM']) except: - if (ccdnorm < 1): - tmp_dict['ccdnum']=-1 - pass + if ccdnorm < 1: + tmp_dict['ccdnum'] = -1 + else: print("Warning: image {:s} did not have a CCDNUM keyword!".format(tmp_dict['fname'])) - pass + # # Get the SCALMEAN value # try: - tmp_dict['scalmean']=float(temp_head['SCALMEAN']) + tmp_dict['scalmean'] = float(temp_head['SCALMEAN']) except: raise ValueError("Image %s did not have a SCALMEAN keyword. Aborting!" % tmp_dict['fname']) # @@ -89,56 +86,56 @@ def __call__(cls, inlist, ccdnorm, ampborder): f1.close() except: # -# Input file was not present. +# Input file was not present. # # (type, value, trback)=sys.exc_info() # print("{:s} {:s} {:s} \n".format(inlist,type,value)) - raise IOError("File not found. 
Missing input list %s " % inlist ) + raise IOError("File not found. Missing input list %s " % inlist) # # All information is now present. Determine the value that will be used in normalization. # - if (ccdnorm > 1): + if ccdnorm > 1: for tmp_rec in norm_list: - if (normval is None): - if (tmp_rec['ccdnum']==ccdnorm): - normval=tmp_rec['ccdnum'] + if normval is None: + if tmp_rec['ccdnum'] == ccdnorm: + normval = tmp_rec['ccdnum'] else: - if (tmp_rec['ccdnum']==ccdnorm): + if tmp_rec['ccdnum'] == ccdnorm: print("Warning: More than one image with CCDNUM={:d} identified") - if (normval is None): + if normval is None: raise ValueError("No image with CCDNUM=%d found among input list. Aborting!" % ccdnorm) - logger.info('Normaliztion: %.2f set based on value from CCD %d ' % (normval,ccdnorm)) + logger.info('Normaliztion: %.2f set based on value from CCD %d ', normval, ccdnorm) else: - a_scalmean=np.array(scalmean_list) - normval=np.median(a_scalmean) - logger.info('Normaliztion: %.2f set based on median value of the ensemble ' % normval ) + a_scalmean = np.array(scalmean_list) + normval = np.median(a_scalmean) + logger.info('Normaliztion: %.2f set based on median value of the ensemble ', normval) # # Go ahead and normalize the set # logger.info('Normalizing list') for tmp_record in norm_list: - logger.info('Working on image: %s ' % (tmp_record['fname']) ) - image=DESImage.load(tmp_record['fname']) - nfactor=tmp_record['scalmean']/normval - nfactor2=nfactor*nfactor - logger.info(' CCD: %2d, relative normalization factor: %.5f ' % (tmp_record['ccdnum'],nfactor) ) - image.data*=nfactor - image.weight*=nfactor2 + logger.info('Working on image: %s ', tmp_record['fname']) + image = DESImage.load(tmp_record['fname']) + nfactor = tmp_record['scalmean'] / normval + nfactor2 = nfactor * nfactor + logger.info(' CCD: %2d, relative normalization factor: %.5f ', tmp_record['ccdnum'], nfactor) + image.data *= nfactor + image.weight *= nfactor2 # # Create keywords that reflect the 
median value of the flat on each amp. # for amp in decaminfo.amps: - datasecn=scan_fits_section(image,'DATASEC'+amp) - datasecn[0]=datasecn[0]+ampborder - datasecn[1]=datasecn[1]-ampborder - datasecn[2]=datasecn[2]+ampborder - datasecn[3]=datasecn[3]-ampborder - image['FLATMED'+amp]=np.median(image.data[datasecn[2]:datasecn[3]+1,datasecn[0]:datasecn[1]+1]) - - DESImage.save(image,tmp_record['oname']) + datasecn = scan_fits_section(image, 'DATASEC' + amp) + datasecn[0] += ampborder + datasecn[1] -= ampborder + datasecn[2] += ampborder + datasecn[3] -= ampborder + image['FLATMED' + amp] = np.median(image.data[datasecn[2]:datasecn[3] + 1, datasecn[0]:datasecn[1] + 1]) + + DESImage.save(image, tmp_record['oname']) logger.debug('Finished applying Flat') - ret_code=0 + ret_code = 0 return ret_code diff --git a/python/pixcorrect/null_weights.py b/python/pixcorrect/null_weights.py index 0fed611..649c81c 100644 --- a/python/pixcorrect/null_weights.py +++ b/python/pixcorrect/null_weights.py @@ -1,15 +1,13 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Apply BPM to mask plane and/or flag saturated pixels """ -from os import path +import time import numpy as np -from pixcorrect.corr_util import logger -from despyfits.DESImage import DESImage from despyfits import maskbits from pixcorrect.PixCorrectDriver import PixCorrectImStep from pixcorrect import decaminfo -import time +from pixcorrect.corr_util import logger # Which section of the config file to read for this step config_section = 'nullweight' @@ -17,6 +15,7 @@ class NullWeightsError(Exception): def __init__(self, value): self.value = value + super().__init__() def __str__(self): return repr(self.value) @@ -26,7 +25,7 @@ class NullWeights(PixCorrectImStep): DEFAULT_RESATURATE = False DEFAULT_NULL_MASK = '0' - + @classmethod def __call__(cls, image, null_mask, resaturate): """Create or update the mask plane of an image @@ -43,41 +42,40 @@ def __call__(cls, image, null_mask, resaturate): if image.mask is None: raise 
NullWeightsError('Mask is missing in image') - if null_mask!=0: + if null_mask != 0: logger.info('Nulling weight image from mask bits') - + if image.weight is None and image.variance is None: raise NullWeightsError('Weight is missing in image') weight = image.get_weight() - kill = np.array( image.mask & null_mask, dtype=bool) + kill = np.array(image.mask & null_mask, dtype=bool) weight[kill] = 0. - image['HISTORY'] =time.asctime(time.localtime()) + \ + image['HISTORY'] = time.asctime(time.localtime()) + \ ' Null weights with mask 0x{:04X}'.format(null_mask) logger.debug('Finished nulling weight image') - + if resaturate: logger.info('Re-saturating pixels from mask bits') - sat = np.array( image.mask & maskbits.BADPIX_SATURATE, dtype=bool) + sat = np.array(image.mask & maskbits.BADPIX_SATURATE, dtype=bool) try: saturation_level = image['SATURATE'] - except (ValueError,KeyError): + except (ValueError, KeyError): # If there is no SATURATE, try taking max of amps maxsat = 0. try: for amp in decaminfo.amps: - maxsat = max(maxsat, image['SATURAT'+amp]) + maxsat = max(maxsat, image['SATURAT' + amp]) except: logger.error('SATURATx header keywords not found') raise NullWeightsError('SATURATx header keywords not found') saturation_level = maxsat logger.warning('Taking SATURATE as max of single-amp SATURATx values') - + image.data[sat] = 1.01 * saturation_level image['HISTORY'] = time.asctime(time.localtime()) + \ ' Set saturated pixels to {:.0f}'.format(saturation_level) logger.debug('Finished nulling weight image') - ret_code = 0 return ret_code @@ -97,7 +95,7 @@ def step_run(cls, image, config): # for option in config.options(section): # print " ", option, "=", config.get(section, option) #print "########" - + if config.has_option(cls.step_name, 'null_mask'): null_mask = maskbits.parse_badpix_mask(config.get(cls.step_name, 'null_mask')) else: diff --git a/python/pixcorrect/pixcorrect_cp.py b/python/pixcorrect/pixcorrect_cp.py index feb82e5..33c9d4c 100755 --- 
a/python/pixcorrect/pixcorrect_cp.py +++ b/python/pixcorrect/pixcorrect_cp.py @@ -1,20 +1,12 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Do image-by-image pixel level corrections """ # imports -from functools import partial -import ctypes import sys -import numpy as np -import pyfits +from despyfits.DESImage import DESImage, DESBPMImage -from despyfits.DESImage import DESImage, DESBPMImage, section2slice - -from pixcorrect import corr_util -from pixcorrect import imtypes -from pixcorrect.dbc import precondition, postcondition from pixcorrect.corr_util import logger from pixcorrect.bias_correct import bias_correct @@ -40,7 +32,7 @@ class PixCorrectCP(PixCorrectMultistep): step_name = config_section description = 'Do image-by-image pixel level corrections' _image_types = {'bpm': DESBPMImage} - + def image_data(self, image_name): """Return a DESImage object for a configured image @@ -63,30 +55,29 @@ def image_data(self, image_name): fname = self.config.get(self.config_section, image_name) im = image_class.load(fname) - logger.info('Reading %s image from %s' % (image_name, fname)) + logger.info('Reading %s image from %s', image_name, fname) self._image_data[image_name] = im return im @classmethod - def _check_return(cls,retval): + def _check_return(cls, retval): """ Exit the program if the retval is nonzero. """ - if retval!=0: + if retval != 0: sys.exit(retval) - return - + def __call__(self): """Do image-by-image pixel level corrections """ - # All the code here, asside from one call for each step, should + # All the code here, asside from one call for each step, should # be assiciated with shoveling data between steps. Everything else should # take inside the code for its respective step. 
# Get the science image - self.sci = DESImage.load(self.config.get('pixcorrect_cp','in')) + self.sci = DESImage.load(self.config.get('pixcorrect_cp', 'in')) # Bias subtraction if self.do_step('bias'): @@ -95,8 +86,8 @@ def __call__(self): # Linearization if self.do_step('lincor'): - lincor_fname=self.config.get('pixcorrect_cp','lincor') - self._check_return(linearity_correct(self.sci,lincor_fname)) + lincor_fname = self.config.get('pixcorrect_cp', 'lincor') + self._check_return(linearity_correct(self.sci, lincor_fname)) # Make the mask plane and mark saturated pixels. Note that flags # are set to mark saturated pixels and keep any previously existing mask bits. @@ -106,30 +97,30 @@ def __call__(self): saturate=True, clear=False)) - flat_gaincorrect = self.config.getboolean('pixcorrect_cp','flat_gaincorrect') + flat_gaincorrect = self.config.getboolean('pixcorrect_cp', 'flat_gaincorrect') # get gains ahead of time so that jump can be removed from a flat with no gain correction - gain_preserve={} - if (flat_gaincorrect): - tmp_gains={} - avg_gain=0.0 + gain_preserve = {} + if flat_gaincorrect: + tmp_gains = {} + avg_gain = 0.0 for amp in decaminfo.amps: tmp_gains[amp] = self.sci['GAIN'+amp] - avg_gain=avg_gain+tmp_gains[amp] + avg_gain = avg_gain + tmp_gains[amp] for amp in decaminfo.amps: - gain_preserve[amp]=2.0*tmp_gains[amp]/avg_gain + gain_preserve[amp] = 2.0 * tmp_gains[amp] / avg_gain # print avg_gain # print gain_preserve if self.do_step('gain'): self._check_return(gain_correct(self.sci)) - + # B/F correction if self.do_step('bf'): bf_fname = self.config.get('pixcorrect_cp', 'bf') self._check_return(bf_correct(self.sci, bf_fname, bfinfo.DEFAULT_BFMASK)) - + # If done with the BPM; let python reclaim the memory if not self.do_step('fixcol'): self.clean_im('bpm') @@ -138,7 +129,7 @@ def __call__(self): # Flat field if self.do_step('flat'): # allow_mismatch = self.config.get('pixcorrect_cp','flat_gaincorrect') - print "flat_gaincorrect: ",flat_gaincorrect + 
print("flat_gaincorrect: ", flat_gaincorrect) # for amp in decaminfo.amps: # self.flat[gain_preserve[amp]['sec']]*=gain_preserve[amp]['cor'] self._check_return(flat_correct_cp(self.sci, self.flat, gain_preserve)) @@ -152,8 +143,8 @@ def __call__(self): # Make mini-sky image if self.do_step('mini'): - mini = self.config.get('pixcorrect_cp','mini') - blocksize = self.config.getint('pixcorrect_cp','blocksize') + mini = self.config.get('pixcorrect_cp', 'mini') + blocksize = self.config.getint('pixcorrect_cp', 'blocksize') self._check_return(sky_compress(self.sci, mini, blocksize, @@ -161,8 +152,8 @@ def __call__(self): # Subtract sky and make weight plane - forcing option to do "sky-only" weight if self.do_step('sky'): - sky_fname = self.config.get('pixcorrect_cp','sky') - fit_fname = self.config.get('pixcorrect_cp','skyfit') + sky_fname = self.config.get('pixcorrect_cp', 'sky') + fit_fname = self.config.get('pixcorrect_cp', 'skyfit') self._check_return(sky_subtract(self.sci, fit_fname, sky_fname, @@ -170,7 +161,7 @@ def __call__(self): self.flat)) if not self.do_step('addweight'): self.clean_im('flat') - + # Star flatten if self.do_step('starflat'): self._check_return(starflat_correct(self.sci, self.starflat)) @@ -186,7 +177,7 @@ def __call__(self): # We need to fix the step_name if we want to call 'step_run' null_weights.__class__.step_name = self.config_section logger.info("Running null_weights") - self._check_return(null_weights.step_run(self.sci,self.config)) + self._check_return(null_weights.step_run(self.sci, self.config)) out_fname = self.config.get('pixcorrect_cp', 'out') self.sci.save(out_fname) @@ -199,13 +190,13 @@ def add_step_args(cls, parser): """ parser.add_argument('--bias', default=None, help='Bias correction image') - parser.add_argument('--lincor', default=None, + parser.add_argument('--lincor', default=None, help='linearity correction Table') - parser.add_argument('--bf', default=None, + parser.add_argument('--bf', default=None, 
help='brighter/fatter correction Table') parser.add_argument('--gain', action='store_true', default=False, help='convert ADU to e- using gain values in hdr') - parser.add_argument('--bpm', default=None, + parser.add_argument('--bpm', default=None, help='bad pixel mask filename') parser.add_argument('--flat', default=None, help='Dome flat correction image') @@ -230,7 +221,5 @@ def add_step_args(cls, parser): # Adds --resaturate and --null_mask from null_weights class null_weights.add_step_args(parser) - return - if __name__ == '__main__': PixCorrectCP.main() diff --git a/python/pixcorrect/pixcorrect_im.py b/python/pixcorrect/pixcorrect_im.py index 2135e05..9f6608b 100755 --- a/python/pixcorrect/pixcorrect_im.py +++ b/python/pixcorrect/pixcorrect_im.py @@ -1,20 +1,18 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 + +# $Id: pixcorrect_im.py 47952 2019-01-03 21:04:53Z rgruendl $ +# $Rev:: 47952 $: # Revision of last commit. +# $LastChangedBy:: rgruendl $: # Author of last commit. +# $LastChangedDate:: 2019-01-03 15:04:53 #$: # Date of last commit. 
+ """Do image-by-image pixel level corrections """ # imports -from functools import partial -import ctypes import sys -import numpy as np -import pyfits - from despyfits.DESImage import DESImage, DESBPMImage -from pixcorrect import corr_util -from pixcorrect import imtypes -from pixcorrect.dbc import precondition, postcondition from pixcorrect.corr_util import logger from pixcorrect.bias_correct import bias_correct @@ -28,6 +26,8 @@ from pixcorrect.sky_subtract import sky_subtract from pixcorrect.bf_correct import bf_correct from pixcorrect.add_weight import add_weight +from pixcorrect.lightbulb import lightbulb +from pixcorrect.cti import cticheck from pixcorrect.starflat_correct import starflat_correct from pixcorrect import bfinfo from pixcorrect import skyinfo @@ -39,7 +39,7 @@ class PixCorrectIm(PixCorrectMultistep): step_name = config_section description = 'Do image-by-image pixel level corrections' _image_types = {'bpm': DESBPMImage} - + def image_data(self, image_name): """Return a DESImage object for a configured image @@ -62,30 +62,30 @@ def image_data(self, image_name): fname = self.config.get(self.config_section, image_name) im = image_class.load(fname) - logger.info('Reading %s image from %s' % (image_name, fname)) + logger.info('Reading %s image from %s', image_name, fname) self._image_data[image_name] = im return im @classmethod - def _check_return(cls,retval): + def _check_return(cls, retval): """ Exit the program if the retval is nonzero. """ - if retval!=0: + if retval != 0: sys.exit(retval) - return - + def __call__(self): """Do image-by-image pixel level corrections """ - # All the code here, asside from one call for each step, should + + # All the code here, asside from one call for each step, should # be assiciated with shoveling data between steps. Everything else should # take inside the code for its respective step. 
# Get the science image - self.sci = DESImage.load(self.config.get('pixcorrect_im','in')) + self.sci = DESImage.load(self.config.get('pixcorrect_im', 'in')) # Bias subtraction if self.do_step('bias'): @@ -94,8 +94,8 @@ def __call__(self): # Linearization if self.do_step('lincor'): - lincor_fname=self.config.get('pixcorrect_im','lincor') - self._check_return(linearity_correct(self.sci,lincor_fname)) + lincor_fname = self.config.get('pixcorrect_im', 'lincor') + self._check_return(linearity_correct(self.sci, lincor_fname)) # Make the mask plane and mark saturated pixels. Note that flags # are set to mark saturated pixels and keep any previously existing mask bits. @@ -107,7 +107,7 @@ def __call__(self): if self.do_step('gain'): self._check_return(gain_correct(self.sci)) - + # If done with the BPM; let python reclaim the memory if not self.do_step('fixcols'): self.clean_im('bpm') @@ -123,17 +123,25 @@ def __call__(self): self._check_return(bf_correct(self.sci, bf_fname, bfinfo.DEFAULT_BFMASK)) - + # Flat field if self.do_step('flat'): self._check_return(flat_correct(self.sci, self.flat)) if not self.do_step('sky'): self.clean_im('flat') + # LightBulb + if self.do_step('lightbulb'): + self._check_return(lightbulb(self.sci)) + + # CTI Check + if self.do_step('cticheck'): + self._check_return(cticheck(self.sci)) + # Make mini-sky image if self.do_step('mini'): - mini = self.config.get('pixcorrect_im','mini') - blocksize = self.config.getint('pixcorrect_im','blocksize') + mini = self.config.get('pixcorrect_im', 'mini') + blocksize = self.config.getint('pixcorrect_im', 'blocksize') self._check_return(sky_compress(self.sci, mini, blocksize, @@ -141,8 +149,8 @@ def __call__(self): # Subtract sky and make weight plane - forcing option to do "sky-only" weight if self.do_step('sky'): - sky_fname = self.config.get('pixcorrect_im','sky') - fit_fname = self.config.get('pixcorrect_im','skyfit') + sky_fname = self.config.get('pixcorrect_im', 'sky') + fit_fname = 
self.config.get('pixcorrect_im', 'skyfit') self._check_return(sky_subtract(self.sci, fit_fname, sky_fname, @@ -150,7 +158,7 @@ def __call__(self): self.flat)) if not self.do_step('addweight'): self.clean_im('flat') - + # Star flatten if self.do_step('starflat'): self._check_return(starflat_correct(self.sci, self.starflat)) @@ -166,7 +174,7 @@ def __call__(self): # We need to fix the step_name if we want to call 'step_run' null_weights.__class__.step_name = self.config_section logger.info("Running null_weights") - self._check_return(null_weights.step_run(self.sci,self.config)) + self._check_return(null_weights.step_run(self.sci, self.config)) out_fname = self.config.get('pixcorrect_im', 'out') self.sci.save(out_fname) @@ -179,13 +187,13 @@ def add_step_args(cls, parser): """ parser.add_argument('--bias', default=None, help='Bias correction image') - parser.add_argument('--lincor', default=None, + parser.add_argument('--lincor', default=None, help='linearity correction Table') - parser.add_argument('--bf', default=None, + parser.add_argument('--bf', default=None, help='brighter/fatter correction Table') parser.add_argument('--gain', action='store_true', default=False, help='convert ADU to e- using gain values in hdr') - parser.add_argument('--bpm', default=None, + parser.add_argument('--bpm', default=None, help='bad pixel mask filename') parser.add_argument('--flat', default=None, help='Dome flat correction image') @@ -204,11 +212,14 @@ def add_step_args(cls, parser): help='Star flat correction image') parser.add_argument('--addweight', action='store_true', default=False, help='Add a weight map to the image if none exists') + parser.add_argument('--lightbulb', action='store_true', default=False, + help='Perform check for known lIghtbulb(s)') + parser.add_argument('--cticheck', action='store_true', default=False, + help='Perform check for CTI') # Adds --resaturate and --null_mask from null_weights class null_weights.add_step_args(parser) - return if __name__ == 
'__main__': PixCorrectIm.main() diff --git a/python/pixcorrect/row_interp.py b/python/pixcorrect/row_interp.py index 5db2f36..093c3cd 100644 --- a/python/pixcorrect/row_interp.py +++ b/python/pixcorrect/row_interp.py @@ -1,16 +1,12 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Fill bad pixels with values to the left and/or right on the same row. """ -from os import path -import numpy as np -from ConfigParser import SafeConfigParser, NoOptionError import time +import numpy as np -from pixcorrect import proddir from pixcorrect.corr_util import logger -from despyfits.DESImage import DESImage from pixcorrect.PixCorrectDriver import PixCorrectImStep from despyfits import maskbits @@ -21,25 +17,25 @@ class RowInterp(PixCorrectImStep): description = "Interpolate along rows using mean of pixel values to left " \ "and/or to right of regions of pixels targeted for interpolation." step_name = config_section - + DEFAULT_MINCOLS = 1 # Narrowest feature to interpolate DEFAULT_MAXCOLS = None # Widest feature to interpolate. None means no limit. 
DEFAULT_INTERP_MASK = maskbits.BADPIX_BPM + \ - maskbits.BADPIX_SATURATE +\ - maskbits.BADPIX_CRAY +\ - maskbits.BADPIX_STAR +\ - maskbits.BADPIX_TRAIL +\ - maskbits.BADPIX_EDGEBLEED +\ - maskbits.BADPIX_STREAK # Mask bits that trigger interpolation + maskbits.BADPIX_SATURATE +\ + maskbits.BADPIX_CRAY +\ + maskbits.BADPIX_STAR +\ + maskbits.BADPIX_TRAIL +\ + maskbits.BADPIX_EDGEBLEED +\ + maskbits.BADPIX_STREAK # Mask bits that trigger interpolation DEFAULT_INVALID_MASK = maskbits.BADPIX_BPM + \ - maskbits.BADPIX_SATURATE +\ - maskbits.BADPIX_BADAMP +\ - maskbits.BADPIX_CRAY +\ - maskbits.BADPIX_STAR +\ - maskbits.BADPIX_TRAIL +\ - maskbits.BADPIX_EDGEBLEED +\ - maskbits.BADPIX_EDGE +\ - maskbits.BADPIX_STREAK # Mask bits that invalidate a pixel as a source + maskbits.BADPIX_SATURATE +\ + maskbits.BADPIX_BADAMP +\ + maskbits.BADPIX_CRAY +\ + maskbits.BADPIX_STAR +\ + maskbits.BADPIX_TRAIL +\ + maskbits.BADPIX_EDGEBLEED +\ + maskbits.BADPIX_EDGE +\ + maskbits.BADPIX_STREAK # Mask bits that invalidate a pixel as a source # of data to use in interpolation. @classmethod @@ -62,7 +58,7 @@ def __call__(cls, image, - `interp_mask`: Mask bits that will trigger interpolation - `invalid_mask`: Mask bits invalidating a pixel as interpolation source. """ - + logger.info('Interpolating along rows') if image.mask is None: @@ -75,60 +71,60 @@ def __call__(cls, image, # Then make arrays has_?? which says whether left side is valid # and an array with the value just to the left/right of the run. 
work = np.array(interpolate) - work[:,1:] = np.logical_and(interpolate[:,1:], ~interpolate[:,:-1]) - ystart,xstart = np.where(work) + work[:, 1:] = np.logical_and(interpolate[:, 1:], ~interpolate[:, :-1]) + ystart, xstart = np.where(work) work = np.array(interpolate) - work[:,:-1] = np.logical_and(interpolate[:,:-1], ~interpolate[:,1:]) + work[:, :-1] = np.logical_and(interpolate[:, :-1], ~interpolate[:, 1:]) yend, xend = np.where(work) - xend = xend + 1 # Make the value one-past-end + xend += 1 # Make the value one-past-end # If we've done this correctly, every run has a start and an end. - if not np.all(ystart==yend): + if not np.all(ystart == yend): logger.error("Logic problem, ystart and yend not equal.") return 1 # Narrow our list to runs of the desired length range - use = xend-xstart >= min_cols + use = xend - xstart >= min_cols if max_cols is not None: - use = np.logical_and(xend-xstart<=max_cols, use) + use = np.logical_and(xend - xstart <= max_cols, use) xstart = xstart[use] xend = xend[use] ystart = ystart[use] # Now determine which runs have valid data at left/right - xleft = np.maximum(0, xstart-1) - has_left = ~np.array(image.mask[ystart,xleft] & invalid_mask, dtype=bool) - has_left = np.logical_and(xstart>=1,has_left) - left_value = image.data[ystart,xleft] - - xright = np.minimum(work.shape[1]-1, xend) - has_right = ~np.array(image.mask[ystart,xright] & invalid_mask, dtype=bool) - has_right = np.logical_and(xend= 1, has_left) + left_value = image.data[ystart, xleft] + + xright = np.minimum(work.shape[1] - 1, xend) + has_right = ~np.array(image.mask[ystart, xright] & invalid_mask, dtype=bool) + has_right = np.logical_and(xend < work.shape[1], has_right) + right_value = image.data[ystart, xright] + # Assign right-side value to runs having just right data - for run in np.where(np.logical_and(~has_left,has_right))[0]: - image.data[ystart[run],xstart[run]:xend[run]] = right_value[run] - image.mask[ystart[run],xstart[run]:xend[run]] |= 
maskbits.BADPIX_INTERP + for run in np.where(np.logical_and(~has_left, has_right))[0]: + image.data[ystart[run], xstart[run]:xend[run]] = right_value[run] + image.mask[ystart[run], xstart[run]:xend[run]] |= maskbits.BADPIX_INTERP # Assign left-side value to runs having just left data - for run in np.where(np.logical_and(has_left,~has_right))[0]: - image.data[ystart[run],xstart[run]:xend[run]] = left_value[run] - image.mask[ystart[run],xstart[run]:xend[run]] |= maskbits.BADPIX_INTERP + for run in np.where(np.logical_and(has_left, ~has_right))[0]: + image.data[ystart[run], xstart[run]:xend[run]] = left_value[run] + image.mask[ystart[run], xstart[run]:xend[run]] |= maskbits.BADPIX_INTERP # Assign mean of left and right to runs having both sides - for run in np.where(np.logical_and(has_left,has_right))[0]: - image.data[ystart[run],xstart[run]:xend[run]] = \ - 0.5*(left_value[run]+right_value[run]) - image.mask[ystart[run],xstart[run]:xend[run]] |= maskbits.BADPIX_INTERP + for run in np.where(np.logical_and(has_left, has_right))[0]: + image.data[ystart[run], xstart[run]:xend[run]] = \ + 0.5*(left_value[run] + right_value[run]) + image.mask[ystart[run], xstart[run]:xend[run]] |= maskbits.BADPIX_INTERP # Add to image history - image['HISTORY'] =time.asctime(time.localtime()) + \ + image['HISTORY'] = time.asctime(time.localtime()) + \ ' row_interp over mask 0x{:04X}'.format(interp_mask) - + logger.debug('Finished interpolating') - ret_code=0 + ret_code = 0 return ret_code @classmethod @@ -166,20 +162,19 @@ def step_run(cls, image, config): def add_step_args(cls, parser): """Add arguments specific to sky compression """ - parser.add_argument('--min_cols', nargs=1, - default=None, + parser.add_argument('--min_cols', nargs=1, + default=None, help='minimum width of region to interpolate') - parser.add_argument('--max_cols', nargs=1, - default=None, + parser.add_argument('--max_cols', nargs=1, + default=None, help='maximum width of region to interpolate') - 
parser.add_argument('--interp_mask', nargs=1, - default=None, + parser.add_argument('--interp_mask', nargs=1, + default=None, help='bitmask for MSK plane defining pixels to interpolate') - parser.add_argument('--invalid_mask', nargs=1, - default=None, + parser.add_argument('--invalid_mask', nargs=1, + default=None, help='bitmask for MSK plane defining pixels' ' unusable for interpolation') - return row_interp = RowInterp() diff --git a/python/pixcorrect/row_zipper.py b/python/pixcorrect/row_zipper.py index 76e1dac..acf7a17 100644 --- a/python/pixcorrect/row_zipper.py +++ b/python/pixcorrect/row_zipper.py @@ -1,16 +1,9 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Fill bad pixels with values to the left and/or right on the same row. """ -from os import path -import numpy as np -from ConfigParser import SafeConfigParser, NoOptionError -import time - -from pixcorrect import proddir from pixcorrect.corr_util import logger -from despyfits.DESImage import DESImage from pixcorrect.PixCorrectDriver import PixCorrectImStep from despyfits import maskbits from despyastro import zipper_interp as zipp @@ -23,40 +16,40 @@ class ZipperInterp(PixCorrectImStep): description = "Interpolate along rows using mean of pixel values to left " \ "and/or to right of regions of pixels targeted for interpolation." step_name = config_section - + DEFAULT_MINCOLS = 1 # Narrowest feature to interpolate DEFAULT_MAXCOLS = None # Widest feature to interpolate. None means no limit. 
DEFAULT_INTERP_MASK = maskbits.BADPIX_BPM + \ - maskbits.BADPIX_SATURATE +\ - maskbits.BADPIX_CRAY +\ - maskbits.BADPIX_STAR +\ - maskbits.BADPIX_TRAIL +\ - maskbits.BADPIX_EDGEBLEED +\ - maskbits.BADPIX_STREAK # Mask bits that trigger interpolation + maskbits.BADPIX_SATURATE +\ + maskbits.BADPIX_CRAY +\ + maskbits.BADPIX_STAR +\ + maskbits.BADPIX_TRAIL +\ + maskbits.BADPIX_EDGEBLEED +\ + maskbits.BADPIX_STREAK # Mask bits that trigger interpolation DEFAULT_INVALID_MASK = maskbits.BADPIX_BPM + \ - maskbits.BADPIX_SATURATE +\ - maskbits.BADPIX_BADAMP +\ - maskbits.BADPIX_CRAY +\ - maskbits.BADPIX_STAR +\ - maskbits.BADPIX_TRAIL +\ - maskbits.BADPIX_EDGEBLEED +\ - maskbits.BADPIX_EDGE +\ - maskbits.BADPIX_STREAK # Mask bits that invalidate a pixel as a source + maskbits.BADPIX_SATURATE +\ + maskbits.BADPIX_BADAMP +\ + maskbits.BADPIX_CRAY +\ + maskbits.BADPIX_STAR +\ + maskbits.BADPIX_TRAIL +\ + maskbits.BADPIX_EDGEBLEED +\ + maskbits.BADPIX_EDGE +\ + maskbits.BADPIX_STREAK # Mask bits that invalidate a pixel as a source # of data to use in interpolation. 
DEFAULT_BLOCK_SIZE = 1 DEFAULT_ADD_NOISE = False - DEFAULT_CLOBBER = False + DEFAULT_CLOBBER = False @classmethod def __call__(cls, image, mask, interp_mask=DEFAULT_INTERP_MASK, - BADPIX_INTERP= maskbits.BADPIX_INTERP, + BADPIX_INTERP=maskbits.BADPIX_INTERP, min_cols=DEFAULT_MINCOLS, max_cols=DEFAULT_MAXCOLS, invalid_mask=DEFAULT_INVALID_MASK, add_noise=DEFAULT_ADD_NOISE, - clobber = DEFAULT_CLOBBER, - block_size = DEFAULT_BLOCK_SIZE, + clobber=DEFAULT_CLOBBER, + block_size=DEFAULT_BLOCK_SIZE, logger=logger): """ Interpolate over selected pixels by inserting average of pixels to left and right @@ -76,7 +69,7 @@ def __call__(cls, image, mask, # Pass the locals as kwargs kwargs = locals() image, mask = zipp.zipper_interp_rows(**kwargs) - return image,mask + return image, mask @classmethod def step_run(cls, image, config): @@ -88,17 +81,17 @@ def step_run(cls, image, config): min_cols = config.getint(cls.step_name, 'min_cols') max_cols = config.getint(cls.step_name, 'max_cols') - interp_mask = maskbits.parse_badpix_mask(config.get(cls.step_name,'interp_mask')) - invalid_mask = maskbits.parse_badpix_mask(config.get(cls.step_name,'invalid_mask')) - add_noise = config.getboolean(cls.step_name, 'add_noise') - clobber = config.getboolean(cls.step_name, 'clobber') - block_size = config.getint(cls.step_name, 'block_size') + interp_mask = maskbits.parse_badpix_mask(config.get(cls.step_name, 'interp_mask')) + invalid_mask = maskbits.parse_badpix_mask(config.get(cls.step_name, 'invalid_mask')) + add_noise = config.getboolean(cls.step_name, 'add_noise') + clobber = config.getboolean(cls.step_name, 'clobber') + block_size = config.getint(cls.step_name, 'block_size') kwargs = locals() logger.info("Will run row_zipper function with:") for key in kwargs.keys(): - logger.info("--%s %s" % (key,kwargs[key])) + logger.info("--%s %s", key, kwargs[key]) # Now we call the function image.data, image.mask = cls.__call__(image.data, image.mask, @@ -109,17 +102,16 @@ def step_run(cls, image, 
config): add_noise=add_noise, block_size=block_size, clobber=clobber) - return @classmethod def add_step_args(cls, parser): """Add arguments specific to sky compression """ - parser.add_argument('--min_cols', nargs=1, default=cls.DEFAULT_MINCOLS, + parser.add_argument('--min_cols', nargs=1, default=cls.DEFAULT_MINCOLS, help='minimum width of region to interpolate') - parser.add_argument('--max_cols', nargs=1, default=cls.DEFAULT_MAXCOLS, + parser.add_argument('--max_cols', nargs=1, default=cls.DEFAULT_MAXCOLS, help='maximum width of region to interpolate') - parser.add_argument('--interp_mask', nargs=1, default=cls.DEFAULT_INTERP_MASK, + parser.add_argument('--interp_mask', nargs=1, default=cls.DEFAULT_INTERP_MASK, help='bitmask for MSK plane defining pixels to interpolate') parser.add_argument('--invalid_mask', nargs=1, default=cls.DEFAULT_INVALID_MASK, help='bitmask for MSK plane defining pixels unusable for interpolation') @@ -127,9 +119,8 @@ def add_step_args(cls, parser): help="Clobber output fits file") parser.add_argument("--add_noise", action='store_true', default=cls.DEFAULT_ADD_NOISE, help="Add Poisson Noise to the zipper") - parser.add_argument("--block_size", type=int,default=cls.DEFAULT_BLOCK_SIZE, + parser.add_argument("--block_size", type=int, default=cls.DEFAULT_BLOCK_SIZE, help="Block size of zipper in x-direction (row)") - return row_zipper = ZipperInterp() diff --git a/python/pixcorrect/rowinterp_nullweight.py b/python/pixcorrect/rowinterp_nullweight.py index 87fa31c..37ec4ce 100755 --- a/python/pixcorrect/rowinterp_nullweight.py +++ b/python/pixcorrect/rowinterp_nullweight.py @@ -1,14 +1,14 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 + +import time from pixcorrect.null_weights import null_weights from pixcorrect.row_interp import row_interp from pixcorrect.corr_util import logger from pixcorrect.PixCorrectDriver import PixCorrectMultistep -from despyfits.maskbits import parse_badpix_mask from despyfits.DESImage import DESImage from 
despymisc.miscutils import elapsed_time -import time class RowInterpNullWeight(PixCorrectMultistep): @@ -18,8 +18,8 @@ class RowInterpNullWeight(PixCorrectMultistep): # Fix the step_name for passing the command-line arguments to the classes null_weights.__class__.step_name = config_section - row_interp.__class__.step_name = config_section - + row_interp.__class__.step_name = config_section + def __call__(self): """ Run row_interp and null_weights in one step, we run the tasks @@ -28,27 +28,27 @@ def __call__(self): t0 = time.time() # Get the science image - input_image = self.config.get(self.config_section,'in') + input_image = self.config.get(self.config_section, 'in') self.sci = DESImage.load(input_image) # Run null_weights t1 = time.time() - logger.info("Running null_weights on: %s" % input_image) - null_weights.step_run(self.sci,self.config) - logger.info("Time NullWeights : %s" % elapsed_time(t1)) + logger.info("Running null_weights on: %s", input_image) + null_weights.step_run(self.sci, self.config) + logger.info("Time NullWeights : %s", elapsed_time(t1)) # Run row_interp t2 = time.time() - logger.info("Running row_interp on: %s" % input_image) - row_interp.step_run(self.sci,self.config) - logger.info("Time RowInterp : %s" % elapsed_time(t2)) - + logger.info("Running row_interp on: %s", input_image) + row_interp.step_run(self.sci, self.config) + logger.info("Time RowInterp : %s", elapsed_time(t2)) + # Write out the image output_image = self.config.get(self.config_section, 'out') self.sci.save(output_image) - logger.info("Wrote new file: %s" % output_image) - logger.info("Time Total: %s" % elapsed_time(t0)) - + logger.info("Wrote new file: %s", output_image) + logger.info("Time Total: %s", elapsed_time(t0)) + return 0 @classmethod @@ -57,7 +57,6 @@ def add_step_args(cls, parser): """ null_weights.add_step_args(parser) row_interp.add_step_args(parser) - return if __name__ == '__main__': RowInterpNullWeight.main() diff --git a/python/pixcorrect/scale_flat.py 
b/python/pixcorrect/scale_flat.py index cefc91f..d68fbaf 100755 --- a/python/pixcorrect/scale_flat.py +++ b/python/pixcorrect/scale_flat.py @@ -1,18 +1,12 @@ -#!/usr/bin/env python -"""Apply a flat correction to a raw DES image +#!/usr/bin/env python3 +"""Apply a flat correction to a raw DES image """ -import ctypes -import sys -import os -#from os import path -import fitsio import numpy as np -from pixcorrect import proddir -from pixcorrect.corr_util import logger, load_shlib -from despyfits.DESImage import DESImage, DESImageCStruct, scan_fits_section, data_dtype +from pixcorrect.corr_util import logger from pixcorrect.PixCorrectDriver import PixCorrectImStep from pixcorrect import decaminfo +from despyfits.DESImage import scan_fits_section # Which section of the config file to read for this step config_section = 'scaleflat' @@ -28,7 +22,7 @@ class ScaleFlat(PixCorrectImStep): step_name = config_section @classmethod - def __call__(cls, image, normfactor, ampborder ): + def __call__(cls, image, normfactor, ampborder): """Apply a flat field correction to an image :Parameters: @@ -38,32 +32,32 @@ def __call__(cls, image, normfactor, ampborder ): Applies the correction to each input and writes a separate output file. """ - + logger.info('Normalizing Flat Image') - scalmean=image['SCALMEAN'] - nfactor=scalmean/normfactor - nfactor2=nfactor*nfactor - logger.info('SCALMEAN=%.2f NORMFACTOR=%.2f NORMALIZATION=%.5f' % (scalmean,normfactor,nfactor) ) - image['NORMFACT']=normfactor + scalmean = image['SCALMEAN'] + nfactor = scalmean/normfactor + nfactor2 = nfactor*nfactor + logger.info('SCALMEAN=%.2f NORMFACTOR=%.2f NORMALIZATION=%.5f', scalmean, normfactor, nfactor) + image['NORMFACT'] = normfactor # - image.data*=nfactor + image.data *= nfactor if image.weight is not None: - image.weight*=nfactor2 + image.weight *= nfactor2 elif image.variance is not None: image.variance /= nfactor2 # # Create keywords that reflect the median value of the flat on each amp. 
# for amp in decaminfo.amps: - datasecn=scan_fits_section(image,'DATASEC'+amp) - datasecn[0]=datasecn[0]+ampborder - datasecn[1]=datasecn[1]-ampborder - datasecn[2]=datasecn[2]+ampborder - datasecn[3]=datasecn[3]-ampborder - image['FLATMED'+amp]=np.median(image.data[datasecn[2]:datasecn[3]+1,datasecn[0]:datasecn[1]+1]) - + datasecn = scan_fits_section(image, 'DATASEC' + amp) + datasecn[0] += ampborder + datasecn[1] -= ampborder + datasecn[2] += ampborder + datasecn[3] -= ampborder + image['FLATMED' + amp] = np.median(image.data[datasecn[2]:datasecn[3] + 1, datasecn[0]:datasecn[1] + 1]) + logger.debug('Finished applying normalization to Flat') - ret_code=0 + ret_code = 0 return ret_code @@ -81,13 +75,13 @@ def step_run(cls, image, config): normfactorfile = config.get(cls.step_name, 'normfactorfile') try: - f_normfactorfile=open(normfactorfile,'r') + f_normfactorfile = open(normfactorfile, 'r') except: raise IOError("File not found. Missing normfactorfile %s " % normfactorfile) for line in f_normfactorfile: - line=line.strip() - columns=line.split() - normfactor=float(columns[0]) + line = line.strip() + columns = line.split() + normfactor = float(columns[0]) f_normfactorfile.close() ampborder = config.getint(cls.step_name, 'ampborder') diff --git a/python/pixcorrect/sky_combine.py b/python/pixcorrect/sky_combine.py index be9842d..3c4212a 100755 --- a/python/pixcorrect/sky_combine.py +++ b/python/pixcorrect/sky_combine.py @@ -1,17 +1,12 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Combine all of the individual CCD's mini-sky images into one FITS image for whole array """ -from os import path -import numpy as np -from ConfigParser import SafeConfigParser, NoOptionError - -from pixcorrect import proddir from pixcorrect.corr_util import logger -from despyfits.DESImage import DESDataImage, DESImage from pixcorrect.PixCorrectDriver import PixCorrectDriver, filelist_to_list from pixcorrect import skyinfo +from despyfits.DESImage import DESDataImage # Which section 
of the config file to read for this step config_section = 'skycombine' @@ -19,9 +14,9 @@ class SkyCombine(PixCorrectDriver): description = "Combine sky images of all CCDs in one exposure" step_name = config_section - propagate = ['FILTER','DATE-OBS','EXPNUM','INSTRUME','BAND','NITE'] + propagate = ['FILTER', 'DATE-OBS', 'EXPNUM', 'INSTRUME', 'BAND', 'NITE'] # Header keywords to copy from a single-chip image into the output image - + @classmethod def __call__(cls, in_filenames, out_filename, mask_value, invalid): """ @@ -35,7 +30,7 @@ def __call__(cls, in_filenames, out_filename, mask_value, invalid): - `mask_value`: value inserted into sky pixels with no data - `invalid`: list of detpos values for CCDs to be left out of sky image """ - + logger.info('Combining sky') out = None @@ -43,9 +38,9 @@ def __call__(cls, in_filenames, out_filename, mask_value, invalid): for f in in_filenames: try: small = DESDataImage.load(f) - except (ValueError,IOError) as v: + except (ValueError, IOError): # A missing minisky file is not fatal: - logger.warning('SkyCombine could not load minisky '+f) + logger.warning('SkyCombine could not load minisky ' + f) continue if small['DETPOS'].strip() in invalid: # Skip any unwanted CCDs @@ -54,44 +49,43 @@ def __call__(cls, in_filenames, out_filename, mask_value, invalid): if out is None: out = skyinfo.MiniDecam(blocksize, mask_value, invalid) out.copy_header_info(small, cls.propagate, require=False) - + if blocksize != out.blocksize: raise skyinfo.SkyError('Mismatched blocksizes for SkyCombine') - out.fill(small.data,small['DETPOS'].strip()) + out.fill(small.data, small['DETPOS'].strip()) # Issue warnings for mismatches of data, but skip if # quantities are not in headers - for k in ('BAND','NITE','EXPNUM'): + for k in ('BAND', 'NITE', 'EXPNUM'): try: v1 = out[k] v2 = small[k] - if not v1==v2: + if v1 != v2: logger.warning('Mismatched {:s} in file {:s}: {} vs {}'.\ - format(k,f,v1,v2)) + format(k, f, v1, v2)) except: pass 
out.save(out_filename) logger.debug('Finished sky combination') - ret_code=0 + ret_code = 0 return ret_code @classmethod def add_step_args(cls, parser): """Add arguments specific to sky compression """ - parser.add_argument('--ccdnums',type=str,default=skyinfo.DEFAULT_CCDNUMS, + parser.add_argument('--ccdnums', type=str, default=skyinfo.DEFAULT_CCDNUMS, help='Range(s) of ccdnums to combine') - parser.add_argument('--miniskyfiles',type=str,default=skyinfo.DEFAULT_MINISKY_FILES, + parser.add_argument('--miniskyfiles', type=str, default=skyinfo.DEFAULT_MINISKY_FILES, help='Filename template for single-chip minisky images') - parser.add_argument('--miniskylist',type=str,default=None, help='File containing a list of single-chip minisky images') - parser.add_argument('-o','--outfilename',type=str, + parser.add_argument('--miniskylist', type=str, default=None, help='File containing a list of single-chip minisky images') + parser.add_argument('-o', '--outfilename', type=str, help='Filename for combined minisky FITS image') parser.add_argument('--mask_value', type=float, default=skyinfo.DEFAULT_MASK_VALUE, help='Value of pixels without valid sky information') parser.add_argument('--invalid', type=str, default=skyinfo.DEFAULT_IGNORE, help='Value(s) of DETPOS to ignore in sky image') - return @classmethod def run(cls, config): @@ -102,39 +96,39 @@ def run(cls, config): """ - if config.has_option(cls.step_name,'maskvalue'): + if config.has_option(cls.step_name, 'maskvalue'): mask_value = config.getfloat(cls.step_name, 'maskvalue') else: mask_value = skyinfo.DEFAULT_MASK_VALUE - if config.has_option(cls.step_name,'invalid'): + if config.has_option(cls.step_name, 'invalid'): baddet = config.get(cls.step_name, 'invalid') else: baddet = skyinfo.DEFAULT_IGNORE invalid = baddet.split(',') - if config.has_option(cls.step_name,'ccdnums'): - ccdranges = config.get(cls.step_name,'ccdnums') + if config.has_option(cls.step_name, 'ccdnums'): + ccdranges = config.get(cls.step_name, 
'ccdnums') else: ccdranges = skyinfo.DEFAULT_CCDNUMS ccdnumlist = skyinfo.parse_ranges(ccdranges) if config.has_option(cls.step_name, 'miniskylist'): - miniskylist = config.get(cls.step_name,'miniskylist') - in_filenames=filelist_to_list(miniskylist) + miniskylist = config.get(cls.step_name, 'miniskylist') + in_filenames = filelist_to_list(miniskylist) else: if config.has_option(cls.step_name, 'miniskyfiles'): - miniskyfiles = config.get(cls.step_name,'miniskyfiles') + miniskyfiles = config.get(cls.step_name, 'miniskyfiles') else: miniskyfiles = skyinfo.DEFAULT_MINISKY_FILES in_filenames = [] for i in ccdnumlist: # in_filenames.append(miniskyfiles.format(i)) in_filenames.append(miniskyfiles % i) - + out_filename = config.get(cls.step_name, 'outfilename') - logger.info('Sky combine output to %s' % out_filename) - + logger.info('Sky combine output to %s', out_filename) + ret_code = cls.__call__(in_filenames, out_filename, mask_value, invalid) return ret_code diff --git a/python/pixcorrect/sky_compress.py b/python/pixcorrect/sky_compress.py index 2528ecd..cb08e1e 100755 --- a/python/pixcorrect/sky_compress.py +++ b/python/pixcorrect/sky_compress.py @@ -1,42 +1,40 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Produce a miniature sky-level image of a single CCD by taking medians of boxes in image """ -import ctypes -from os import path +from configparser import NoOptionError import numpy as np -from ConfigParser import SafeConfigParser, NoOptionError -from pixcorrect import proddir from pixcorrect.corr_util import logger -from despyfits.DESImage import DESDataImage, DESImage from pixcorrect.PixCorrectDriver import PixCorrectImStep from pixcorrect import skyinfo +from despyfits.DESImage import DESDataImage, DESImage + # Which section of the config file to read for this step config_section = 'skycompress' class SkyCompress(PixCorrectImStep): description = "Produce compressed image of sky background" step_name = config_section - propagate = 
['FILTER','DATE-OBS','EXPNUM','CCDNUM','DETPOS','INSTRUME', - 'BAND','NITE'] + propagate = ['FILTER', 'DATE-OBS', 'EXPNUM', 'CCDNUM', 'DETPOS', 'INSTRUME', + 'BAND', 'NITE'] # Header keywords to copy from source image into compressed image - + @classmethod def __call__(cls, image, skyfilename, blocksize, bitmask): """Produce compressed image of sky background :Parameters: - - `image`: the DESImage to be compressed. + - `image`: the DESImage to be compressed. - `skyfilename`: filename for the output compressed sky image - `blocksize`: side length of squares in which medians are taken - `bitmask`: Bitmask that will be or'ed with mask plane of image (if any) to mark pixels to be ignored in calculating block median. """ - + logger.info('Compressing sky') nx = image.data.shape[1] / blocksize @@ -47,17 +45,17 @@ def __call__(cls, image, skyfilename, blocksize, bitmask): # Apply bit mask to the mask plane if any. Superpixels # with no unmasked pixels will be filled with value -1 if image.mask is None: - sky = np.median(image.data.reshape(ny,blocksize,nx,blocksize)\ - .swapaxes(1,2)\ - .reshape(ny,nx,blocksize*blocksize), axis=2) + sky = np.median(image.data.reshape(ny, blocksize, nx, blocksize)\ + .swapaxes(1, 2)\ + .reshape(ny, nx, blocksize * blocksize), axis=2) else: - data = np.ma.array(image.data, mask= (image.mask & bitmask), - fill_value=-1.) - sky = np.ma.median(data.reshape(ny,blocksize,nx,blocksize)\ - .swapaxes(1,2)\ - .reshape(ny,nx,blocksize*blocksize), axis=2) + data = np.ma.array(image.data, mask=(image.mask & bitmask), + fill_value=-1.) 
+ sky = np.ma.median(data.reshape(ny, blocksize, nx, blocksize)\ + .swapaxes(1, 2)\ + .reshape(ny, nx, blocksize * blocksize), axis=2) sky = np.ma.getdata(sky) - + # Create HDU for output image, add some header info, save output to file outimage = DESDataImage(sky) outimage['BLOCKSIZ'] = blocksize @@ -66,7 +64,7 @@ def __call__(cls, image, skyfilename, blocksize, bitmask): outimage.save(skyfilename) logger.debug('Finished sky compression') - ret_code=0 + ret_code = 0 return ret_code @@ -81,17 +79,17 @@ def step_run(cls, image, config): """ ### ?? Put defaults in here ?? - if config.has_option(cls.step_name,'blocksize'): + if config.has_option(cls.step_name, 'blocksize'): blocksize = config.getint(cls.step_name, 'blocksize') else: blocksize = skyinfo.DEFAULT_BLOCKSIZE - if config.has_option(cls.step_name,'bitmask'): + if config.has_option(cls.step_name, 'bitmask'): bitmask = config.getint(cls.step_name, 'bitmask') else: bitmask = skyinfo.DEFAULT_SKYMASK skyfilename = config.get(cls.step_name, 'skyfilename') - logger.info('Sky compression will be done for %s' % image) - + logger.info('Sky compression will be done for %s', image) + ret_code = cls.__call__(image, skyfilename, blocksize, bitmask) return ret_code @@ -105,7 +103,6 @@ def add_step_args(cls, parser): help='Size of squares in which median is taken for sky') parser.add_argument('--bitmask', type=int, default=skyinfo.DEFAULT_SKYMASK, help='Mask image bits for pixels to ignore in sky estimate') - return @classmethod def run(cls, config): diff --git a/python/pixcorrect/sky_fit.py b/python/pixcorrect/sky_fit.py index b651188..f9bd86e 100644 --- a/python/pixcorrect/sky_fit.py +++ b/python/pixcorrect/sky_fit.py @@ -1,16 +1,10 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Fit templates to a mini-sky image of a full exposure, returns best-fit coefficients and statistics on the residuals to fit. 
""" -from os import path -import numpy as np -from ConfigParser import SafeConfigParser, NoOptionError - -from pixcorrect import proddir from pixcorrect.corr_util import logger, items_must_match -from despyfits.DESImage import DESDataImage, DESImage from pixcorrect.PixCorrectDriver import PixCorrectImStep from pixcorrect import skyinfo @@ -20,7 +14,7 @@ class SkyFit(PixCorrectImStep): description = "Fit coefficients of sky templates to mini-sky image" step_name = config_section - + @classmethod def __call__(cls, in_filename, out_filename, pc_filename, clip_sigma): """ @@ -32,14 +26,14 @@ def __call__(cls, in_filename, out_filename, pc_filename, clip_sigma): - `pc_filename`: filename for the stored mini-sky principal component array - `clip_sigma`: Number of sigma to mark outliers ignored from fitting & stats """ - + logger.info('Fitting sky') mini = skyinfo.MiniDecam.load(in_filename) templates = skyinfo.MiniskyPC.load(pc_filename) try: # Insure using the correct filter's PCA - items_must_match(mini.header,templates.header,'BAND') + items_must_match(mini.header, templates.header, 'BAND') except: return 1 templates.fit(mini, clip_sigma) @@ -47,7 +41,7 @@ def __call__(cls, in_filename, out_filename, pc_filename, clip_sigma): # Create a one-line binary fits table to hold the coefficients logger.debug('Finished sky fitting') - ret_code=0 + ret_code = 0 return ret_code @classmethod @@ -59,7 +53,7 @@ def step_run(cls, config): """ - if config.has_option(cls.step_name,'clipsigma'): + if config.has_option(cls.step_name, 'clipsigma'): clip_sigma = config.getfloat(cls.step_name, 'clipsigma') else: clip_sigma = skyinfo.DEFAULT_CLIP_SIGMA @@ -68,8 +62,8 @@ def step_run(cls, config): out_filename = config.get(cls.step_name, 'outfilename') pc_filename = config.get(cls.step_name, 'pcfilename') - logger.info('Sky fitting output to %s' % out_filename) - + logger.info('Sky fitting output to %s', out_filename) + ret_code = cls.__call__(in_filename, out_filename, pc_filename, 
clip_sigma) return ret_code @@ -77,15 +71,14 @@ def step_run(cls, config): def add_step_args(cls, parser): """Add arguments specific to sky compression """ - parser.add_argument('--infilename',type=str, + parser.add_argument('--infilename', type=str, help='Filename for input minisky FITS image to fit') - parser.add_argument('--outfilename',type=str, + parser.add_argument('--outfilename', type=str, help='Filename for minisky FITS image with fit results/resids') - parser.add_argument('--pcfilename',type=str, + parser.add_argument('--pcfilename', type=str, help='Filename for minisky principal components') parser.add_argument('--clip_sigma', type=float, default=skyinfo.DEFAULT_CLIP_SIGMA, help='Rejection threshold for robust fitting/statistics') - return @classmethod def run(cls, config): diff --git a/python/pixcorrect/sky_pca.py b/python/pixcorrect/sky_pca.py index d57c379..0da8826 100644 --- a/python/pixcorrect/sky_pca.py +++ b/python/pixcorrect/sky_pca.py @@ -1,16 +1,10 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Perform robust PCA on ensemble of mini-sky images and save results to file. """ -from os import path import numpy as np -import fitsio -from ConfigParser import SafeConfigParser, NoOptionError -from argparse import ArgumentParser - -from pixcorrect import proddir from pixcorrect.corr_util import logger, items_must_match from pixcorrect.PixCorrectDriver import PixCorrectDriver from pixcorrect import skyinfo @@ -29,36 +23,36 @@ def rank1(m): per exposure. Outer product of these is the model (med[:,np.newaxis]*amps). 
""" # Iterate medians a couple of times: - med = np.ones(m.shape[0],dtype=float) - for i in range(2): - amps = np.median(m/med[:,np.newaxis],axis=0) - med = np.median(m/amps,axis=1) + med = np.ones(m.shape[0], dtype=float) + for _ in range(2): + amps = np.median(m / med[:, np.newaxis], axis=0) + med = np.median(m / amps, axis=1) # Set median image to have unit median: norm = np.median(med) med /= norm amps *= norm - return med,amps + return med, amps -def pca(m,nkeep=20): +def pca(m, nkeep=20): """ Perform PCA on data matrix m. Return u*s,s,v for the top nkeep PC's. """ R = np.dot(m.T, m) - s,v = np.linalg.eigh(R) + s, v = np.linalg.eigh(R) # Sort eigenvalues and retain just top nkeep idx = np.argsort(s)[::-1][:nkeep] - v = v[:,idx] + v = v[:, idx] s = s[idx] - return np.dot(v.T, m.T).T,np.sqrt(s),v + return np.dot(v.T, m.T).T, np.sqrt(s), v -def clip(data,model,nsigma=4): +def clip(data, model, nsigma=4): diff = data - model - avg,var,n = clippedMean(diff,nsigma,maxSample=1000000) - logger.info('Clipped mean and RMS sky residual: {:f} +- {:f}'.format(avg,np.sqrt(var))) - diff = np.abs(diff-avg) > nsigma*np.sqrt(var) - out = np.where(diff, model+avg, data) + avg, var, _ = clippedMean(diff, nsigma, maxSample=1000000) + logger.info('Clipped mean and RMS sky residual: {:f} +- {:f}'.format(avg, np.sqrt(var))) + diff = np.abs(diff - avg) > nsigma * np.sqrt(var) + out = np.where(diff, model + avg, data) return out, np.count_nonzero(diff) def process(m, npc): @@ -68,30 +62,29 @@ def process(m, npc): # Construct rank-1 approximation to data and scale it out # so all entries are near unity med, amps = rank1(m) - data = m / med[:,np.newaxis] + data = m / med[:, np.newaxis] data /= amps - + # The initial model is all 1's model = np.ones_like(data) # Now iterate clip, PCA process - npc_clip = max(npc, 10) # Number of PCs used during clipping n_clip_iterations = 3 # Rounds of clipping of outliers n_clip_sigma = 4 # Number of sigma for clipping during iteration 
n_clip_sigma_final = 3 # Final clipping pass - for i in range(n_clip_iterations): - work,n = clip(data,model,n_clip_sigma) - us,s,v = pca(work,npc) - model = np.dot(us[:,:npc],v[:,:npc].T) + for _ in range(n_clip_iterations): + work, _ = clip(data, model, n_clip_sigma) + us, s, v = pca(work, npc) + model = np.dot(us[:, :npc], v[:, :npc].T) # And a final pass with tighter clip - work,n = clip(data,model,n_clip_sigma_final) - us,s,v = pca(work,npc) - + work, _ = clip(data, model, n_clip_sigma_final) + us, s, v = pca(work, npc) + # Restore the rank-1 scalings - U = us[:,:npc] * med[:,np.newaxis] - V = v[:,:npc] * amps[:,np.newaxis] + U = us[:, :npc] * med[:, np.newaxis] + V = v[:, :npc] * amps[:, np.newaxis] # Now rescale the us to have unit variance along columns - norms = np.sqrt(np.sum(U*U,axis=0)/U.shape[0]) + norms = np.sqrt(np.sum(U * U, axis=0) / U.shape[0]) U /= norms V *= norms return U, s, V @@ -99,7 +92,7 @@ def process(m, npc): class SkyPCA(PixCorrectDriver): description = "Perform robust PCA of a collection of MiniDecam images" step_name = config_section - + @classmethod def __call__(cls, in_filenames, out_filename, npc, reject_rms): """ @@ -111,7 +104,7 @@ def __call__(cls, in_filenames, out_filename, npc, reject_rms): - `npc`: Number of principal components to retain - `reject_rms`: Exclude exposures with fractional RMS above this """ - + logger.info('Collecting images for PCA') if npc > skyinfo.MAX_PC: raise skyinfo.SkyError("Requested number of sky pc's {:d} is above MAX_PC".format(npc)) @@ -139,17 +132,17 @@ def __call__(cls, in_filenames, out_filename, npc, reject_rms): hdr['MAXNITE'] = mini.header['NITE'] else: if mini.blocksize != blocksize \ - or mini.invalid!=invalid \ - or mini.halfS7!=halfS7: - logger.error('Mismatched minisky configuration in file ' + f) - raise skyinfo.SkyError('Mismatched minisky configuration in file ' + f) + or mini.invalid != invalid \ + or mini.halfS7 != halfS7: + logger.error('Mismatched minisky configuration in file ' 
+ f) + raise skyinfo.SkyError('Mismatched minisky configuration in file ' + f) try: # Die if there is a filter mismatch among exposures - items_must_match(hdr,mini.header,'BAND') + items_must_match(hdr, mini.header, 'BAND') except: return 1 - hdr['MINNITE'] = min(hdr['MINNITE'],mini.header['NITE']) - hdr['MAXNITE'] = max(hdr['MAXNITE'],mini.header['NITE']) + hdr['MINNITE'] = min(hdr['MINNITE'], mini.header['NITE']) + hdr['MAXNITE'] = max(hdr['MAXNITE'], mini.header['NITE']) mm.append(np.array(v)) expnums.append(int(mini.header['EXPNUM'])) m = np.vstack(mm).transpose() @@ -157,40 +150,39 @@ def __call__(cls, in_filenames, out_filename, npc, reject_rms): logger.info("Start first PCA cycle") U, S, v = process(m, npc) - + pc = skyinfo.MiniskyPC(U, blocksize=blocksize, mask_value=mask_value, - invalid = invalid, - header = hdr, - halfS7 = halfS7) + invalid=invalid, + header=hdr, + halfS7=halfS7) # Refit each exposure to this PCA, nexp = len(in_filenames) - V = np.zeros((nexp, U.shape[1]),dtype=float) - rms = np.zeros(nexp,dtype=float) - frac = np.zeros(nexp,dtype=float) + V = np.zeros((nexp, U.shape[1]), dtype=float) + rms = np.zeros(nexp, dtype=float) + frac = np.zeros(nexp, dtype=float) for i in range(nexp): - mini.fill_from(m[:,i]) - pc.fit(mini,clip_sigma=3.) + mini.fill_from(m[:, i]) + pc.fit(mini, clip_sigma=3.) rms[i] = mini.rms use = rms <= reject_rms - logger.info('Retained {:d} out of {:d} exposures'.format(np.count_nonzero(use), - nexp)) + logger.info('Retained {:d} out of {:d} exposures'.format(np.count_nonzero(use), nexp)) # New PCA excluding outliers logger.info("Start second PCA cycle") - U, S, v = process(m[:,use], npc) + U, S, v = process(m[:, use], npc) pc.U = U pc.save(out_filename) - + # Recollect statistics and save logger.info("Collecting statistics") for i in range(nexp): - mini.fill_from(m[:,i]) - pc.fit(mini,clip_sigma=3.) - V[i,:] = mini.coeffs + mini.fill_from(m[:, i]) + pc.fit(mini, clip_sigma=3.) 
+ V[i, :] = mini.coeffs rms[i] = mini.rms frac[i] = mini.frac @@ -199,7 +191,7 @@ def __call__(cls, in_filenames, out_filename, npc, reject_rms): expnums, V, rms, frac, use, S) logger.debug('Finished PCA') - ret_code=0 + ret_code = 0 return ret_code @classmethod @@ -223,12 +215,12 @@ def run(cls, config): return ret_code @classmethod - def add_step_args(cls,parser): - parser.add_argument('-i','--inlist',type=str, + def add_step_args(cls, parser): + parser.add_argument('-i', '--inlist', type=str, help='File holding names of all input minisky files') - parser.add_argument('-o','--outfilename',type=str, + parser.add_argument('-o', '--outfilename', type=str, help='Filename for PCA results/resids') - parser.add_argument('-n','--npc',type=int,default=4, + parser.add_argument('-n', '--npc', type=int, default=4, help='Number of principal components to retain') parser.add_argument('--reject_rms', type=float, default=0.005, help='Reject exposures with RMS resids from PCA fit above this') diff --git a/python/pixcorrect/sky_subtract.py b/python/pixcorrect/sky_subtract.py index 949b224..1088411 100644 --- a/python/pixcorrect/sky_subtract.py +++ b/python/pixcorrect/sky_subtract.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Subtract sky from image by summing sky principal components with pre-computed coefficients for this exposure. 
@@ -8,13 +8,10 @@ from os import path import numpy as np -from ConfigParser import SafeConfigParser, NoOptionError -from pixcorrect import proddir -from pixcorrect.corr_util import logger, do_once, items_must_match from despyfits.DESImage import DESDataImage, DESImage, weight_dtype, section2slice +from pixcorrect.corr_util import logger, do_once, items_must_match from pixcorrect.PixCorrectDriver import PixCorrectImStep -from pixcorrect.PixCorrectDriver import PixCorrectMultistep from pixcorrect import skyinfo from pixcorrect.skyinfo import SkyError from pixcorrect import decaminfo @@ -26,14 +23,13 @@ class SkySubtract(PixCorrectImStep): description = "Subtract sky from images based on principal-component fit and calculate" +\ " weight image" - - step_name = config_section + + step_name = config_section config_section = config_section - + @classmethod - @do_once(1,'DESSKYSB') - def __call__(cls, image, fit_filename, pc_filename, - weight, dome, skymodel_filename): + @do_once(1, 'DESSKYSB') + def __call__(cls, image, fit_filename, pc_filename, weight, dome, skymodel_filename): """ Subtract sky from image using previous principal-components fit. Optionally build weight image from fitted sky or all counts, in which case the dome flat @@ -47,65 +43,65 @@ def __call__(cls, image, fit_filename, pc_filename, - `weight`: 'none' to skip weights, 'sky' to calculate weight at sky level, 'all' to use all counts - `dome`: DESImage for the dome flat, needed if weight is not 'none'. 
- - `skymodel_filename`: optional output filename for 'sky' + - `skymodel_filename`: optional output filename for 'sky' """ - if weight=='sky' and fit_filename is None: + if weight == 'sky' and fit_filename is None: raise SkyError('Cannot make sky-only weight map without doing sky subtraction') - + if fit_filename is not None: logger.info('Subtracting sky') mini = skyinfo.MiniDecam.load(fit_filename) templates = skyinfo.SkyPC.load(pc_filename) if templates.detpos != image['DETPOS']: # Quit if we don't have the right CCD to subtract - logger.error('Image DETPOS {:s} does not match sky template {:s}'.format(\ - templates.detpos,image['DETPOS'])) + logger.error('Image DETPOS {:s} does not match sky template {:s}'.format( + templates.detpos, image['DETPOS'])) return 1 try: image['BAND'] except: image['BAND'] = decaminfo.get_band(image['FILTER']) try: - items_must_match(image,mini.header,'BAND','EXPNUM') - items_must_match(image,templates.header,'BAND') + items_must_match(image, mini.header, 'BAND', 'EXPNUM') + items_must_match(image, templates.header, 'BAND') # ??? Could check that template and image use same dome flat except: return 1 sky = templates.sky(mini.coeffs) image.data -= sky - image.write_key('SKYSBFIL', path.basename(pc_filename), comment = 'Sky subtraction template file') - for i,c in enumerate(mini.coeffs): + image.write_key('SKYSBFIL', path.basename(pc_filename), comment='Sky subtraction template file') + for i, c in enumerate(mini.coeffs): image.write_key('SKYPC{:>02d}'.format(i), c, comment='Sky template coefficient') logger.info('Finished sky subtraction') # # Optionally write the sky model that was subtracted from the image. 
# - if (skymodel_filename is not None): + if skymodel_filename is not None: # Create HDU for output skymodel, add some header info, save output to file logger.info('Optional output of skymodel requested') skymodel_image = DESDataImage(sky) - skymodel_image.write_key('SKYSBFIL', path.basename(pc_filename), comment = 'Sky subtraction template file') - for i,c in enumerate(mini.coeffs): + skymodel_image.write_key('SKYSBFIL', path.basename(pc_filename), comment='Sky subtraction template file') + for i, c in enumerate(mini.coeffs): skymodel_image.write_key('SKYPC{:>02d}'.format(i), c, comment='Sky template coefficient') - skymodel_image.write_key('BAND', image['BAND'], comment = 'Band') - skymodel_image.write_key('EXPNUM', image['EXPNUM'], comment = 'Exposure Number') - skymodel_image.write_key('CCDNUM', image['CCDNUM'], comment = 'CCD Number') - skymodel_image.write_key('NITE', image['NITE'], comment = 'Night') + skymodel_image.write_key('BAND', image['BAND'], comment='Band') + skymodel_image.write_key('EXPNUM', image['EXPNUM'], comment='Exposure Number') + skymodel_image.write_key('CCDNUM', image['CCDNUM'], comment='CCD Number') + skymodel_image.write_key('NITE', image['NITE'], comment='Night') # skymodel_image.copy_header_info(image, cls.propagate, require=False) ## ?? catch exception from write error below? 
skymodel_image.save(skymodel_filename) - + else: sky = None - - if weight=='none': + + if weight == 'none': do_weight = False sky_weight = False - elif weight=='sky': + elif weight == 'sky': do_weight = True sky_weight = True - elif weight=='all': + elif weight == 'all': do_weight = True sky_weight = False else: @@ -114,7 +110,7 @@ def __call__(cls, image, fit_filename, pc_filename, if do_weight: if dome is None: raise SkyError('sky_subtract needs dome flat when making weights') - + if sky_weight: logger.info('Constructing weight image from sky image') data = sky @@ -131,7 +127,7 @@ def __call__(cls, image, fit_filename, pc_filename, image.weight = None image.variance = None logger.warning('Overwriting existing weight image') - + """ We assume in constructing the weight (=inverse variance) image that the input image here has been divided by the dome flat already, and that @@ -164,27 +160,27 @@ def __call__(cls, image, fit_filename, pc_filename, """ # Transform the sky image into a variance image - var = np.array(data, dtype = weight_dtype) + var = np.array(data, dtype=weight_dtype) for amp in decaminfo.amps: - sec = section2slice(image['DATASEC'+amp]) - invgain = (image['FLATMED'+amp]/image['GAIN'+amp]) / dome.data[sec] + sec = section2slice(image['DATASEC' + amp]) + invgain = (image['FLATMED' + amp] / image['GAIN' + amp]) / dome.data[sec] var[sec] += image['RDNOISE'+amp]**2 * invgain var[sec] *= invgain # Add noise from the dome flat shot noise, if present if dome.weight is not None: - var += data * data / (dome.weight*dome.data * dome.data) + var += data * data / (dome.weight * dome.data * dome.data) elif dome.variance is not None: var += data * data * dome.variance / (dome.data * dome.data) - + image.variance = var # Now there are statistics desired for the output image header. 
# First, the median variance at sky level on the two amps, SKYVAR[AB] meds = [] for amp in decaminfo.amps: - sec = section2slice(image['DATASEC'+amp]) - v = np.median(var[sec][::4,::4]) - image.write_key('SKYVAR'+amp, v, + sec = section2slice(image['DATASEC' + amp]) + v = np.median(var[sec][::4, ::4]) + image.write_key('SKYVAR' + amp, v, comment='Median noise variance at sky level, amp ' + amp) meds.append(v) # SKYSIGMA is overall average noise level @@ -193,11 +189,11 @@ def __call__(cls, image, fit_filename, pc_filename, # SKYBRITE is a measure of sky brightness. Use the sky image if we've got it, else # use the data if sky is None: - skybrite = np.median(data[::4,::4]) + skybrite = np.median(data[::4, ::4]) else: - skybrite = np.median(sky[::2,::2]) - image.write_key('SKYBRITE', skybrite, comment = 'Median sky brightness') - + skybrite = np.median(sky[::2, ::2]) + image.write_key('SKYBRITE', skybrite, comment='Median sky brightness') + logger.debug('Finished weight construction') # Run null_mask or resaturate if requested in the command-line @@ -206,9 +202,9 @@ def __call__(cls, image, fit_filename, pc_filename, # We need to fix the step_name if we want to call 'step_run' null_weights.__class__.step_name = config_section #null_weights.__class__.step_name = cls.config_section - null_weights.step_run(image,cls.config) + null_weights.step_run(image, cls.config) - ret_code=0 + ret_code = 0 return ret_code @classmethod @@ -223,31 +219,31 @@ def step_run(cls, image, config): # Passing config to the class cls.config = config - if config.has_option(cls.step_name,'fitfilename'): + if config.has_option(cls.step_name, 'fitfilename'): fit_filename = config.get(cls.step_name, 'fitfilename') else: fit_filename = None - if config.has_option(cls.step_name,'pcfilename'): + if config.has_option(cls.step_name, 'pcfilename'): pc_filename = config.get(cls.step_name, 'pcfilename') else: pc_filename = None - weight = config.get(cls.step_name,'weight') + weight = 
config.get(cls.step_name, 'weight') - if config.has_option(cls.step_name,'domefilename'): - dome_filename = config.get(cls.step_name,'domefilename') + if config.has_option(cls.step_name, 'domefilename'): + dome_filename = config.get(cls.step_name, 'domefilename') dome = DESImage.load(dome_filename) else: dome = None - if config.has_option(cls.step_name,'skymodel'): - skymodel_filename = config.get(cls.step_name,'skymodel') + if config.has_option(cls.step_name, 'skymodel'): + skymodel_filename = config.get(cls.step_name, 'skymodel') else: skymodel_filename = None - logger.info('Sky fitting output to %s' % image) - + logger.info('Sky fitting output to %s', image) + ret_code = cls.__call__(image, fit_filename, pc_filename, weight, dome, skymodel_filename) return ret_code @@ -256,23 +252,21 @@ def step_run(cls, image, config): def add_step_args(cls, parser): """Add arguments specific to sky compression """ - parser.add_argument('--fitfilename',type=str, + parser.add_argument('--fitfilename', type=str, help='Filename for minisky FITS image with PC coefficients') - parser.add_argument('--pcfilename',type=str, + parser.add_argument('--pcfilename', type=str, help='Filename for full-res sky principal components') - parser.add_argument('--domefilename',type=str, + parser.add_argument('--domefilename', type=str, help='Filename for dome flat (for weight calculation)') - parser.add_argument('--weight', choices=('sky','all','none'), + parser.add_argument('--weight', choices=('sky', 'all', 'none'), default=skyinfo.DEFAULT_WEIGHT, help='Construct weight from sky photons, ' \ 'from all photons, or not at all') - parser.add_argument('--skymodel',type=str, + parser.add_argument('--skymodel', type=str, help='Optional output file showing the model sky that was subtracted.') # Adding the null_weights options null_weights.add_step_args(parser) - return - @classmethod def do_step(cls, step_name): if not cls.config.has_option(cls.config_section, step_name): diff --git 
a/python/pixcorrect/sky_template.py b/python/pixcorrect/sky_template.py index e4e2922..c8cc091 100644 --- a/python/pixcorrect/sky_template.py +++ b/python/pixcorrect/sky_template.py @@ -1,20 +1,17 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Use pre-established PCA coefficients to fit build full-res sky templates Try a faster sigma-clipping approach that also uses mask images as a start """ +import time from os import path import numpy as np import fitsio -from ConfigParser import SafeConfigParser, NoOptionError -from argparse import ArgumentParser -import time from despyfits.DESImage import DESDataImage -from pixcorrect import proddir -from pixcorrect.corr_util import logger,items_must_match +from pixcorrect.corr_util import logger, items_must_match from pixcorrect.PixCorrectDriver import PixCorrectDriver from pixcorrect import skyinfo from pixcorrect.clippedMean import clippedMean @@ -26,17 +23,17 @@ class SkyTemplate(PixCorrectDriver): description = "Create full-resolution sky templates based on previous PCA" step_name = config_section - + @classmethod def __call__(cls, in_filename, out_filename, ccdnum, input_template=None, input_list=None, - good_filename = None, - reject_rms = None, - mem_use = 8., - bitmask = skyinfo.DEFAULT_SKYMASK): + good_filename=None, + reject_rms=None, + mem_use=8., + bitmask=skyinfo.DEFAULT_SKYMASK): """ Create full-resolution sky templates based on previous PCA. Does this pixel by pixel, via robust fitting of the data in the input @@ -56,21 +53,21 @@ def __call__(cls, - `input_list`: name of a file containing expnum, filename pairs, one pair per line, separated by whitespace. - `good_filename`: Name of a FITS file in which to save number of images - contributing to each pixel's fit. No output if None. + contributing to each pixel's fit. No output if None. - `reject_rms`: Exclude exposures with fractional RMS residual sky above this. If this is None, just uses the exposures that PCA used. 
- `mem_use:` Number of GB to target for memory usage (Default = 8) - `bitmask:` Applied to MASK extension of images for initial bad-pixel exclusion. """ - + logger.info('Starting sky template construction') # Need exactly one of these two arguments: - if not ( (input_template is None) ^ (input_list is None)): + if not ((input_template is None) ^ (input_list is None)): logger.error('Need exactly one of input_template and input_list to be given') return 1 - + # Acquire PCA information, including the table of info on input exposures pc = skyinfo.MiniskyPC.load(in_filename) pctab = skyinfo.MiniskyPC.get_exposures(in_filename) @@ -80,7 +77,7 @@ # Quit if we are requesting template for a CCD that was not compressed detpos = decaminfo.detpos_dict[ccdnum] try: - mini.index_of(detpos,1,1) + mini.index_of(detpos, 1, 1) except skyinfo.SkyError: logger.error('Template requested for CCDNUM not included in PCA') return 1 @@ -97,75 +94,75 @@ # Get filenames for the full-res images from list: if input_list is not None: filenames = {} - flist = np.loadtxt(input_list,dtype=str) - for expnum,filename in flist: + flist = np.loadtxt(input_list, dtype=str) + for expnum, filename in flist: filenames[int(expnum)] = filename del flist # Now warn if we are missing expnums and remove from usable exposure list - for i in range(len(use)): - if use[i] and not int(pctab['EXPNUM'][i]) in filenames.keys(): + for i, val in enumerate(use): + if val and int(pctab['EXPNUM'][i]) not in filenames.keys(): use[i] = False logger.warning('No input filename given for expnum ' + str(expnum)) - + nimg = np.count_nonzero(use) expnums = [] vv = [] - for i in range(len(use)): - if use[i]: + for i, val in enumerate(use): + if val: vv.append(pctab['COEFFS'][i]) expnums.append(pctab['EXPNUM'][i]) V = np.vstack(vv) del vv - + # We'll re-normalize each exposure, and its coefficients, by V[0] - norms = np.array(V[:,
0]) + V = V.T / norms # V is now of shape (npc,nimg) # The linear solutions will require this: - ainv = np.linalg.inv( np.dot(V,V.T)) - + ainv = np.linalg.inv(np.dot(V, V.T)) + nexp = V.shape[1] npc = pc.U.shape[1] ySize = decaminfo.shape[0] xSize = decaminfo.shape[1] # Create the output array - out = np.zeros( (npc, ySize, xSize), dtype=np.float32) + out = np.zeros((npc, ySize, xSize), dtype=np.float32) # And an array to hold the number of exposures used at each pixel: if good_filename is not None: - ngood = np.zeros( (ySize, xSize), dtype=np.int16) + ngood = np.zeros((ySize, xSize), dtype=np.int16) # Decide how many rows of blocks we'll read from files at a time bytes_per_row = 4 * xSize * pc.blocksize * nimg xBlocks = xSize / pc.blocksize - yBlocks = min( int(np.floor( 0.8* mem_use * (2**30) / bytes_per_row)), - ySize / pc.blocksize) + yBlocks = min(int(np.floor(0.8* mem_use * (2**30) / bytes_per_row)), + ySize // pc.blocksize) if yBlocks < 1: logger.warning('Proceeding even though mem_use is not enough to store 1 row of blocks') yBlocks = 1 - - d = {'ccd':ccdnum} # A dictionary used to assign names to files + + d = {'ccd': ccdnum} # A dictionary used to assign names to files hdr = {} # A dictionary of information to go into output image header # A mask of zero is equivalent to no masking: - if bitmask==0: + if bitmask == 0: bitmask = None - + nonConvergentBlocks = 0 # Keep count of blocks where clipping does not converge. - + # Collect input data in chunks of yBlocks rows of blocks, then process one block at a time.
- for yStart in range(0,ySize,yBlocks*pc.blocksize): + for yStart in range(0, ySize, yBlocks * pc.blocksize): # Acquire the pixel data into a 3d array - yStop = min(ySize,yStart+yBlocks*pc.blocksize) - logger.info('Working on rows {:d} -- {:d}'.format(yStart,yStop)) - data = np.zeros( (nimg, yStop-yStart, xSize), dtype=np.float32) + yStop = min(ySize, yStart + yBlocks * pc.blocksize) + logger.info('Working on rows {:d} -- {:d}'.format(yStart, yStop)) + data = np.zeros((nimg, yStop - yStart, xSize), dtype=np.float32) # Mask image: - mask = np.zeros( (nimg, yStop-yStart, xSize), dtype=bool) + mask = np.zeros((nimg, yStop - yStart, xSize), dtype=bool) - for i,expnum in enumerate(expnums): - d['expnum']=expnum + for i, expnum in enumerate(expnums): + d['expnum'] = expnum if input_template is None: # Get the filename from the input list filename = filenames[expnum] @@ -174,14 +171,14 @@ filename = input_template.format(**d) logger.debug('Getting pixels from ' + filename) with fitsio.FITS(filename) as fits: - data[i,:,:] = fits['SCI'][yStart:yStop, :xSize] + data[i, :, :] = fits['SCI'][yStart:yStop, :xSize] if bitmask is None: - mask[i,:,:] = True + mask[i, :, :] = True else: - m = np.array(fits['MSK'][yStart:yStop, :xSize],dtype=np.int16) - mask[i,:,:] = (m & bitmask)==0 + m = np.array(fits['MSK'][yStart:yStop, :xSize], dtype=np.int16) + mask[i, :, :] = (m & bitmask) == 0 del m - if yStart==0: + if yStart == 0: # First time through the images we will be collecting/checking # header information from the contributing images hdrin = fits['SCI'].read_header() @@ -210,82 +207,81 @@ else: logger.error('No CCDNUM in ' + filename) return 1 - if len(hdr)==0: + if not hdr: # First exposure will establish values for the output hdr['BAND'] = usehdr['BAND'] hdr['MINNITE'] = usehdr['NITE'] hdr['MAXNITE'] = usehdr['NITE'] hdr['CCDNUM'] = usehdr['CCDNUM'] - if hdr['CCDNUM']!=ccdnum: - logger.error('Wrong ccdnum {:d} in {:s}'.format( - ccdnum,filename)) + 
if hdr['CCDNUM'] != ccdnum: + logger.error('Wrong ccdnum {:d} in {:s}'.format(ccdnum, filename)) hdr['FLATFIL'] = usehdr['FLATFIL'] else: # Check that this exposure matches the others try: - items_must_match(hdr, usehdr, 'BAND','CCDNUM','FLATFIL') + items_must_match(hdr, usehdr, 'BAND', 'CCDNUM', 'FLATFIL') except: return 1 - hdr['MINNITE'] = min(hdr['MINNITE'],usehdr['NITE']) - hdr['MAXNITE'] = max(hdr['MAXNITE'],usehdr['NITE']) - - data /= norms[:,np.newaxis,np.newaxis] # Apply norms to be near unity - + hdr['MINNITE'] = min(hdr['MINNITE'], usehdr['NITE']) + hdr['MAXNITE'] = max(hdr['MAXNITE'], usehdr['NITE']) + + data /= norms[:, np.newaxis, np.newaxis] # Apply norms to be near unity + + # Now cycle through all blocks - for jb in range((yStop-yStart)/pc.blocksize): - for ib in range(xSize/pc.blocksize): - logger.debug('Fitting for block ({:d},{:d})'.format(jb+yStart/pc.blocksize,ib)) - if ccdnum==decaminfo.ccdnums['S7'] and \ + for jb in range((yStop - yStart) // pc.blocksize): + for ib in range(xSize // pc.blocksize): + logger.debug('Fitting for block ({:d},{:d})'.format(jb + yStart // pc.blocksize, ib)) + if ccdnum == decaminfo.ccdnums['S7'] and \ pc.halfS7 and \ - ib >= xSize/pc.blocksize/2: + ib >= xSize / pc.blocksize / 2: # If we are looking at the bad amp of S7, we'll just # store the median of the normalized images in PC0. # The other PC's stay at zero.
out[0, - yStart+jb*pc.blocksize:yStart+(jb+1)*pc.blocksize, - ib*pc.blocksize:(ib+1)*pc.blocksize] = \ - np.median(data[:, - jb*pc.blocksize:(jb+1)*pc.blocksize, - ib*pc.blocksize:(ib+1)*pc.blocksize], - axis=0) + yStart + jb * pc.blocksize: yStart + (jb + 1) * pc.blocksize, + ib * pc.blocksize: (ib + 1) * pc.blocksize] = \ + np.median(data[:, + jb * pc.blocksize: (jb + 1) * pc.blocksize, + ib * pc.blocksize: (ib + 1) * pc.blocksize], + axis=0) continue - + # Use PCA of this block as starting guess at solution index = mini.index_of(detpos, - yStart/pc.blocksize + jb, + yStart // pc.blocksize + jb, ib) - guess = np.array(pc.U[index,:]) + guess = np.array(pc.U[index, :]) # Extract the data for this block into (nexp,npix) array block = np.array(data[:, - jb*pc.blocksize:(jb+1)*pc.blocksize, - ib*pc.blocksize:(ib+1)*pc.blocksize]) - block.resize(nexp, pc.blocksize*pc.blocksize) + jb * pc.blocksize: (jb + 1) * pc.blocksize, + ib * pc.blocksize: (ib + 1) * pc.blocksize]) + block.resize(nexp, pc.blocksize * pc.blocksize) bmask = np.array(mask[:, - jb*pc.blocksize:(jb+1)*pc.blocksize, - ib*pc.blocksize:(ib+1)*pc.blocksize]) - bmask.resize(nexp, pc.blocksize*pc.blocksize) + jb * pc.blocksize: (jb + 1) * pc.blocksize, + ib * pc.blocksize: (ib + 1) * pc.blocksize]) + bmask.resize(nexp, pc.blocksize * pc.blocksize) # We'll scale the guess in each pixel by the typical ratio # of this pixel's data to the PCA model for the block, and # also estimate noise as dispersion about this guess model = np.dot(guess, V) - ratio = block / model[:,np.newaxis] - scale, var, n = clippedMean(ratio,4,axis=0) - clip = 3. * np.sqrt(var.data)*scale.data + ratio = block / model[:, np.newaxis] + scale, var, n = clippedMean(ratio, 4, axis=0) + clip = 3.
* np.sqrt(var.data) * scale.data # First guess at solution is the outer product of superblock PCA # with the scaling per pixel - soln = guess[:,np.newaxis]*scale.data + soln = guess[:, np.newaxis] * scale.data del scale, var, ratio, n # Linear solution with clipping iteration MAX_ITERATIONS = 20 TOLERANCE = 0.0001 for i in range(MAX_ITERATIONS): - model = np.dot(V.T,soln) + model = np.dot(V.T, soln) # Residuals from model are used to clip resid = block - model # Find clipped points and masked ones @@ -294,59 +290,59 @@ def __call__(cls, # Set residual to zero at bad pixels resid[~good] = 0. # Get shift in linear solution from residuals: - dsoln = np.dot(ainv, np.dot(V,resid)) + dsoln = np.dot(ainv, np.dot(V, resid)) soln += dsoln # Calculate largest change in model as convergence criterion - shift = np.max(np.abs(np.dot(V.T,dsoln))) - logger.debug('Iteration {:d}, model shift {:f}'.format(i,shift)) + shift = np.max(np.abs(np.dot(V.T, dsoln))) + logger.debug('Iteration {:d}, model shift {:f}'.format(i, shift)) if shift < TOLERANCE: break - if i==MAX_ITERATIONS-1: + if i == MAX_ITERATIONS - 1: nonConvergentBlocks = nonConvergentBlocks + 1 - + # Save results into big matrices - soln.resize(npc,pc.blocksize,pc.blocksize) + soln.resize(npc, pc.blocksize, pc.blocksize) out[:, - yStart+jb*pc.blocksize:yStart+(jb+1)*pc.blocksize, - ib*pc.blocksize:(ib+1)*pc.blocksize] = soln + yStart + jb * pc.blocksize: yStart + (jb + 1) * pc.blocksize, + ib * pc.blocksize: (ib + 1) * pc.blocksize] = soln if good_filename is not None: # Gin up a masked array because it allows counting along an axis nblock = np.ma.count_masked(\ - np.ma.masked_array(np.zeros_like(good),good),axis=0) - nblock.resize(pc.blocksize,pc.blocksize) - ngood[yStart+jb*pc.blocksize:yStart+(jb+1)*pc.blocksize, - ib*pc.blocksize:(ib+1)*pc.blocksize] = nblock + np.ma.masked_array(np.zeros_like(good), good), axis=0) + nblock.resize(pc.blocksize, pc.blocksize) + ngood[yStart + jb * pc.blocksize: yStart + (jb + 1) * 
pc.blocksize, + ib * pc.blocksize: (ib + 1) * pc.blocksize] = nblock del nblock del resid, model, good, dsoln, block del data if nonConvergentBlocks > 0: - logger.warning('Clipping did not converge for {:d} blocks out of {:d}'.format\ - (nonConvergentBlocks,xBlocks*(ySize/pc.blocksize))) + logger.warning('Clipping did not converge for {:d} blocks out of {:d}'.format + (nonConvergentBlocks, xBlocks * (ySize / pc.blocksize))) # Add a history line about creation here hdr['HISTORY'] = time.asctime(time.localtime()) + \ ' Build sky template from PCA file {:s}'.format(path.basename(in_filename)) # Save the template into the outfile - spc = skyinfo.SkyPC(out,detpos,header=hdr) + spc = skyinfo.SkyPC(out, detpos, header=hdr) spc.save(out_filename) del out - + # Save the number of good sky pixels in another extension if good_filename is not None: - gimg = DESDataImage(ngood, header={'DETPOS':detpos, - 'CCDNUM':ccdnum}) + gimg = DESDataImage(ngood, header={'DETPOS': detpos, + 'CCDNUM': ccdnum}) logger.debug('Writing ngood to ' + good_filename) gimg.save(good_filename) del gimg, ngood - + logger.debug('Finished sky template') - ret_code=0 + ret_code = 0 return ret_code @classmethod def run(cls, config): - """Customized execution for sky template. + """Customized execution for sky template. 
:Parameters: - `config`: the configuration from which to get other parameters @@ -356,25 +352,25 @@ def run(cls, config): infile = config.get(cls.step_name, 'infile') out_filename = config.get(cls.step_name, 'outfilename') ccdnum = config.getint(cls.step_name, 'ccdnum') - mem_use = config.getfloat(cls.step_name,'mem_use') + mem_use = config.getfloat(cls.step_name, 'mem_use') - if config.has_option(cls.step_name,'input_template'): - input_template = config.get(cls.step_name,'input_template') + if config.has_option(cls.step_name, 'input_template'): + input_template = config.get(cls.step_name, 'input_template') else: input_template = None - if config.has_option(cls.step_name,'input_list'): - input_list = config.get(cls.step_name,'input_list') + if config.has_option(cls.step_name, 'input_list'): + input_list = config.get(cls.step_name, 'input_list') else: input_list = None - - if config.has_option(cls.step_name,'reject_rms'): - reject_rms = config.getfloat(cls.step_name,'reject_rms') + + if config.has_option(cls.step_name, 'reject_rms'): + reject_rms = config.getfloat(cls.step_name, 'reject_rms') else: reject_rms = None - if config.has_option(cls.step_name,'good_filename'): - good_filename = config.get(cls.step_name,'good_filename') + if config.has_option(cls.step_name, 'good_filename'): + good_filename = config.get(cls.step_name, 'good_filename') else: good_filename = None @@ -386,22 +382,21 @@ def run(cls, config): reject_rms=reject_rms, mem_use=mem_use, good_filename=good_filename, - bitmask = skyinfo.DEFAULT_SKYMASK) + bitmask=skyinfo.DEFAULT_SKYMASK) return ret_code @classmethod - def add_step_args(cls,parser): - - parser.add_argument('-i','--infile',type=str, + def add_step_args(cls, parser): + parser.add_argument('-i', '--infile', type=str, help='File with PCA information (from sky_pca)') - parser.add_argument('-o','--outfilename',type=str, + parser.add_argument('-o', '--outfilename', type=str, help='Name for output FITS template file') - 
parser.add_argument('-c','--ccdnum',type=int, + parser.add_argument('-c', '--ccdnum', type=int, help='CCDNUM of device for which to build templates') - parser.add_argument('--input_template',type=str, + parser.add_argument('--input_template', type=str, help='String which yields filenames of individual FITS images when formatted,' ' e.g. D{expnum:08d}_{ccd:02d}_fp.fits') - parser.add_argument('--input_list',type=str, + parser.add_argument('--input_list', type=str, help='File holding pairs of expnum, filename on each line giving input images') parser.add_argument('--reject_rms', type=float, help='Reject exposures with RMS resids from PCA fit above this') @@ -409,7 +404,6 @@ def add_step_args(cls,parser): help='FITS file to hold counts of valid exposures per pixel') parser.add_argument('--mem_use', type=float, default=8., help='Number of GB of memory usage to target') - return sky_template = SkyTemplate() diff --git a/python/pixcorrect/sky_template_slow.py b/python/pixcorrect/sky_template_slow.py index 98faff2..9cc9d36 100644 --- a/python/pixcorrect/sky_template_slow.py +++ b/python/pixcorrect/sky_template_slow.py @@ -1,21 +1,18 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ Use pre-established PCA coefficients to fit build full-res sky templates """ from os import path +from argparse import ArgumentParser + import numpy as np import fitsio -from ConfigParser import SafeConfigParser, NoOptionError -from argparse import ArgumentParser - -from pixcorrect import proddir +from pixcorrect import proddir, skyinfo, decaminfo from pixcorrect.corr_util import logger from pixcorrect.PixCorrectDriver import PixCorrectImStep -from pixcorrect import skyinfo from pixcorrect.clippedMean import clippedMean -from pixcorrect import decaminfo # Which section of the config file to read for this step config_section = 'skytemplate' @@ -23,7 +20,7 @@ class SkyTemplate(PixCorrectImStep): description = "Create full-resolution sky templates based on previous PCA" step_name = 
config_section - + @classmethod def __call__(cls, in_filename, out_filename, ccdnum, img_template, reject_rms, mem_use): """ @@ -41,23 +38,23 @@ def __call__(cls, in_filename, out_filename, ccdnum, img_template, reject_rms, m If this is None, just uses the exposures that PCA used. - `mem_use:` Number of GB to target for memory usage """ - + logger.info('Starting sky template construction') # Acquire PCA information, including the table of info on input exposures pc = skyinfo.MiniskyPC.load(in_filename) # ??? Should make this table part of the MiniskyPC class: - pctab = fitsio.read(in_filename,ext='EXPOSURES') + pctab = fitsio.read(in_filename, ext='EXPOSURES') # Build a MiniDECam that has our choice of CCDs that we can use for indexing. mini = pc.get_pc(0) # Quit if we are requesting template for a CCD that was not compressed detpos = decaminfo.detpos_dict[ccdnum] try: - mini.index_of(detpos,1,1) - except SkyError: + mini.index_of(detpos, 1, 1) + except skyinfo.SkyError: logger.error('Template requested for CCDNUM not included in PCA') - return(1) + return 1 # Select exposures we'll use if reject_rms is None: @@ -71,72 +68,70 @@ def __call__(cls, in_filename, out_filename, ccdnum, img_template, reject_rms, m expnums = [] vv = [] - for i in range(len(use)): - if use[i]: + for i, val in enumerate(use): + if val: vv.append(pctab['COEFFS'][i]) expnums.append(pctab['EXPNUM'][i]) V = np.vstack(vv) del vv - + # We'll re-normalize each exposure, and its coefficients, by V[0] - norms = np.array(V[:,0]) - V = V.T/norms # V is now of shape (npc,nimg) + norms = np.array(V[:, 0]) + V = V.T / norms # V is now of shape (npc,nimg) npc = pc.U.shape[1] ySize = decaminfo.shape[0] xSize = decaminfo.shape[1] # Create the output array - out = np.zeros( (npc, ySize, xSize), dtype=np.float32) + out = np.zeros((npc, ySize, xSize), dtype=np.float32) # Only fill half of it for the bad amp: - if ccdnum==decaminfo.ccdnums['S7'] and pc.halfS7: - xSize = xSize/2 - + if ccdnum == 
decaminfo.ccdnums['S7'] and pc.halfS7: + xSize //= 2 + # Decide how many rows of blocks we'll read from files at a time bytes_per_row = 4 * xSize * pc.blocksize * nimg - xBlocks = xSize / pc.blocksize - yBlocks = min( int(np.floor( mem_use * (2**30) / bytes_per_row)), - ySize / pc.blocksize) + #xBlocks = xSize / pc.blocksize + yBlocks = min(int(np.floor(mem_use * (2**30) / bytes_per_row)), + ySize // pc.blocksize) if yBlocks < 1: logger.warning('Proceeding even though mem_use is not enough to store 1 row of blocks') yBlocks = 1 - + d = {'ccd':ccdnum} # Collect input data in chunks of yBlocks rows of blocks, then process one block at a time. - for yStart in range(0,ySize,yBlocks*pc.blocksize): + for yStart in range(0, ySize, yBlocks * pc.blocksize): # Acquire the pixel data into a 3d array - yStop = min(ySize,yStart+yBlocks*pc.blocksize) - logger.info('Working on rows {:d} -- {:d}'.format(yStart,yStop)) - data = np.zeros( (nimg, yStop-yStart, xSize), dtype=np.float32) + yStop = min(ySize, yStart + yBlocks * pc.blocksize) + logger.info('Working on rows {:d} -- {:d}'.format(yStart, yStop)) + data = np.zeros((nimg, yStop - yStart, xSize), dtype=np.float32) - for i,expnum in enumerate(expnums): - d['expnum']=expnum + for i, expnum in enumerate(expnums): + d['expnum'] = expnum filename = img_template.format(**d) logger.debug('Getting pixels from ' + filename) with fitsio.FITS(filename) as fits: - data[i,:,:] = fits['SCI'][yStart:yStop, :xSize] - data /= norms[:,np.newaxis,np.newaxis] # Apply norms to be near zero - + data[i, :, :] = fits['SCI'][yStart:yStop, :xSize] + data /= norms[:, np.newaxis, np.newaxis] # Apply norms to be near zero + # Now cycle through all blocks - for jb in range((yStop-yStart)/pc.blocksize): - for ib in range(xSize/pc.blocksize): - logger.debug('Fitting for block ({:d},{:d})'.format(jb+yStart/pc.blocksize,ib)) + for jb in range((yStop - yStart) // pc.blocksize): + for ib in range(xSize // pc.blocksize): + logger.debug('Fitting for block 
({:d},{:d})'.format(jb + yStart / pc.blocksize, ib)) # Use PCA of this block as starting guess at solution - index = mini.index_of(detpos, - yStart/pc.blocksize + jb, - ib) - guess = np.array(pc.U[index,:]) + index = mini.index_of(detpos, yStart / pc.blocksize + jb, ib) + guess = np.array(pc.U[index, :]) # We'll scale the guess in each pixel by the typical ratio # of this pixel's data to the PCA model for the block: model = np.dot(guess, V) ratio = data[:, - jb*pc.blocksize:(jb+1)*pc.blocksize, - ib*pc.blocksize:(ib+1)*pc.blocksize] / model[:,np.newaxis,np.newaxis] - scale, var, n = clippedMean(ratio,4,axis=0) + jb * pc.blocksize:(jb + 1) * pc.blocksize, + ib * pc.blocksize:(ib + 1) * pc.blocksize] / model[:, np.newaxis, np.newaxis] + scale, var, n = clippedMean(ratio, 4, axis=0) logger.debug('Var, scale, ratio shapes: ' + str(var.shape) + \ ' ' + str(scale.shape) + ' ' + str(ratio.shape)) @@ -144,27 +139,27 @@ def __call__(cls, in_filename, out_filename, ccdnum, img_template, reject_rms, m # Solve each pixel in the block: for jp in range(pc.blocksize): for ip in range(pc.blocksize): - cost = skyinfo.ClippedCost(3*np.sqrt(var[jp,ip])) + cost = skyinfo.ClippedCost(3 * np.sqrt(var[jp, ip])) # Execute and save the fit - out[:, yStart+jb*pc.blocksize+jp, ib*pc.blocksize+ip] = \ - skyinfo.linearFit(data[:, jb*pc.blocksize+jp, ib*pc.blocksize+ip], + out[:, yStart + jb * pc.blocksize + jp, ib * pc.blocksize + ip] = \ + skyinfo.linearFit(data[:, jb * pc.blocksize + jp, ib * pc.blocksize + ip], V, - guess*scale[jp,ip], + guess * scale[jp, ip], cost) del data # Save the template into the outfile - spc = skyinfo.SkyPC(out,detpos) - spc.save(out_filename, clobber=True) - + spc = skyinfo.SkyPC(out, detpos) + spc.save(out_filename) + logger.debug('Finished sky template') - ret_code=0 + ret_code = 0 return ret_code @classmethod def run(cls, config): - """Customized execution for sky template. + """Customized execution for sky template. 
:Parameters: - `config`: the configuration from which to get other parameters @@ -175,10 +170,10 @@ def run(cls, config): out_filename = config.get(cls.step_name, 'outfilename') ccdnum = config.getint(cls.step_name, 'ccdnum') reject_rms = config.getfloat(cls.step_name, 'reject_rms') - mem_use = config.getfloat(cls.step_name,'mem_use') - image_template = config.get(cls.step_name,'image_template') - if config.has_option(cls.step_name,'reject_rms'): - reject_rms = config.getfloat(cls.step_name,'reject_rms') + mem_use = config.getfloat(cls.step_name, 'mem_use') + image_template = config.get(cls.step_name, 'image_template') + if config.has_option(cls.step_name, 'reject_rms'): + reject_rms = config.getfloat(cls.step_name, 'reject_rms') else: reject_rms = None @@ -194,29 +189,25 @@ def run(cls, config): def parser(cls): """Generate a parser """ - default_config = path.join(proddir, 'etc', cls.step_name+'.config') - default_out_config = path.join(cls.step_name+'-as_run'+'.config') + default_config = path.join(proddir, 'etc', cls.step_name + '.config') + default_out_config = path.join(cls.step_name + '-as_run' + '.config') # Argument parser parser = ArgumentParser(description=cls.description) parser.add_argument("config", default=default_config, nargs="?", help="Configuration file filename") - parser.add_argument('-s', '--saveconfig', - default=default_out_config, - help="output config file") - parser.add_argument('-l', '--log', - default=cls.step_name+".log", - help="the name of the logfile") - parser.add_argument('-v', '--verbose', action="count", - help="be verbose") - - parser.add_argument('-i','--infile',type=str, + parser.add_argument('-s', '--saveconfig', + default=default_out_config, help="output config file") + parser.add_argument('-l', '--log', + default=cls.step_name+".log", help="the name of the logfile") + parser.add_argument('-v', '--verbose', action="count", help="be verbose") + parser.add_argument('-i', '--infile', type=str, help='File with PCA information 
(from sky_pca)') - parser.add_argument('-o','--outfilename',type=str, + parser.add_argument('-o', '--outfilename', type=str, help='Name for output FITS template file') - parser.add_argument('-c','--ccdnum',type=int, + parser.add_argument('-c', '--ccdnum', type=int, help='CCDNUM of device for which to build templates') - parser.add_argument('--image_template',type=str,default='D{expnum:08d}_{ccd:02d}_fp.fits', + parser.add_argument('--image_template', type=str, default='D{expnum:08d}_{ccd:02d}_fp.fits', help='String which yields filenames of individual FITS images when formatted') parser.add_argument('--reject_rms', type=float, help='Reject exposures with RMS resids from PCA fit above this') diff --git a/python/pixcorrect/skyinfo.py b/python/pixcorrect/skyinfo.py index 881ace4..fa2c709 100644 --- a/python/pixcorrect/skyinfo.py +++ b/python/pixcorrect/skyinfo.py @@ -36,6 +36,7 @@ class SkyError(Exception): """ def __init__(self, value): self.value = value + super().__init__() def __str__(self): return repr(self.value) @@ -48,18 +49,17 @@ def parse_ranges(ccdlist): s = set() for r1 in ccdlist.split(','): r2 = r1.split('-') - if len(r2)==1: + if len(r2) == 1: s.add(int(r2[0])) - elif len(r2)==2: - for j in range(int(r2[0]),int(r2[1])+1): + elif len(r2) == 2: + for j in range(int(r2[0]), int(r2[1]) + 1): s.add(j) else: raise ValueError('Bad integer range expression in parse_ranges: ' + r1) return sorted(s) - -class MiniDecam(object): +class MiniDecam: """ Class holding a decimated image of the full DECam science array. Each pixel in this image represents a (blocksize x blocksize) region in @@ -86,8 +86,8 @@ def __init__(self, blocksize=DEFAULT_BLOCKSIZE, mask_value=DEFAULT_MASK_VALUE, invalid=DEFAULT_IGNORE.split(','), - header = None, - halfS7 = True): + header=None, + halfS7=True): """ MiniDecam is compressed version of chosen subset of full science array. 
@@ -107,14 +107,14 @@ def __init__(self, self.invalid.add(detpos.strip()) self.header = fitsio.FITSHDR(header) self.halfS7 = halfS7 - + self._chip = [decaminfo.shape[0] / blocksize, - decaminfo.shape[1] / blocksize] # The shape of a decimated CCD - if decaminfo.shape[0]%self._chip[0] != 0 or \ - decaminfo.shape[1]%self._chip[1] != 0: + decaminfo.shape[1] / blocksize] # The shape of a decimated CCD + if decaminfo.shape[0] % self._chip[0] != 0 or \ + decaminfo.shape[1] % self._chip[1] != 0: # Raise exception if image is not multiple of blocksize. raise SkyError('MiniImage blocksize ' + str(blocksize) + - ' does not evenly divide images') + ' does not evenly divide images') # Get the smallest x,y coords in unbinned uber-system x0 = None @@ -126,49 +126,47 @@ def __init__(self, if x0 is None: x0 = xy[0] y0 = xy[1] - x0 = min(x0,xy[0]) - y0 = min(y0,xy[1]) + x0 = min(x0, xy[0]) + y0 = min(y0, xy[1]) self.xmin = x0 self.ymin = y0 - + xmax = 0 ymax = 0 for detpos in decaminfo.ccdnums.keys(): if detpos in self.invalid: continue - y,x = self._corner_of(detpos) - if self.halfS7 and detpos=='S7': - xmax = max(xmax, x+self._chip[1]/2) + y, x = self._corner_of(detpos) + if self.halfS7 and detpos == 'S7': + xmax = max(xmax, x + self._chip[1] / 2) else: - xmax = max(xmax, x+self._chip[1]) - ymax = max(ymax, y+self._chip[0]) + xmax = max(xmax, x + self._chip[1]) + ymax = max(ymax, y + self._chip[0]) # Create the data and mask images - self.data = np.ones( (ymax,xmax), dtype=data_dtype) * self.mask_value + self.data = np.ones((ymax, xmax), dtype=data_dtype) * self.mask_value self.mask = np.zeros(self.data.shape, dtype=bool) # Mark all useful regions in mask: for detpos in decaminfo.ccdnums.keys(): if detpos in self.invalid: continue - y,x = self._corner_of(detpos) - if self.halfS7 and detpos=='S7': - self.mask[y:y+self._chip[0], x:x+self._chip[1]/2] = True + y, x = self._corner_of(detpos) + if self.halfS7 and detpos == 'S7': + self.mask[y:y + self._chip[0], x:x + self._chip[1] / 2] 
= True else: - self.mask[y:y+self._chip[0], x:x+self._chip[1]] = True - - return + self.mask[y:y + self._chip[0], x:x + self._chip[1]] = True - def _corner_of(self,detpos): + def _corner_of(self, detpos): """ Return 2d coordinates of the (0,0) pixel of this detector. """ if detpos in self.invalid or detpos not in decaminfo.ccdnums.keys(): raise SkyError('Invalid detpos in MiniDecam: ' + detpos) - - x = (decaminfo.ccdCorners[detpos][0]-self.xmin)/self.blocksize - y = (decaminfo.ccdCorners[detpos][1]-self.ymin)/self.blocksize - return y,x + + x = (decaminfo.ccdCorners[detpos][0] - self.xmin) / self.blocksize + y = (decaminfo.ccdCorners[detpos][1] - self.ymin) / self.blocksize + return y, x @property def coeffs(self): @@ -184,18 +182,17 @@ def coeffs(self): return np.array(c, dtype=float) @coeffs.setter - def coeffs(self,c): + def coeffs(self, c): # First get rid of any existing SKYPC values for ipc in range(MAX_PC): kw = 'SKYPC{:>02d}'.format(ipc) if kw in self.header: self.header.delete(kw) # Then add new ones. - for ipc,val in enumerate(c): + for ipc, val in enumerate(c): kw = 'SKYPC{:>02d}'.format(ipc) self.header[kw] = float(val) - return - + @property def rms(self): """ @@ -204,9 +201,8 @@ def rms(self): return self.header['SKYRMS'] @rms.setter - def rms(self,rms): + def rms(self, rms): self.header['SKYRMS'] = rms - return @property def frac(self): @@ -216,11 +212,10 @@ def frac(self): return self.header['SKYFRAC'] @frac.setter - def frac(self,frac): + def frac(self, frac): self.header['SKYFRAC'] = frac - return - - def fill(self,data, detpos): + + def fill(self, data, detpos): """ Fill the portion of the mini-image corresponding to detpos with the array given by data. Does not do anything for ignored chips. 
@@ -229,13 +224,12 @@ def fill(self,data, detpos): return if data.shape != tuple(self._chip): raise SkyError('MiniDecam.fill input data has wrong shape ' + str(data.shape)) - y,x = self._corner_of(detpos) - if self.halfS7 and detpos=='S7': - self.data[ y:y+self._chip[0], x:x+self._chip[1]/2 ] = \ - data[:,:data.shape[1]/2] + y, x = self._corner_of(detpos) + if self.halfS7 and detpos == 'S7': + self.data[y:y + self._chip[0], x:x + self._chip[1] / 2] = \ + data[:, :data.shape[1] / 2] else: - self.data[ y:y+self._chip[0], x:x+self._chip[1] ] = data - return + self.data[y:y + self._chip[0], x:x + self._chip[1]] = data def vector(self): """ @@ -244,25 +238,24 @@ def vector(self): """ return self.data[self.mask] - def fill_from(self,vectorIn): + def fill_from(self, vectorIn): """ Set the data array equal to the values in the flattened array vectorIn """ self.data[self.mask] = vectorIn - return - def index_of(self,detpos, j, i): + def index_of(self, detpos, j, i): """ Return the index in the flattened vector() that would contain pixel (j,i) in *compressed* version of the chosen CCD. (j,i) are in the numpy convention. Raises an exception for an invalid detpos. """ tmp = np.zeros_like(self.mask) - y,x = self._corner_of(detpos) - tmp[y+j, x+i] = True + y, x = self._corner_of(detpos) + tmp[y + j, x + i] = True return np.where(tmp[self.mask])[0][0] - def edges(self,npix): + def edges(self, npix): """ Return a new MiniDECam which has value of 1 in pixels that are within npix of the edge of a CCD, and 0 in other valid pixels. @@ -275,7 +268,7 @@ def edges(self,npix): out = MiniDecam(blocksize=self.blocksize, mask_value=self.mask_value, invalid=self.invalid, - halfS7 = self.halfS7) + halfS7=self.halfS7) # Then set all unmasked pixels to zero: out.data[out.mask] = 0. 
@@ -283,18 +276,18 @@ def edges(self,npix): for detpos in decaminfo.ccdnums.keys(): if detpos in self.invalid: continue - y,x = self._corner_of(detpos) + y, x = self._corner_of(detpos) yend = y + self._chip[0] xend = x + self._chip[1] - if self.halfS7 and detpos=='S7': - xend = x + self._chip[1]/2 - out.data[y:y+npix, x:xend] = 1. - out.data[yend-npix:yend,x:xend] = 1. - out.data[y:yend, x:x+npix] = 1. - out.data[y:yend, xend-npix:xend] = 1. + if self.halfS7 and detpos == 'S7': + xend = x + self._chip[1] / 2 + out.data[y:y + npix, x:xend] = 1. + out.data[yend - npix:yend, x:xend] = 1. + out.data[y:yend, x:x + npix] = 1. + out.data[y:yend, xend - npix:xend] = 1. return out - def save(self,filename): + def save(self, filename): """ Save the mini-image to primary extension of a FITS file. """ @@ -304,12 +297,11 @@ def save(self,filename): self.header['HALFS7'] = '' baddet = '' for detpos in self.invalid: - if len(baddet)>0: + if baddet: baddet = baddet + ',' baddet = baddet + detpos self.header['BADDET'] = baddet fitsio.write(filename, self.data, header=self.header, clobber=True) - return def copy_header_info(self, source, keywords, require=False): """ @@ -324,18 +316,17 @@ def copy_header_info(self, source, keywords, require=False): try: value = source[kw] self.header[kw] = value - except (ValueError,KeyError): + except (ValueError, KeyError): if require: raise KeyError('copy_header_info did not find required keyword ' + kw) - return - + @classmethod def load(cls, filename): """ Extracts mini-image and header from primary extension of the given FITS file. Returns image,header """ - d,hdr = fitsio.read(filename,header=True) + d, hdr = fitsio.read(filename, header=True) blocksize = hdr['BLOCKSIZ'] mask_value = hdr['MASKVAL'] halfS7 = 'HALFS7' in hdr.keys() @@ -344,7 +335,7 @@ def load(cls, filename): out.data = d return out -class MiniskyPC(object): +class MiniskyPC: """ Class containing principle components of compressed sky images. 
""" @@ -353,8 +344,8 @@ def __init__(self, blocksize=DEFAULT_BLOCKSIZE, mask_value=DEFAULT_MASK_VALUE, invalid=DEFAULT_IGNORE.split(','), - header = None, - halfS7 = True): + header=None, + halfS7=True): """ :Parameters: @@ -370,22 +361,21 @@ def __init__(self, self.header = fitsio.FITSHDR(header) self.halfS7 = halfS7 # ?? Check that dimensions of U match the size of a MiniSky with chosen params - return @classmethod - def load(cls,filename): + def load(cls, filename): """ Retrieve PC's from a FITS file, from primary or specified extension """ - U,hdr = fitsio.read(filename,ext='U', header=True) + U, hdr = fitsio.read(filename, ext='U', header=True) blocksize = hdr['BLOCKSIZ'] mask_value = hdr['MASKVAL'] halfS7 = 'HALFS7' in hdr.keys() invalid = [j.strip() for j in hdr['BADDET'].split(',')] - return cls(U, blocksize=blocksize, mask_value=mask_value, + return cls(U, blocksize=blocksize, mask_value=mask_value, invalid=invalid, halfS7=halfS7, header=hdr) - def save(self,filename): + def save(self, filename): """ Save the PCs into a FITS file of the given name, in specified extension Clobber=True by default; otherwise will always append a new extension to the file @@ -396,12 +386,11 @@ def save(self,filename): self.header['HALFS7'] = '' baddet = '' for detpos in self.invalid: - if len(baddet)>0: + if baddet: baddet = baddet + ',' baddet = baddet + detpos self.header['BADDET'] = baddet fitsio.write(filename, self.U, extname='U', clobber=True, header=self.header) - return def fit(self, mini, clip_sigma=3.): """ @@ -423,56 +412,55 @@ def fit(self, mini, clip_sigma=3.): # Restrict the data and the templates to the superpixels where we have valid data use = y != mini.mask_value y = y[use] - x = self.U[use,:] - + x = self.U[use, :] + # Create first guess as simply the 0th template, scaled to match median - aStart = np.zeros(x.shape[1],dtype=float) - aStart[0] = np.median(y / x[:,0]) + aStart = np.zeros(x.shape[1], dtype=float) + aStart[0] = np.median(y / x[:, 0]) # Determine 
a sigma for the residuals and build a cost function, 4-sigma clipping - avg,var,n = clippedMean(y-x[:,0]*aStart[0],4) - cost = ClippedCost(4*np.sqrt(var)) + _, var, n = clippedMean(y - x[:, 0] * aStart[0], 4) + cost = ClippedCost(4 * np.sqrt(var)) # Initial fit a = linearFit(y, x.T, aStart, cost) # Repeat fit with updated variance estimate and clipping threshold - avg,var,n = clippedMean(y-np.dot(x,a),4) - cost = ClippedCost(clip_sigma*np.sqrt(var)) + _, var, n = clippedMean(y - np.dot(x, a), 4) + cost = ClippedCost(clip_sigma * np.sqrt(var)) a = linearFit(y, x.T, a, cost) # Get statistics of fractional residuals to this fit - avg,var,n = clippedMean(y/np.dot(x,a)-1.,clip_sigma) + avg, var, n = clippedMean(y / np.dot(x, a) - 1., clip_sigma) v = mini.vector() - v[use] = y / np.dot(x,a) - 1. + v[use] = y / np.dot(x, a) - 1. mini.fill_from(v) mini.coeffs = a mini.rms = np.sqrt(var) - mini.frac = 1.-float(n)/len(y) - return + mini.frac = 1. - float(n) / len(y) def get_pc(self, ipc): """ Return a MiniDecam object that contains values of one of the principal components """ - if ipc<0 or ipc>=self.U.shape[1]: + if ipc < 0 or ipc >= self.U.shape[1]: raise SkyError('Request for non-existent PC #{:d}' \ ' in MiniskyPC.get_pc'.format(ipc)) out = MiniDecam(blocksize=self.blocksize, - mask_value = self.mask_value, - invalid = self.invalid, - halfS7 = self.halfS7) - out.fill_from(self.U[:,ipc]) + mask_value=self.mask_value, + invalid=self.invalid, + halfS7=self.halfS7) + out.fill_from(self.U[:, ipc]) return out - + @classmethod def get_exposures(cls, filename): """ Get table of information on individual exposures from the named MiniskyPC file """ - tab1 = fitsio.read(filename,ext='EXPOSURES') - tab = {'EXPNUM':tab1['EXPNUM'], - 'COEFFS':tab1['COEFFS'], - 'RMS':tab1['RMS'], - 'FRAC':tab1['FRAC'], - 'USE': (tab1['USE']!=0)} # *** Need to convert byte to bool for fitsio bug + tab1 = fitsio.read(filename, ext='EXPOSURES') + tab = {'EXPNUM': tab1['EXPNUM'], + 'COEFFS': 
tab1['COEFFS'], + 'RMS': tab1['RMS'], + 'FRAC': tab1['FRAC'], + 'USE': (tab1['USE'] != 0)} # *** Need to convert byte to bool for fitsio bug return tab @classmethod @@ -480,7 +468,7 @@ def get_pc_rms(cls, filename): """ Get vector of the RMS signal in each PC from an exposure table stored with it """ - h = fitsio.read_header(filename,ext='EXPOSURES') + h = fitsio.read_header(filename, ext='EXPOSURES') rms = [] for i in range(MAX_PC): try: @@ -497,28 +485,27 @@ def save_exposures(cls, filename, expnums, coeffs, rms, frac, use, s): results for exposures. """ # *** Note saving the USE array in bytes since fitsio has a bug reading bools - tab = {'EXPNUM':np.array(expnums,dtype=np.int32), - 'COEFFS':coeffs, - 'RMS':rms, - 'FRAC':frac, - 'USE':np.array(use,dtype=np.int8)} - + tab = {'EXPNUM': np.array(expnums, dtype=np.int32), + 'COEFFS': coeffs, + 'RMS': rms, + 'FRAC': frac, + 'USE': np.array(use, dtype=np.int8)} + # Place the rms in each PC into header keywords h = {} - for i in range(min(MAX_PC,len(s))): + for i in range(min(MAX_PC, len(s))): h['PCRMS{:02d}'.format(i)] = s[i] - with fitsio.FITS(filename,'rw') as fits: + with fitsio.FITS(filename, 'rw') as fits: fits.write(tab, header=h, extname='EXPOSURES') - return -class SkyPC(object): +class SkyPC: """ Full-resolution sky principal components (templates) """ # ??? Add ability to use templates that have been subsampled to save I/O time. extname = 'TEMPLATES' - + def __init__(self, d, detpos, header=None): """ A sky pc is a 3d array with index 0 enumerating 2d principal components of sky. 
@@ -526,39 +513,37 @@ def __init__(self, d, detpos, header=None): self.d = np.array(d) self.detpos = detpos self.header = fitsio.FITSHDR(header) - return @classmethod def load(cls, filename): """ Get a sky pc from the 3d array stored in named extension of FITS file """ - d,h = fitsio.read(filename, ext=cls.extname, header=True) + d, h = fitsio.read(filename, ext=cls.extname, header=True) if len(d.shape) != 3: raise SkyError("SkyTemplates.load did not find 3d array in " + filename) detpos = h['DETPOS'] - return cls(d,detpos,header=h) + return cls(d, detpos, header=h) def save(self, filename): """ Save a sky pc as a FITS file under given extension name (or primary). If clobber=False, it is appended as a new extension. """ - self.header['DETPOS']=self.detpos - self.header['CCDNUM']=decaminfo.ccdnums[self.detpos] + self.header['DETPOS'] = self.detpos + self.header['CCDNUM'] = decaminfo.ccdnums[self.detpos] fitsio.write(filename, self.d, extname=self.extname, clobber=True, header=self.header) - return def sky(self, coeffs): """ Return a 2d array constructed from applying the specified coefficients """ - if len(coeffs.shape)!=1 or self.d.shape[0] != len(coeffs): + if len(coeffs.shape) != 1 or self.d.shape[0] != len(coeffs): raise SkyError("Wrong number of coefficients for SkyTemplates.sky: " + str(coeffs.shape)) # ?? 
or return np.tensordot(coeffs, self.d, axes=1) - return np.sum(coeffs[:,np.newaxis,np.newaxis]*self.d, axis=0) - + return np.sum(coeffs[:, np.newaxis, np.newaxis] * self.d, axis=0) + def linearFit(y, x, aStart, cost, dump=False): """ @@ -580,22 +565,22 @@ def linearFit(y, x, aStart, cost, dump=False): iterations = 0 MAX_ITERATIONS = 10 COST_TOLERANCE = 0.01 - while (iterations < MAX_ITERATIONS): + while iterations < MAX_ITERATIONS: dy = y - np.dot(a, x) totcost, d1, d2 = cost(dy) if dump: - print iterations,':',totcost, a - if oldCost!=None: - if oldCost - totcost < COST_TOLERANCE*oldCost: + print(iterations, ':', totcost, a) + if oldCost is not None: + if oldCost - totcost < COST_TOLERANCE * oldCost: # done! return a oldCost = totcost - + # do an update beta = np.dot(x, d1) - alpha = np.dot(x*d2, x.T) + alpha = np.dot(x * d2, x.T) a += np.linalg.solve(alpha, beta) - print "Too many iterations in linearFit" + print("Too many iterations in linearFit") return np.zeros_like(aStart) class ClippedCost: @@ -605,11 +590,10 @@ class ClippedCost: """ def __init__(self, limit): self.limit = limit - return + def __call__(self, data): use = np.abs(data) < self.limit - cost = np.sum((data*data)[use]) / np.count_nonzero(use) - d1 = np.where(use,data,0.) + cost = np.sum((data * data)[use]) / np.count_nonzero(use) + d1 = np.where(use, data, 0.) d2 = np.where(use, 1., 0.) - return cost,d1,d2 - + return cost, d1, d2 diff --git a/python/pixcorrect/skyplot.py b/python/pixcorrect/skyplot.py index a13aeea..84a7d0c 100644 --- a/python/pixcorrect/skyplot.py +++ b/python/pixcorrect/skyplot.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python """ Package of plots to make about sky subtraction. Called from command line, the first argument is @@ -6,14 +5,13 @@ second argument is the name for a multipage pdf of diagnostic plots. 
""" -import sys, os import numpy as np import pylab as pl from pixcorrect import skyinfo -from argparse import ArgumentParser + cmap = 'cubehelix' -def showMini(mini,vrange=None): +def showMini(mini, vrange=None): """ Make a 2d plot of a mini-sky image. Use percentiles of the valid pixels to set the @@ -28,7 +26,6 @@ def showMini(mini,vrange=None): pl.imshow(mini.data, interpolation='nearest', origin='lower', aspect='equal', vmin=vmin, vmax=vmax, cmap=cmap) pl.colorbar() - return def showPCA(pcfile): """ @@ -36,22 +33,21 @@ def showPCA(pcfile): """ pca = skyinfo.MiniskyPC.load(pcfile) npc = pca.U.shape[1] - nrows = (npc-1)/4 + 1 - ncols = min(npc,4) - fig,axx = pl.subplots(nrows,ncols,squeeze=True) - fig.set_size_inches(2*ncols,2*nrows) + nrows = (npc - 1) / 4 + 1 + ncols = min(npc, 4) + fig, axx = pl.subplots(nrows, ncols, squeeze=True) + fig.set_size_inches(2 * ncols, 2 * nrows) for ipc in range(npc): - irow = ipc/4 - icol = ipc%4 - fig.sca(axx[irow,icol]) + irow = ipc / 4 + icol = ipc % 4 + fig.sca(axx[irow, icol]) pl.axis('off') pc = pca.get_pc(ipc) vmin = np.percentile(pc.vector(), 1.) vmax = np.percentile(pc.vector(), 99.) 
pl.imshow(pc.data, interpolation='nearest', origin='lower', aspect='equal', - vmin=vmin, vmax=vmax, cmap=cmap) - pl.text(10,200,'PC{:d}'.format(ipc),color='white') - return + vmin=vmin, vmax=vmax, cmap=cmap) + pl.text(10, 200, 'PC{:d}'.format(ipc), color='white') def rmsVsPc(pcfile): """ @@ -59,30 +55,29 @@ def rmsVsPc(pcfile): """ rms = skyinfo.MiniskyPC.get_pc_rms(pcfile) pl.semilogy(range(len(rms)), rms, 'ro') - pl.xlim(-0.5,len(rms)+0.5) + pl.xlim(-0.5, len(rms) + 0.5) pl.xlabel('PC Number') pl.ylabel('RMS signal') pl.grid() pl.title('RMS vs PC for ' + pcfile) - return - -def showResids2(m,model,mask,sh): + +def showResids2(m, model, mask, sh): """ Make an image of residuals in the first 100 frames """ - out = np.ones(((sh[0]+2)*10,(sh[1]+2)*10),dtype=float) * -1 - img = np.ones(sh,dtype=float)*-1 + out = np.ones(((sh[0] + 2) * 10, (sh[1] + 2) * 10), dtype=float) * -1 + img = np.ones(sh, dtype=float) * -1 for i in range(10): - x0=i*(sh[1]+2) + x0 = i * (sh[1] + 2) for j in range(10): - y0 = j*(sh[0]+2) - img[mask] = m[:,10*i+j]-model[:,10*i+j] - print sh, img.shape, x0, y0 - out[y0:y0+sh[0],x0:x0+sh[1]] = img + y0 = j * (sh[0] + 2) + img[mask] = m[:, 10 * i + j] - model[:, 10 * i + j] + print(sh, img.shape, x0, y0) + out[y0:y0 + sh[0], x0:x0 + sh[1]] = img return out -def pcaReport(pcafile,pdffile): +def pcaReport(pcafile, pdffile): """ Make a set of plots for quality control on a PCA output pcafile is the output of sky_pca @@ -96,4 +91,3 @@ def pcaReport(pcafile,pdffile): showPCA(pcafile) pp.savefig() pp.close() - return diff --git a/python/pixcorrect/starflat_correct.py b/python/pixcorrect/starflat_correct.py index 84261d8..44d6bca 100644 --- a/python/pixcorrect/starflat_correct.py +++ b/python/pixcorrect/starflat_correct.py @@ -1,17 +1,11 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Apply a star flat correction to a raw DES image. 
Uses all the code from flat_combine but changes only the FITS keywords used to register actions """ from os import path -import numpy as np -from pixcorrect import proddir -from pixcorrect.corr_util import logger, do_once -from despyfits.DESImage import DESImage -from despyfits import maskbits -from pixcorrect.PixCorrectDriver import PixCorrectImStep -from pixcorrect import decaminfo +from pixcorrect.corr_util import do_once from pixcorrect.flat_correct import FlatCorrect # Which section of the config file to read for this step @@ -22,7 +16,7 @@ class StarFlatCorrect(FlatCorrect): step_name = config_section @classmethod - @do_once(1,'DESSTAR') + @do_once(1, 'DESSTAR') def __call__(cls, image, flat_im): """Apply a flat field correction to an image diff --git a/setup.py b/setup.py index 828a3b0..81fa9d9 100644 --- a/setup.py +++ b/setup.py @@ -1,47 +1,44 @@ import os import distutils -from distutils.core import setup +from distutils.core import setup, Extension import glob -import shlib -from shlib.build_shlib import SharedLibrary - bin_files = glob.glob('bin/*') #inc_files = glob.glob("include/*.h") #doc_files = glob.glob("doc/*.*") + glob.glob("doc/*/*") -libbiascorrect = SharedLibrary( +libbiascorrect = Extension( 'biascorrect', sources = ['src/libbiascorrect.c'], include_dirs = ['include', '%s/include' % os.environ['IMSUPPORT_DIR'], '%s/include' % os.environ['DESPYFITS_DIR']], extra_compile_args = ['-O3','-g','-Wall','-shared','-fPIC']) -libbpm = SharedLibrary( +libbpm = Extension( 'bpm', sources = ['src/libbpm.c'], include_dirs = ['include', '%s/include' % os.environ['IMSUPPORT_DIR'], '%s/include' % os.environ['DESPYFITS_DIR']], extra_compile_args = ['-O3','-g','-Wall','-shared','-fPIC']) -libfixcol = SharedLibrary( +libfixcol = Extension( 'fixcol', sources = ['src/libfixcol.c'], include_dirs = ['include', '%s/include' % os.environ['IMSUPPORT_DIR'], '%s/include' % os.environ['DESPYFITS_DIR']], extra_compile_args = ['-O3','-g','-Wall','-shared','-fPIC']) 
-libflatcorrect = SharedLibrary( +libflatcorrect = Extension( 'flatcorrect', sources = ['src/libflatcorrect.c'], include_dirs = ['include', '%s/include' % os.environ['IMSUPPORT_DIR'], '%s/include' % os.environ['DESPYFITS_DIR']], extra_compile_args = ['-O3','-g','-Wall','-shared','-fPIC']) -libmasksatr = SharedLibrary( +libmasksatr = Extension( 'masksatr', sources = ['src/libmasksatr.c'], include_dirs = ['include', '%s/include' % os.environ['IMSUPPORT_DIR'], '%s/include' % os.environ['DESPYFITS_DIR']], extra_compile_args = ['-O3','-g','-Wall','-shared','-fPIC']) -libfpnumber = SharedLibrary( +libfpnumber = Extension( 'fpnumber', sources = ['src/libfpnumber.c'], include_dirs = ['include', '%s/include' % os.environ['IMSUPPORT_DIR'], '%s/include' % os.environ['DESPYFITS_DIR']], @@ -49,11 +46,11 @@ # The main call setup(name='pixcorrect', - version ='0.5.0', + version ='0.5.8', description = "Pixel-level image correction", author = "Eric Neilsen", author_email = "neilsen@fnal.gov", - shlibs = [libbiascorrect, libbpm, libfixcol, libflatcorrect, libmasksatr, libfpnumber], + ext_modules = [libbiascorrect, libbpm, libfixcol, libflatcorrect, libmasksatr, libfpnumber], packages = ['pixcorrect'], package_dir = {'': 'python'}, scripts = bin_files, From f3abccff41a35b6d5d65ea07e19b42b8c2fd74ca Mon Sep 17 00:00:00 2001 From: Doug Friedel Date: Fri, 17 Jan 2020 10:47:14 -0600 Subject: [PATCH 2/4] missed this in update --- ups/pixcorrect.table.eups | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ups/pixcorrect.table.eups b/ups/pixcorrect.table.eups index 887a98e..f9a9623 100644 --- a/ups/pixcorrect.table.eups +++ b/ups/pixcorrect.table.eups @@ -1,6 +1,9 @@ -# depyfits will load imsuport and shlibs -setupRequired(despyfits 0.5.0+0) -setupRequired(scipy 0.14.0+7) +setupRequired(despyfits) +setupRequired(numpy) +setupRequired(scipy) +setupRequired(despymisc) +setupRequired(matplotlib) +setupRequired(despyastro) envAppend(PYTHONPATH, ${PRODUCT_DIR}/python) 
envAppend(PATH, ${PRODUCT_DIR}/bin) From cf722d1a77d793ebe2d2a5c28bb946e3c9787adb Mon Sep 17 00:00:00 2001 From: Doug Friedel Date: Tue, 28 Jan 2020 09:56:25 -0600 Subject: [PATCH 3/4] updated version number --- setup.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index 81fa9d9..43f5b0e 100644 --- a/setup.py +++ b/setup.py @@ -1,11 +1,11 @@ -import os +import os import distutils from distutils.core import setup, Extension import glob bin_files = glob.glob('bin/*') -#inc_files = glob.glob("include/*.h") -#doc_files = glob.glob("doc/*.*") + glob.glob("doc/*/*") +#inc_files = glob.glob("include/*.h") +#doc_files = glob.glob("doc/*.*") + glob.glob("doc/*/*") libbiascorrect = Extension( @@ -46,7 +46,7 @@ # The main call setup(name='pixcorrect', - version ='0.5.8', + version ='3.0.0', description = "Pixel-level image correction", author = "Eric Neilsen", author_email = "neilsen@fnal.gov", From bd10d49af42c3402d2c3968a610f2b6eb5b54979 Mon Sep 17 00:00:00 2001 From: Doug Friedel Date: Tue, 28 Jan 2020 09:57:28 -0600 Subject: [PATCH 4/4] updated dependency versions --- pixcorrect_test.build | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pixcorrect_test.build b/pixcorrect_test.build index 043014c..1d14cef 100755 --- a/pixcorrect_test.build +++ b/pixcorrect_test.build @@ -8,8 +8,8 @@ export PRODUCT_DIR=$HOME/build-test/pixcorrect echo "Will Install to: $PRODUCT_DIR" source $EUPS_DIR/desdm_eups_setup.sh -setup -v scipy 0.14.0+9 -setup -v despyfits 0.5.3+0 +setup -v scipy +setup -v despyfits 3.0.0 #setup -v -r ~/build-test/despyfits export PYTHONPATH=$PRODUCT_DIR/python:$PYTHONPATH