From 73a0b9432719c01a7c82b53db903536c736bcd45 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Mon, 14 Aug 2023 12:20:42 -0400 Subject: [PATCH 01/90] Begin refactor of Cython code by moving it into its own module --- .gitignore | 1 + setup.py | 4 ++-- src/stcal/ramp_fitting/ols_cas22/__init__.py | 3 +++ .../ramp_fitting/{ols_cas22.pyx => ols_cas22/_ols_cas22.pyx} | 0 4 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 src/stcal/ramp_fitting/ols_cas22/__init__.py rename src/stcal/ramp_fitting/{ols_cas22.pyx => ols_cas22/_ols_cas22.pyx} (100%) diff --git a/.gitignore b/.gitignore index c4fc59e8..72d64360 100644 --- a/.gitignore +++ b/.gitignore @@ -139,6 +139,7 @@ dmypy.json # Cython debug symbols cython_debug/ +src/stcal/ramp_fitting/ols_cas22/*.c # setuptools-scm generated module src/stcal/_version.py diff --git a/setup.py b/setup.py index 3d3d8fb6..55817d6d 100644 --- a/setup.py +++ b/setup.py @@ -6,8 +6,8 @@ Options.docstrings = True Options.annotate = False -extensions = [Extension('stcal.ramp_fitting.ols_cas22', - ['src/stcal/ramp_fitting/ols_cas22.pyx'], +extensions = [Extension('stcal.ramp_fitting.ols_cas22._ols_cas22', + ['src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx'], include_dirs=[np.get_include()], extra_compile_args=['-std=c99'])] diff --git a/src/stcal/ramp_fitting/ols_cas22/__init__.py b/src/stcal/ramp_fitting/ols_cas22/__init__.py new file mode 100644 index 00000000..427965a9 --- /dev/null +++ b/src/stcal/ramp_fitting/ols_cas22/__init__.py @@ -0,0 +1,3 @@ +from ._ols_cas22 import fit_ramps + +__all__ = ['fit_ramps'] diff --git a/src/stcal/ramp_fitting/ols_cas22.pyx b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx similarity index 100% rename from src/stcal/ramp_fitting/ols_cas22.pyx rename to src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx From d3d8c311e98a0a5df7709ce0c794526e6138248c Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Mon, 14 Aug 2023 15:15:16 -0400 Subject: [PATCH 02/90] Clean up the fit_one_ramp function --- .../ramp_fitting/ols_cas22/_ols_cas22.pyx | 69 ++++++++++--------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx index a57960b1..323c9c3f 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx @@ -25,7 +25,7 @@ cdef inline float get_weight_power(float s): @cython.cdivision(True) cdef inline (float, float, float) fit_one_ramp( float [:] resultants, int start, int end, float read_noise, - float [:] tbar, float [:] tau, int [:] nn): + float [:] t_bar, float [:] tau, int [:] nn): """Fit a portion of single ramp using the Casertano+22 algorithm. Parameters @@ -38,7 +38,7 @@ cdef inline (float, float, float) fit_one_ramp( ending point of portion to fit within this pixel read_noise : float read noise for this pixel - tbar : float [:] + t_bar : float [:] mean times of resultants tau : float [:] variance weighted mean times of resultants @@ -49,18 +49,24 @@ cdef inline (float, float, float) fit_one_ramp( ------- slope : float fit slope - slopereadvar : float + slope_read_var : float read noise induced variance in slope - slopepoissonvar : float + slope_poisson_var : float coefficient of Poisson-noise induced variance in slope multiply by true flux to get actual Poisson variance. """ + cdef int n_res = end - start + 1 + + # Special case where there is no or one resultant, there is no fit. + if n_res <= 1: + return 0, 0, 0 + + # Else, do the fitting. 
cdef int i = 0, j = 0 - cdef int nres = end - start + 1 - cdef float ww[2048] - cdef float kk[2048] - cdef float slope = 0, slopereadvar = 0, slopepoissonvar = 0 - cdef float tbarmid = (tbar[start] + tbar[end]) / 2 + cdef float wfi_weight[2048] + cdef float coeffs[2048] + cdef float slope = 0, slope_read_var = 0, slope_poisson_var = 0 + cdef float tbarmid = (t_bar[start] + t_bar[end]) / 2 # Casertano+2022 Eq. 44 # Note we've departed from Casertano+22 slightly; @@ -73,44 +79,39 @@ cdef inline (float, float, float) fit_one_ramp( # It's easy to use up a lot of dynamic range on something like # (tbar - tbarmid) ** 10. Rescale these. - cdef float tscale = (tbar[end] - tbar[start]) / 2 - if tscale == 0: - tscale = 1 - cdef float f0 = 0, f1 = 0, f2 = 0 + cdef float t_scale = (t_bar[end] - t_bar[start]) / 2 + t_scale = 1 if t_scale == 0 else t_scale - # Special case where there is no or one resultant, there is no fit. - if nres <= 1: - return 0, 0, 0 + cdef float f0 = 0, f1 = 0, f2 = 0 - # Else, do the fitting. with cython.cpow(True): # Issue when tbar[] == tbarmid causes exception otherwise - for i in range(nres): + for i in range(n_res): # Casertano+22, Eq. 45 - ww[i] = ((((1 + weight_power) * nn[start + i]) / + wfi_weight[i] = ((((1 + weight_power) * nn[start + i]) / (1 + weight_power * nn[start + i])) * - fabs((tbar[start + i] - tbarmid) / tscale) ** weight_power) + fabs((t_bar[start + i] - tbarmid) / t_scale) ** weight_power) # Casertano+22 Eq. 35 - f0 += ww[i] - f1 += ww[i] * tbar[start + i] - f2 += ww[i] * tbar[start + i]**2 + f0 += wfi_weight[i] + f1 += wfi_weight[i] * t_bar[start + i] + f2 += wfi_weight[i] * t_bar[start + i]**2 # Casertano+22 Eq. 36 - cdef float dd = f2 * f0 - f1 ** 2 - if dd == 0: + cdef float det = f2 * f0 - f1 ** 2 + if det == 0: return (0.0, 0.0, 0.0) - for i in range(nres): + for i in range(n_res): # Casertano+22 Eq. 37 - kk[i] = (f0 * tbar[start + i] - f1) * ww[i] / dd - for i in range(nres): + coeffs[i] = (f0 * t_bar[start + i] - f1) * wfi_weight[i] / det + for i in range(n_res): # Casertano+22 Eq. 38 - slope += kk[i] * resultants[start + i] + slope += coeffs[i] * resultants[start + i] # Casertano+22 Eq. 
39 - slopereadvar += kk[i] ** 2 * read_noise ** 2 / nn[start + i] + slope_read_var += coeffs[i] ** 2 * read_noise ** 2 / nn[start + i] # Casertano+22 Eq 40 - slopepoissonvar += kk[i] ** 2 * tau[start + i] - for j in range(i + 1, nres): - slopepoissonvar += 2 * kk[i] * kk[j] * tbar[start + i] + slope_poisson_var += coeffs[i] ** 2 * tau[start + i] + for j in range(i + 1, n_res): + slope_poisson_var += 2 * coeffs[i] * coeffs[j] * t_bar[start + i] - return (slope, slopereadvar, slopepoissonvar) + return (slope, slope_read_var, slope_poisson_var) @cython.boundscheck(False) From afe4450b26909bc5526f04d484cad877e340eb14 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Tue, 15 Aug 2023 09:27:11 -0400 Subject: [PATCH 03/90] Split cython code into two modules --- setup.py | 18 ++- .../ramp_fitting/ols_cas22/_fit_one_ramp.pxd | 3 + .../ramp_fitting/ols_cas22/_fit_one_ramp.pyx | 119 ++++++++++++++++++ .../ramp_fitting/ols_cas22/_ols_cas22.pyx | 110 +--------------- 4 files changed, 137 insertions(+), 113 deletions(-) create mode 100644 src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd create mode 100644 src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx diff --git a/setup.py b/setup.py index 55817d6d..8b2beb43 100644 --- a/setup.py +++ b/setup.py @@ -6,9 +6,19 @@ Options.docstrings = True Options.annotate = False -extensions = [Extension('stcal.ramp_fitting.ols_cas22._ols_cas22', - ['src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx'], - include_dirs=[np.get_include()], - extra_compile_args=['-std=c99'])] +extensions = [ + Extension( + 'stcal.ramp_fitting.ols_cas22._fit_one_ramp', + ['src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx'], + include_dirs=[np.get_include()], + extra_compile_args=['-std=c99'] + ), + Extension( + 'stcal.ramp_fitting.ols_cas22._ols_cas22', + ['src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx'], + include_dirs=[np.get_include()], + extra_compile_args=['-std=c99'] + ), +] setup(ext_modules=cythonize(extensions)) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd new file mode 100644 index 00000000..7757a9d3 --- /dev/null +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd @@ -0,0 +1,3 @@ +cdef (float, float, float) fit_one_ramp( + float [:] resultants, int start, int end, float read_noise, + float [:] t_bar, float [:] tau, int [:] nn) \ No newline at end of file diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx new file mode 100644 index 00000000..92633376 --- /dev/null +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx @@ -0,0 +1,119 @@ + +from libc.math cimport sqrt, fabs +import numpy as np +cimport numpy as np + +cimport cython + +# Casertano+2022, Table 2 +cdef float[2][6] PTABLE = [ + [-np.inf, 5, 10, 20, 50, 100], + [0, 0.4, 1, 3, 6, 10]] +cdef int PTABLE_LENGTH = 6 + +cdef inline float get_weight_power(float s): + cdef int i + for i in range(PTABLE_LENGTH): + if s < PTABLE[0][i]: + return PTABLE[1][i - 1] + return PTABLE[1][i] + + +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +cdef inline (float, float, float) fit_one_ramp( + float [:] resultants, int start, int end, float read_noise, + float [:] t_bar, float [:] tau, int [:] n_reads): + """Fit a portion of single ramp using the Casertano+22 algorithm. 
+ + Parameters + ---------- + resultants : float [:] + array of resultants for single pixel + start : int + starting point of portion to fit within this pixel + end : int + ending point of portion to fit within this pixel + read_noise : float + read noise for this pixel + t_bar : float [:] + mean times of resultants + tau : float [:] + variance weighted mean times of resultants + n_reads : int [:] + number of reads contributing to reach resultant + + Returns + ------- + slope : float + fit slope + slope_read_var : float + read noise induced variance in slope + slope_poisson_var : float + coefficient of Poisson-noise induced variance in slope + multiply by true flux to get actual Poisson variance. + """ + cdef int n_resultants = end - start + 1 + + # Special case where there is no or one resultant, there is no fit. + if n_resultants <= 1: + return 0, 0, 0 + + # Else, do the fitting. + cdef int i = 0, j = 0 + cdef float weights[2048] + cdef float coeffs[2048] + cdef float slope = 0, slope_read_var = 0, slope_poisson_var = 0 + cdef float t_bar_mid = (t_bar[start] + t_bar[end]) / 2 + + # Casertano+2022 Eq. 44 + # Note we've departed from Casertano+22 slightly; + # there s is just resultants[end]. But that doesn't seem good if, e.g., + # a CR in the first resultant has boosted the whole ramp high but there + # is no actual signal. + cdef float s = max(resultants[end] - resultants[start], 0) + s = s / sqrt(read_noise**2 + s) + cdef float power = get_weight_power(s) + + # It's easy to use up a lot of dynamic range on something like + # (tbar - tbarmid) ** 10. Rescale these. + cdef float t_scale = (t_bar[end] - t_bar[start]) / 2 + t_scale = 1 if t_scale == 0 else t_scale + + cdef float f0 = 0, f1 = 0, f2 = 0 + + with cython.cpow(True): # Issue when tbar[] == tbarmid causes exception otherwise + for i in range(n_resultants): + # Casertano+22, Eq. 45 + weights[i] = ((((1 + power) * n_reads[start + i]) / + (1 + power * n_reads[start + i])) * + fabs((t_bar[start + i] - t_bar_mid) / t_scale) ** power) + + # Casertano+22 Eq. 35 + f0 += weights[i] + f1 += weights[i] * t_bar[start + i] + f2 += weights[i] * t_bar[start + i]**2 + + # Casertano+22 Eq. 36 + cdef float det = f2 * f0 - f1 ** 2 + if det == 0: + return (0.0, 0.0, 0.0) + + for i in range(n_resultants): + # Casertano+22 Eq. 37 + coeffs[i] = (f0 * t_bar[start + i] - f1) * weights[i] / det + + for i in range(n_resultants): + # Casertano+22 Eq. 38 + slope += coeffs[i] * resultants[start + i] + + # Casertano+22 Eq. 
39 + slope_read_var += coeffs[i] ** 2 * read_noise ** 2 / n_reads[start + i] + + # Casertano+22 Eq 40 + slope_poisson_var += coeffs[i] ** 2 * tau[start + i] + for j in range(i + 1, n_resultants): + slope_poisson_var += 2 * coeffs[i] * coeffs[j] * t_bar[start + i] + + return (slope, slope_read_var, slope_poisson_var) diff --git a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx index 323c9c3f..9c13f7c4 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx @@ -1,117 +1,9 @@ -from libc.math cimport sqrt, fabs import numpy as np cimport numpy as np cimport cython from stcal.ramp_fitting.ols_cas22_util import ma_table_to_tau, ma_table_to_tbar - -# Casertano+2022, Table 2 -cdef float[2][6] PTABLE = [ - [-np.inf, 5, 10, 20, 50, 100], - [0, 0.4, 1, 3, 6, 10]] -cdef int PTABLE_LENGTH = 6 - - -cdef inline float get_weight_power(float s): - cdef int ise - for i in range(PTABLE_LENGTH): - if s < PTABLE[0][i]: - return PTABLE[1][i - 1] - return PTABLE[1][i] - - -@cython.boundscheck(False) -@cython.wraparound(False) -@cython.cdivision(True) -cdef inline (float, float, float) fit_one_ramp( - float [:] resultants, int start, int end, float read_noise, - float [:] t_bar, float [:] tau, int [:] nn): - """Fit a portion of single ramp using the Casertano+22 algorithm. - - Parameters - ---------- - resultants : float [:] - array of resultants for single pixel - start : int - starting point of portion to fit within this pixel - end : int - ending point of portion to fit within this pixel - read_noise : float - read noise for this pixel - t_bar : float [:] - mean times of resultants - tau : float [:] - variance weighted mean times of resultants - nn : int [:] - number of reads contributing to reach resultant - - Returns - ------- - slope : float - fit slope - slope_read_var : float - read noise induced variance in slope - slope_poisson_var : float - coefficient of Poisson-noise induced variance in slope - multiply by true flux to get actual Poisson variance. - """ - cdef int n_res = end - start + 1 - - # Special case where there is no or one resultant, there is no fit. - if n_res <= 1: - return 0, 0, 0 - - # Else, do the fitting. - cdef int i = 0, j = 0 - cdef float wfi_weight[2048] - cdef float coeffs[2048] - cdef float slope = 0, slope_read_var = 0, slope_poisson_var = 0 - cdef float tbarmid = (t_bar[start] + t_bar[end]) / 2 - - # Casertano+2022 Eq. 44 - # Note we've departed from Casertano+22 slightly; - # there s is just resultants[end]. But that doesn't seem good if, e.g., - # a CR in the first resultant has boosted the whole ramp high but there - # is no actual signal. - cdef float s = max(resultants[end] - resultants[start], 0) - s = s / sqrt(read_noise**2 + s) - cdef float weight_power = get_weight_power(s) - - # It's easy to use up a lot of dynamic range on something like - # (tbar - tbarmid) ** 10. Rescale these. - cdef float t_scale = (t_bar[end] - t_bar[start]) / 2 - t_scale = 1 if t_scale == 0 else t_scale - - cdef float f0 = 0, f1 = 0, f2 = 0 - - with cython.cpow(True): # Issue when tbar[] == tbarmid causes exception otherwise - for i in range(n_res): - # Casertano+22, Eq. 45 - wfi_weight[i] = ((((1 + weight_power) * nn[start + i]) / - (1 + weight_power * nn[start + i])) * - fabs((t_bar[start + i] - tbarmid) / t_scale) ** weight_power) - # Casertano+22 Eq. 35 - f0 += wfi_weight[i] - f1 += wfi_weight[i] * t_bar[start + i] - f2 += wfi_weight[i] * t_bar[start + i]**2 - # Casertano+22 Eq. 
36 - cdef float det = f2 * f0 - f1 ** 2 - if det == 0: - return (0.0, 0.0, 0.0) - for i in range(n_res): - # Casertano+22 Eq. 37 - coeffs[i] = (f0 * t_bar[start + i] - f1) * wfi_weight[i] / det - for i in range(n_res): - # Casertano+22 Eq. 38 - slope += coeffs[i] * resultants[start + i] - # Casertano+22 Eq. 39 - slope_read_var += coeffs[i] ** 2 * read_noise ** 2 / nn[start + i] - # Casertano+22 Eq 40 - slope_poisson_var += coeffs[i] ** 2 * tau[start + i] - for j in range(i + 1, n_res): - slope_poisson_var += 2 * coeffs[i] * coeffs[j] * t_bar[start + i] - - return (slope, slope_read_var, slope_poisson_var) +from stcal.ramp_fitting.ols_cas22._fit_one_ramp cimport fit_one_ramp @cython.boundscheck(False) From 3a5961ec0e31a421a383976b09f754b9257a7ded Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Tue, 15 Aug 2023 15:02:10 -0400 Subject: [PATCH 04/90] Add statistic computer --- setup.py | 6 ++ .../ols_cas22/_jump_detection.pyx | 81 +++++++++++++++++++ 2 files changed, 87 insertions(+) create mode 100644 src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx diff --git a/setup.py b/setup.py index 8b2beb43..0d8ed0b3 100644 --- a/setup.py +++ b/setup.py @@ -13,6 +13,12 @@ include_dirs=[np.get_include()], extra_compile_args=['-std=c99'] ), + Extension( + 'stcal.ramp_fitting.ols_cas22._jump_detection', + ['src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx'], + include_dirs=[np.get_include()], + extra_compile_args=['-std=c99'] + ), Extension( 'stcal.ramp_fitting.ols_cas22._ols_cas22', ['src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx'], diff --git a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx new file mode 100644 index 00000000..c5bd673f --- /dev/null +++ b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx @@ -0,0 +1,81 @@ +import numpy as np +cimport numpy as np +from libc.math cimport sqrt +from libc.stdlib cimport malloc, free +cimport cython + +from stcal.ramp_fitting.ols_cas22._fit_one_ramp import fit_one_ramp + +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +cdef inline float correction_factor(int i, int j, float [:] t_bar): + """Compute the correction factor + + Parameters + ---------- + i : int + The index of the first read in the segment + j : int + The index of the last read in the segment + t_bar : float + """ + cdef float denom = t_bar[-1] - t_bar[0] + + if j - i == 1: + return (1 - (t_bar[i + 1] - t_bar[i]) / denom) ** 2 + else: + return (1 - 0.75 * (t_bar[i + 2] - t_bar[i]) / denom) ** 2 + +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +cdef inline float delta_var( + int i, int j, float [:] t_bar, float [:] tau, float [:] n_reads, float read_noise, float slope): + + return ( + ( + read_noise * (1 / n_reads[i] + 1 / n_reads[j]) + + slope * (tau[i] + tau[j] - np.min(t_bar[i], t_bar[j])) * correction_factor(i, j, t_bar) + ) / ((t_bar[j] - t_bar[i]) ** 2) + ) + + +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +cdef inline float stat( + int i, int j, float[:] resultants, float [:] t_bar, float [:] tau, + float [:] n_reads, float read_noise, float slope): + cdef float delta = ((resultants[j] - resultants[i]) / (t_bar[j] - t_bar[i])) - slope + + return delta / sqrt(delta_var(i, j, t_bar, tau, n_reads, read_noise, slope)) + + +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +cdef inline float *statistics( + float [:] resultants, float [:] t_bar, float [:] tau, float [:] n_reads, float 
read_noise, float slope + ): + cdef int n_stats = len(n_reads), i + + cdef float *stats = malloc(n_stats * sizeof(float)) + cdef float stat_1, stat_2 + + for i in range(n_stats): + stat_1 = stat(i, i + 1, resultants, t_bar, tau, n_reads, read_noise, slope) + stat_2 = stat(i, i + 2, resultants, t_bar, tau, n_reads, read_noise, slope) + + stats[i] = np.max(stat_1, stat_2) + + return stats + + +# @cython.boundscheck(False) +# @cython.wraparound(False) +# @cython.cdivision(True) +# cdef inline (float, float, float) jump_detection( +# float [:] resultants, int start, int end, float read_noise, +# float [:] t_bar, float [:] tau, int [:] n_reads): + From 0ae6afda23e5fb6f0d4bba03c063ac08200ac4ee Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Tue, 15 Aug 2023 16:48:27 -0400 Subject: [PATCH 05/90] Convert to nicer data types --- setup.py | 6 ++ src/stcal/ramp_fitting/ols_cas22/_core.pxd | 49 +++++++++++++ src/stcal/ramp_fitting/ols_cas22/_core.pyx | 51 ++++++++++++++ .../ramp_fitting/ols_cas22/_fit_one_ramp.pxd | 6 +- .../ramp_fitting/ols_cas22/_fit_one_ramp.pyx | 70 +++++++------------ .../ramp_fitting/ols_cas22/_ols_cas22.pyx | 24 +++---- 6 files changed, 147 insertions(+), 59 deletions(-) create mode 100644 src/stcal/ramp_fitting/ols_cas22/_core.pxd create mode 100644 src/stcal/ramp_fitting/ols_cas22/_core.pyx diff --git a/setup.py b/setup.py index 0d8ed0b3..ccd9db1f 100644 --- a/setup.py +++ b/setup.py @@ -7,6 +7,12 @@ Options.annotate = False extensions = [ + Extension( + 'stcal.ramp_fitting.ols_cas22._core', + ['src/stcal/ramp_fitting/ols_cas22/_core.pyx'], + include_dirs=[np.get_include()], + extra_compile_args=['-std=c99'] + ), Extension( 'stcal.ramp_fitting.ols_cas22._fit_one_ramp', ['src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx'], diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd new file mode 100644 index 00000000..6076b267 --- /dev/null +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -0,0 +1,49 @@ +cdef class Ramp: + # """ + # Class to contain the data for a single pixel ramp to be fit + + # This has to be a class rather than a struct in order to contain memory views + + # Parameters + # ---------- + # resultants : float [:] + # array of resultants for single pixel + # start : int + # starting point of portion to fit within this pixel + # end : int + # ending point of portion to fit within this pixel + # read_noise : float + # read noise for this pixel + # t_bar : float [:] + # mean times of resultants + # tau : float [:] + # variance weighted mean times of resultants + # n_reads : int [:] + # number of reads contributing to reach resultant + # """ + cdef public: + int start, end + float read_noise + float [:] resultants, t_bar, tau + int [:] n_reads + +cdef struct Fit: + # """ + # Output of a single fit + + # Parameters + # ---------- + # slope : float + # fit slope + # slope_read_var : float + # read noise induced variance in slope + # slope_poisson_var : float + # coefficient of Poisson-noise induced variance in slope + # multiply by true flux to get actual Poisson variance. 
+ # """ + + float slope, slope_read_var, slope_poisson_var + +cdef Ramp make_ramp( + float [:] resultants, int start, int end, float read_noise, + float [:] t_bar, float [:] tau, int [:] n_reads) \ No newline at end of file diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx new file mode 100644 index 00000000..c6c81302 --- /dev/null +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -0,0 +1,51 @@ +from stcal.ramp_fitting.ols_cas22._core cimport Ramp, Fit + + +cdef inline Ramp make_ramp( + float [:] resultants, int start, int end, float read_noise, + float [:] t_bar, float [:] tau, int [:] n_reads): + """ + Fast constructor for the Ramp C class. + + This is signifantly faster than using the `__init__` or `__cinit__` + this is because this does not have to pass through the Python as part + of the construction. + + Parameters + ---------- + resultants : float [:] + array of resultants for single pixel + start : int + starting point of portion to fit within this pixel + end : int + ending point of portion to fit within this pixel + read_noise : float + read noise for this pixel + t_bar : float [:] + mean times of resultants + tau : float [:] + variance weighted mean times of resultants + n_reads : int [:] + number of reads contributing to reach resultant + + Return + ------ + ramp : Ramp + Ramp C-class object + """ + + cdef Ramp ramp = Ramp() + + ramp.start = start + ramp.end = end + + ramp.resultants = resultants + ramp.t_bar = t_bar + ramp.tau = tau + + ramp.read_noise = read_noise + + ramp.n_reads = n_reads + + return ramp + diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd index 7757a9d3..346bec0f 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd @@ -1,3 +1,3 @@ -cdef (float, float, float) fit_one_ramp( - float [:] resultants, int start, int end, float read_noise, - float [:] t_bar, float [:] tau, int [:] nn) \ No newline at end of file +from stcal.ramp_fitting.ols_cas22._core cimport Ramp, Fit + +cdef Fit fit_one_ramp(Ramp ramp) \ No newline at end of file diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx index 92633376..b41aac86 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx @@ -5,6 +5,8 @@ cimport numpy as np cimport cython +from stcal.ramp_fitting.ols_cas22._core cimport Ramp, Fit + # Casertano+2022, Table 2 cdef float[2][6] PTABLE = [ [-np.inf, 5, 10, 20, 50, 100], @@ -22,63 +24,43 @@ cdef inline float get_weight_power(float s): @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) -cdef inline (float, float, float) fit_one_ramp( - float [:] resultants, int start, int end, float read_noise, - float [:] t_bar, float [:] tau, int [:] n_reads): +cdef inline Fit fit_one_ramp(Ramp ramp): """Fit a portion of single ramp using the Casertano+22 algorithm. - Parameters ---------- - resultants : float [:] - array of resultants for single pixel - start : int - starting point of portion to fit within this pixel - end : int - ending point of portion to fit within this pixel - read_noise : float - read noise for this pixel - t_bar : float [:] - mean times of resultants - tau : float [:] - variance weighted mean times of resultants - n_reads : int [:] - number of reads contributing to reach resultant + ramp : Ramp + Ramp to fit. 
Returns ------- - slope : float - fit slope - slope_read_var : float - read noise induced variance in slope - slope_poisson_var : float - coefficient of Poisson-noise induced variance in slope - multiply by true flux to get actual Poisson variance. + fit : Fit + fit struct """ - cdef int n_resultants = end - start + 1 + cdef Fit fit = Fit(0, 0, 0) + cdef int n_resultants = ramp.end - ramp.start + 1 # Special case where there is no or one resultant, there is no fit. if n_resultants <= 1: - return 0, 0, 0 + return fit # Else, do the fitting. cdef int i = 0, j = 0 cdef float weights[2048] cdef float coeffs[2048] - cdef float slope = 0, slope_read_var = 0, slope_poisson_var = 0 - cdef float t_bar_mid = (t_bar[start] + t_bar[end]) / 2 + cdef float t_bar_mid = (ramp.t_bar[ramp.start] + ramp.t_bar[ramp.end]) / 2 # Casertano+2022 Eq. 44 # Note we've departed from Casertano+22 slightly; # there s is just resultants[end]. But that doesn't seem good if, e.g., # a CR in the first resultant has boosted the whole ramp high but there # is no actual signal. - cdef float s = max(resultants[end] - resultants[start], 0) - s = s / sqrt(read_noise**2 + s) + cdef float s = max(ramp.resultants[ramp.end] - ramp.resultants[ramp.start], 0) + s = s / sqrt(ramp.read_noise**2 + s) cdef float power = get_weight_power(s) # It's easy to use up a lot of dynamic range on something like # (tbar - tbarmid) ** 10. Rescale these. - cdef float t_scale = (t_bar[end] - t_bar[start]) / 2 + cdef float t_scale = (ramp.t_bar[ramp.end] - ramp.t_bar[ramp.start]) / 2 t_scale = 1 if t_scale == 0 else t_scale cdef float f0 = 0, f1 = 0, f2 = 0 @@ -86,34 +68,34 @@ cdef inline (float, float, float) fit_one_ramp( with cython.cpow(True): # Issue when tbar[] == tbarmid causes exception otherwise for i in range(n_resultants): # Casertano+22, Eq. 45 - weights[i] = ((((1 + power) * n_reads[start + i]) / - (1 + power * n_reads[start + i])) * - fabs((t_bar[start + i] - t_bar_mid) / t_scale) ** power) + weights[i] = ((((1 + power) * ramp.n_reads[ramp.start + i]) / + (1 + power * ramp.n_reads[ramp.start + i])) * + fabs((ramp.t_bar[ramp.start + i] - t_bar_mid) / t_scale) ** power) # Casertano+22 Eq. 35 f0 += weights[i] - f1 += weights[i] * t_bar[start + i] - f2 += weights[i] * t_bar[start + i]**2 + f1 += weights[i] * ramp.t_bar[ramp.start + i] + f2 += weights[i] * ramp.t_bar[ramp.start + i]**2 # Casertano+22 Eq. 36 cdef float det = f2 * f0 - f1 ** 2 if det == 0: - return (0.0, 0.0, 0.0) + return fit for i in range(n_resultants): # Casertano+22 Eq. 37 - coeffs[i] = (f0 * t_bar[start + i] - f1) * weights[i] / det + coeffs[i] = (f0 * ramp.t_bar[ramp.start + i] - f1) * weights[i] / det for i in range(n_resultants): # Casertano+22 Eq. 38 - slope += coeffs[i] * resultants[start + i] + fit.slope += coeffs[i] * ramp.resultants[ramp.start + i] # Casertano+22 Eq. 
39 - slope_read_var += coeffs[i] ** 2 * read_noise ** 2 / n_reads[start + i] + fit.slope_read_var += coeffs[i] ** 2 * ramp.read_noise ** 2 / ramp.n_reads[ramp.start + i] # Casertano+22 Eq 40 - slope_poisson_var += coeffs[i] ** 2 * tau[start + i] + fit.slope_poisson_var += coeffs[i] ** 2 * ramp.tau[ramp.start + i] for j in range(i + 1, n_resultants): - slope_poisson_var += 2 * coeffs[i] * coeffs[j] * t_bar[start + i] + fit.slope_poisson_var += 2 * coeffs[i] * coeffs[j] * ramp.t_bar[ramp.start + i] - return (slope, slope_read_var, slope_poisson_var) + return fit diff --git a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx index 9c13f7c4..a736b3d6 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx @@ -3,6 +3,8 @@ cimport numpy as np cimport cython from stcal.ramp_fitting.ols_cas22_util import ma_table_to_tau, ma_table_to_tbar + +from stcal.ramp_fitting.ols_cas22._core cimport Ramp, Fit, make_ramp from stcal.ramp_fitting.ols_cas22._fit_one_ramp cimport fit_one_ramp @@ -94,19 +96,17 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, # we should have just filled out the starting and stopping locations # of each ramp. - cdef float slope0, slopereadvar0, slopepoissonvar0 - cdef float [:, :] resview = resultants - cdef float [:] rnview = read_noise - cdef float [:] tbarview = tbar - cdef float [:] tauview = tau - cdef int [:] nnview = nn + cdef Ramp ramp + cdef Fit fit for i in range(nramp): - slope0, slopereadvar0, slopepoissonvar0 = fit_one_ramp( - resview[:, pix[i]], resstart[i], resend[i], rnview[pix[i]], - tbarview, tauview, nnview) - slope[i] = slope0 - slopereadvar[i] = slopereadvar0 - slopepoissonvar[i] = slopepoissonvar0 + ramp = make_ramp(resultants[:, pix[i]], resstart[i], resend[i], + read_noise[pix[i]], tbar, tau, nn) + + fit = fit_one_ramp(ramp) + + slope[i] = fit.slope + slopereadvar[i] = fit.slope_read_var + slopepoissonvar[i] = fit.slope_poisson_var return dict(slope=slope, slopereadvar=slopereadvar, slopepoissonvar=slopepoissonvar, From 62fe82be9b5a74eed35175ed7310eb79818602d9 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Tue, 15 Aug 2023 16:54:55 -0400 Subject: [PATCH 06/90] Remove output struct --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 38 ------------------- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 26 ++++++++++++- .../ramp_fitting/ols_cas22/_fit_one_ramp.pxd | 4 +- .../ramp_fitting/ols_cas22/_fit_one_ramp.pyx | 29 ++++++++------ .../ramp_fitting/ols_cas22/_ols_cas22.pyx | 9 +---- 5 files changed, 46 insertions(+), 60 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 6076b267..409dcb86 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -1,48 +1,10 @@ cdef class Ramp: - # """ - # Class to contain the data for a single pixel ramp to be fit - - # This has to be a class rather than a struct in order to contain memory views - - # Parameters - # ---------- - # resultants : float [:] - # array of resultants for single pixel - # start : int - # starting point of portion to fit within this pixel - # end : int - # ending point of portion to fit within this pixel - # read_noise : float - # read noise for this pixel - # t_bar : float [:] - # mean times of resultants - # tau : float [:] - # variance weighted mean times of resultants - # n_reads : int [:] - # number of reads contributing to reach resultant - # """ cdef public: int 
start, end float read_noise float [:] resultants, t_bar, tau int [:] n_reads -cdef struct Fit: - # """ - # Output of a single fit - - # Parameters - # ---------- - # slope : float - # fit slope - # slope_read_var : float - # read noise induced variance in slope - # slope_poisson_var : float - # coefficient of Poisson-noise induced variance in slope - # multiply by true flux to get actual Poisson variance. - # """ - - float slope, slope_read_var, slope_poisson_var cdef Ramp make_ramp( float [:] resultants, int start, int end, float read_noise, diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index c6c81302..d6677839 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -1,4 +1,28 @@ -from stcal.ramp_fitting.ols_cas22._core cimport Ramp, Fit +from stcal.ramp_fitting.ols_cas22._core cimport Ramp + +cdef class Ramp: + """ + Class to contain the data for a single pixel ramp to be fit + + This has to be a class rather than a struct in order to contain memory views + + Parameters + ---------- + resultants : float [:] + array of resultants for single pixel + start : int + starting point of portion to fit within this pixel + end : int + ending point of portion to fit within this pixel + read_noise : float + read noise for this pixel + t_bar : float [:] + mean times of resultants + tau : float [:] + variance weighted mean times of resultants + n_reads : int [:] + number of reads contributing to reach resultant + """ cdef inline Ramp make_ramp( diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd index 346bec0f..f397385c 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd @@ -1,3 +1,3 @@ -from stcal.ramp_fitting.ols_cas22._core cimport Ramp, Fit +from stcal.ramp_fitting.ols_cas22._core cimport Ramp -cdef Fit fit_one_ramp(Ramp ramp) \ No newline at end of file +cdef (float, float, float) fit_one_ramp(Ramp ramp) \ No newline at end of file diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx index b41aac86..f1b4bf49 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx @@ -5,7 +5,7 @@ cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport Ramp, Fit +from stcal.ramp_fitting.ols_cas22._core cimport Ramp # Casertano+2022, Table 2 cdef float[2][6] PTABLE = [ @@ -24,7 +24,7 @@ cdef inline float get_weight_power(float s): @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) -cdef inline Fit fit_one_ramp(Ramp ramp): +cdef inline (float, float, float) fit_one_ramp(Ramp ramp): """Fit a portion of single ramp using the Casertano+22 algorithm. Parameters ---------- @@ -33,20 +33,25 @@ cdef inline Fit fit_one_ramp(Ramp ramp): Returns ------- - fit : Fit - fit struct + slope : float + fit slope + slope_read_var : float + read noise induced variance in slope + slope_poisson_var : float + coefficient of Poisson-noise induced variance in slope + multiply by true flux to get actual Poisson variance. """ - cdef Fit fit = Fit(0, 0, 0) cdef int n_resultants = ramp.end - ramp.start + 1 # Special case where there is no or one resultant, there is no fit. if n_resultants <= 1: - return fit + return 0, 0, 0 # Else, do the fitting. 
cdef int i = 0, j = 0 cdef float weights[2048] cdef float coeffs[2048] + cdef float slope = 0, slope_read_var = 0, slope_poisson_var = 0 cdef float t_bar_mid = (ramp.t_bar[ramp.start] + ramp.t_bar[ramp.end]) / 2 # Casertano+2022 Eq. 44 @@ -80,7 +85,7 @@ cdef inline Fit fit_one_ramp(Ramp ramp): # Casertano+22 Eq. 36 cdef float det = f2 * f0 - f1 ** 2 if det == 0: - return fit + return (0.0, 0.0, 0.0) for i in range(n_resultants): # Casertano+22 Eq. 37 @@ -88,14 +93,14 @@ cdef inline Fit fit_one_ramp(Ramp ramp): for i in range(n_resultants): # Casertano+22 Eq. 38 - fit.slope += coeffs[i] * ramp.resultants[ramp.start + i] + slope += coeffs[i] * ramp.resultants[ramp.start + i] # Casertano+22 Eq. 39 - fit.slope_read_var += coeffs[i] ** 2 * ramp.read_noise ** 2 / ramp.n_reads[ramp.start + i] + slope_read_var += coeffs[i] ** 2 * ramp.read_noise ** 2 / ramp.n_reads[ramp.start + i] # Casertano+22 Eq 40 - fit.slope_poisson_var += coeffs[i] ** 2 * ramp.tau[ramp.start + i] + slope_poisson_var += coeffs[i] ** 2 * ramp.tau[ramp.start + i] for j in range(i + 1, n_resultants): - fit.slope_poisson_var += 2 * coeffs[i] * coeffs[j] * ramp.t_bar[ramp.start + i] + slope_poisson_var += 2 * coeffs[i] * coeffs[j] * ramp.t_bar[ramp.start + i] - return fit + return (slope, slope_read_var, slope_poisson_var) diff --git a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx index a736b3d6..094732fe 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx @@ -4,7 +4,7 @@ cimport cython from stcal.ramp_fitting.ols_cas22_util import ma_table_to_tau, ma_table_to_tbar -from stcal.ramp_fitting.ols_cas22._core cimport Ramp, Fit, make_ramp +from stcal.ramp_fitting.ols_cas22._core cimport Ramp, make_ramp from stcal.ramp_fitting.ols_cas22._fit_one_ramp cimport fit_one_ramp @@ -97,16 +97,11 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, # of each ramp. 
cdef Ramp ramp - cdef Fit fit for i in range(nramp): ramp = make_ramp(resultants[:, pix[i]], resstart[i], resend[i], read_noise[pix[i]], tbar, tau, nn) - fit = fit_one_ramp(ramp) - - slope[i] = fit.slope - slopereadvar[i] = fit.slope_read_var - slopepoissonvar[i] = fit.slope_poisson_var + slope[i], slopereadvar[i], slopepoissonvar[i] = fit_one_ramp(ramp) return dict(slope=slope, slopereadvar=slopereadvar, slopepoissonvar=slopepoissonvar, From d21eaf15f5336d90c6b96dd97891626aec4d37a1 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Tue, 15 Aug 2023 19:49:02 -0400 Subject: [PATCH 07/90] Fold fit into ramp class object --- setup.py | 6 - src/stcal/ramp_fitting/ols_cas22/_core.pxd | 13 ++- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 104 ++++++++++++++++- .../ramp_fitting/ols_cas22/_fit_one_ramp.pxd | 3 - .../ramp_fitting/ols_cas22/_fit_one_ramp.pyx | 106 ------------------ .../ols_cas22/_jump_detection.pyx | 31 ++--- .../ramp_fitting/ols_cas22/_ols_cas22.pyx | 15 ++- 7 files changed, 127 insertions(+), 151 deletions(-) delete mode 100644 src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd delete mode 100644 src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx diff --git a/setup.py b/setup.py index ccd9db1f..8b6c7665 100644 --- a/setup.py +++ b/setup.py @@ -13,12 +13,6 @@ include_dirs=[np.get_include()], extra_compile_args=['-std=c99'] ), - Extension( - 'stcal.ramp_fitting.ols_cas22._fit_one_ramp', - ['src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx'], - include_dirs=[np.get_include()], - extra_compile_args=['-std=c99'] - ), Extension( 'stcal.ramp_fitting.ols_cas22._jump_detection', ['src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx'], diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 409dcb86..c023a212 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -1,11 +1,12 @@ cdef class Ramp: - cdef public: - int start, end - float read_noise - float [:] resultants, t_bar, tau - int [:] n_reads + cdef public int start, end + cdef public float read_noise + cdef public float [:] resultants, t_bar, tau + cdef public int [:] n_reads + + cdef (float, float, float) fit(Ramp self) cdef Ramp make_ramp( float [:] resultants, int start, int end, float read_noise, - float [:] t_bar, float [:] tau, int [:] n_reads) \ No newline at end of file + float [:] t_bar, float [:] tau, int [:] n_reads) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index d6677839..fba24e5a 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -1,5 +1,23 @@ +from libc.math cimport sqrt, fabs +import numpy as np +cimport numpy as np +cimport cython + from stcal.ramp_fitting.ols_cas22._core cimport Ramp +# Casertano+2022, Table 2 +cdef float[2][6] PTABLE = [ + [-np.inf, 5, 10, 20, 50, 100], + [0, 0.4, 1, 3, 6, 10]] +cdef int PTABLE_LENGTH = 6 + +cdef inline float get_weight_power(float s): + cdef int i + for i in range(PTABLE_LENGTH): + if s < PTABLE[0][i]: + return PTABLE[1][i - 1] + return PTABLE[1][i] + cdef class Ramp: """ Class to contain the data for a single pixel ramp to be fit @@ -24,10 +42,91 @@ cdef class Ramp: number of reads contributing to reach resultant """ + cdef inline (float, float, float) fit(Ramp self): + """Fit a portion of single ramp using the Casertano+22 algorithm. 
+ + Returns + ------- + slope : float + fit slope + slope_read_var : float + read noise induced variance in slope + slope_poisson_var : float + coefficient of Poisson-noise induced variance in slope + multiply by true flux to get actual Poisson variance. + """ + cdef int n_resultants = self.end - self.start + 1 + + # Special case where there is no or one resultant, there is no fit. + if n_resultants <= 1: + return 0, 0, 0 + + # Else, do the fitting. + cdef int i = 0, j = 0 + cdef float weights[2048] + cdef float coeffs[2048] + cdef float slope = 0, slope_read_var = 0, slope_poisson_var = 0 + cdef float t_bar_mid = (self.t_bar[self.start] + self.t_bar[self.end]) / 2 + + # Casertano+2022 Eq. 44 + # Note we've departed from Casertano+22 slightly; + # there s is just resultants[end]. But that doesn't seem good if, e.g., + # a CR in the first resultant has boosted the whole ramp high but there + # is no actual signal. + cdef float s = max(self.resultants[self.end] - self.resultants[self.start], 0) + s = s / sqrt(self.read_noise**2 + s) + cdef float power = get_weight_power(s) + + # It's easy to use up a lot of dynamic range on something like + # (tbar - tbarmid) ** 10. Rescale these. + cdef float t_scale = (self.t_bar[self.end] - self.t_bar[self.start]) / 2 + t_scale = 1 if t_scale == 0 else t_scale + + cdef float f0 = 0, f1 = 0, f2 = 0 + + # Issue when tbar[] == tbarmid causes exception otherwise + with cython.cpow(True): + for i in range(n_resultants): + # Casertano+22, Eq. 45 + weights[i] = ((((1 + power) * self.n_reads[self.start + i]) / + (1 + power * self.n_reads[self.start + i])) * + fabs((self.t_bar[self.start + i] - t_bar_mid) / + t_scale) ** power) + + # Casertano+22 Eq. 35 + f0 += weights[i] + f1 += weights[i] * self.t_bar[self.start + i] + f2 += weights[i] * self.t_bar[self.start + i]**2 + + # Casertano+22 Eq. 36 + cdef float det = f2 * f0 - f1 ** 2 + if det == 0: + return (0.0, 0.0, 0.0) + + for i in range(n_resultants): + # Casertano+22 Eq. 37 + coeffs[i] = (f0 * self.t_bar[self.start + i] - f1) * weights[i] / det + + for i in range(n_resultants): + # Casertano+22 Eq. 38 + slope += coeffs[i] * self.resultants[self.start + i] + + # Casertano+22 Eq. 39 + slope_read_var += (coeffs[i] ** 2 * self.read_noise ** 2 / + self.n_reads[self.start + i]) + + # Casertano+22 Eq 40 + slope_poisson_var += coeffs[i] ** 2 * self.tau[self.start + i] + for j in range(i + 1, n_resultants): + slope_poisson_var += (2 * coeffs[i] * coeffs[j] * + self.t_bar[self.start + i]) + + return (slope, slope_read_var, slope_poisson_var) + cdef inline Ramp make_ramp( - float [:] resultants, int start, int end, float read_noise, - float [:] t_bar, float [:] tau, int [:] n_reads): + float [:] resultants, int start, int end, float read_noise, + float [:] t_bar, float [:] tau, int [:] n_reads): """ Fast constructor for the Ramp C class. 
@@ -72,4 +171,3 @@ cdef inline Ramp make_ramp( ramp.n_reads = n_reads return ramp - diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd deleted file mode 100644 index f397385c..00000000 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pxd +++ /dev/null @@ -1,3 +0,0 @@ -from stcal.ramp_fitting.ols_cas22._core cimport Ramp - -cdef (float, float, float) fit_one_ramp(Ramp ramp) \ No newline at end of file diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx deleted file mode 100644 index f1b4bf49..00000000 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_one_ramp.pyx +++ /dev/null @@ -1,106 +0,0 @@ - -from libc.math cimport sqrt, fabs -import numpy as np -cimport numpy as np - -cimport cython - -from stcal.ramp_fitting.ols_cas22._core cimport Ramp - -# Casertano+2022, Table 2 -cdef float[2][6] PTABLE = [ - [-np.inf, 5, 10, 20, 50, 100], - [0, 0.4, 1, 3, 6, 10]] -cdef int PTABLE_LENGTH = 6 - -cdef inline float get_weight_power(float s): - cdef int i - for i in range(PTABLE_LENGTH): - if s < PTABLE[0][i]: - return PTABLE[1][i - 1] - return PTABLE[1][i] - - -@cython.boundscheck(False) -@cython.wraparound(False) -@cython.cdivision(True) -cdef inline (float, float, float) fit_one_ramp(Ramp ramp): - """Fit a portion of single ramp using the Casertano+22 algorithm. - Parameters - ---------- - ramp : Ramp - Ramp to fit. - - Returns - ------- - slope : float - fit slope - slope_read_var : float - read noise induced variance in slope - slope_poisson_var : float - coefficient of Poisson-noise induced variance in slope - multiply by true flux to get actual Poisson variance. - """ - cdef int n_resultants = ramp.end - ramp.start + 1 - - # Special case where there is no or one resultant, there is no fit. - if n_resultants <= 1: - return 0, 0, 0 - - # Else, do the fitting. - cdef int i = 0, j = 0 - cdef float weights[2048] - cdef float coeffs[2048] - cdef float slope = 0, slope_read_var = 0, slope_poisson_var = 0 - cdef float t_bar_mid = (ramp.t_bar[ramp.start] + ramp.t_bar[ramp.end]) / 2 - - # Casertano+2022 Eq. 44 - # Note we've departed from Casertano+22 slightly; - # there s is just resultants[end]. But that doesn't seem good if, e.g., - # a CR in the first resultant has boosted the whole ramp high but there - # is no actual signal. - cdef float s = max(ramp.resultants[ramp.end] - ramp.resultants[ramp.start], 0) - s = s / sqrt(ramp.read_noise**2 + s) - cdef float power = get_weight_power(s) - - # It's easy to use up a lot of dynamic range on something like - # (tbar - tbarmid) ** 10. Rescale these. - cdef float t_scale = (ramp.t_bar[ramp.end] - ramp.t_bar[ramp.start]) / 2 - t_scale = 1 if t_scale == 0 else t_scale - - cdef float f0 = 0, f1 = 0, f2 = 0 - - with cython.cpow(True): # Issue when tbar[] == tbarmid causes exception otherwise - for i in range(n_resultants): - # Casertano+22, Eq. 45 - weights[i] = ((((1 + power) * ramp.n_reads[ramp.start + i]) / - (1 + power * ramp.n_reads[ramp.start + i])) * - fabs((ramp.t_bar[ramp.start + i] - t_bar_mid) / t_scale) ** power) - - # Casertano+22 Eq. 35 - f0 += weights[i] - f1 += weights[i] * ramp.t_bar[ramp.start + i] - f2 += weights[i] * ramp.t_bar[ramp.start + i]**2 - - # Casertano+22 Eq. 36 - cdef float det = f2 * f0 - f1 ** 2 - if det == 0: - return (0.0, 0.0, 0.0) - - for i in range(n_resultants): - # Casertano+22 Eq. 
37 - coeffs[i] = (f0 * ramp.t_bar[ramp.start + i] - f1) * weights[i] / det - - for i in range(n_resultants): - # Casertano+22 Eq. 38 - slope += coeffs[i] * ramp.resultants[ramp.start + i] - - # Casertano+22 Eq. 39 - slope_read_var += coeffs[i] ** 2 * ramp.read_noise ** 2 / ramp.n_reads[ramp.start + i] - - # Casertano+22 Eq 40 - slope_poisson_var += coeffs[i] ** 2 * ramp.tau[ramp.start + i] - for j in range(i + 1, n_resultants): - slope_poisson_var += 2 * coeffs[i] * coeffs[j] * ramp.t_bar[ramp.start + i] - - return (slope, slope_read_var, slope_poisson_var) diff --git a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx index c5bd673f..ea34fc51 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx @@ -1,10 +1,9 @@ import numpy as np cimport numpy as np from libc.math cimport sqrt -from libc.stdlib cimport malloc, free +from libc.stdlib cimport malloc cimport cython -from stcal.ramp_fitting.ols_cas22._fit_one_ramp import fit_one_ramp @cython.boundscheck(False) @cython.wraparound(False) @@ -18,7 +17,7 @@ cdef inline float correction_factor(int i, int j, float [:] t_bar): The index of the first read in the segment j : int The index of the last read in the segment - t_bar : float + t_bar : float """ cdef float denom = t_bar[-1] - t_bar[0] @@ -27,16 +26,19 @@ cdef inline float correction_factor(int i, int j, float [:] t_bar): else: return (1 - 0.75 * (t_bar[i + 2] - t_bar[i]) / denom) ** 2 + @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef inline float delta_var( - int i, int j, float [:] t_bar, float [:] tau, float [:] n_reads, float read_noise, float slope): + int i, int j, float [:] t_bar, float [:] tau, + float [:] n_reads, float read_noise, float slope): return ( ( - read_noise * (1 / n_reads[i] + 1 / n_reads[j]) + - slope * (tau[i] + tau[j] - np.min(t_bar[i], t_bar[j])) * correction_factor(i, j, t_bar) + read_noise * (1 / n_reads[i] + 1 / n_reads[j]) + + slope * (tau[i] + tau[j] - np.min(t_bar[i], t_bar[j])) * + correction_factor(i, j, t_bar) ) / ((t_bar[j] - t_bar[i]) ** 2) ) @@ -45,8 +47,8 @@ cdef inline float delta_var( @cython.wraparound(False) @cython.cdivision(True) cdef inline float stat( - int i, int j, float[:] resultants, float [:] t_bar, float [:] tau, - float [:] n_reads, float read_noise, float slope): + int i, int j, float[:] resultants, float [:] t_bar, float [:] tau, + float [:] n_reads, float read_noise, float slope): cdef float delta = ((resultants[j] - resultants[i]) / (t_bar[j] - t_bar[i])) - slope return delta / sqrt(delta_var(i, j, t_bar, tau, n_reads, read_noise, slope)) @@ -56,8 +58,8 @@ cdef inline float stat( @cython.wraparound(False) @cython.cdivision(True) cdef inline float *statistics( - float [:] resultants, float [:] t_bar, float [:] tau, float [:] n_reads, float read_noise, float slope - ): + float [:] resultants, float [:] t_bar, float [:] tau, float [:] n_reads, + float read_noise, float slope): cdef int n_stats = len(n_reads), i cdef float *stats = malloc(n_stats * sizeof(float)) @@ -70,12 +72,3 @@ cdef inline float *statistics( stats[i] = np.max(stat_1, stat_2) return stats - - -# @cython.boundscheck(False) -# @cython.wraparound(False) -# @cython.cdivision(True) -# cdef inline (float, float, float) jump_detection( -# float [:] resultants, int start, int end, float read_noise, -# float [:] t_bar, float [:] tau, int [:] n_reads): - diff --git a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx 
b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx index 094732fe..40f94b1e 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx @@ -4,8 +4,7 @@ cimport cython from stcal.ramp_fitting.ols_cas22_util import ma_table_to_tau, ma_table_to_tbar -from stcal.ramp_fitting.ols_cas22._core cimport Ramp, make_ramp -from stcal.ramp_fitting.ols_cas22._fit_one_ramp cimport fit_one_ramp +from stcal.ramp_fitting.ols_cas22._core cimport make_ramp @cython.boundscheck(False) @@ -55,7 +54,8 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, """ cdef int nresultant = len(ma_table) if nresultant != resultants.shape[0]: - raise RuntimeError(f'MA table length {nresultant} does not match number of resultants {resultants.shape[0]}') + raise RuntimeError(f'MA table length {nresultant} does not ' + f'match number of resultants {resultants.shape[0]}') cdef np.ndarray[int] nn = np.array([x[1] for x in ma_table]).astype('i4') # number of reads in each resultant @@ -96,12 +96,11 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, # we should have just filled out the starting and stopping locations # of each ramp. - cdef Ramp ramp for i in range(nramp): - ramp = make_ramp(resultants[:, pix[i]], resstart[i], resend[i], - read_noise[pix[i]], tbar, tau, nn) - - slope[i], slopereadvar[i], slopepoissonvar[i] = fit_one_ramp(ramp) + slope[i], slopereadvar[i], slopepoissonvar[i] = make_ramp( + resultants[:, pix[i]], + resstart[i], resend[i], + read_noise[pix[i]], tbar, tau, nn).fit() return dict(slope=slope, slopereadvar=slopereadvar, slopepoissonvar=slopepoissonvar, From 1dc649cac0c86eb24fb671f37199eb3826473b07 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Tue, 15 Aug 2023 19:57:05 -0400 Subject: [PATCH 08/90] Refactor variable names for fit_ramps --- .../ramp_fitting/ols_cas22/_ols_cas22.pyx | 81 ++++++++++--------- 1 file changed, 42 insertions(+), 39 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx index 40f94b1e..8be4589e 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx @@ -52,56 +52,59 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, resend : np.ndarray[nramp] The last resultant in this ramp. 
""" - cdef int nresultant = len(ma_table) - if nresultant != resultants.shape[0]: - raise RuntimeError(f'MA table length {nresultant} does not ' + cdef int n_resultants = len(ma_table) + if n_resultants != resultants.shape[0]: + raise RuntimeError(f'MA table length {n_resultants} does not ' f'match number of resultants {resultants.shape[0]}') - cdef np.ndarray[int] nn = np.array([x[1] for x in ma_table]).astype('i4') + cdef np.ndarray[int] n_reads = np.array([x[1] for x in ma_table]).astype('i4') # number of reads in each resultant - cdef np.ndarray[float] tbar = ma_table_to_tbar(ma_table, read_time).astype('f4') + + cdef np.ndarray[float] t_bar = ma_table_to_tbar(ma_table, read_time).astype('f4') cdef np.ndarray[float] tau = ma_table_to_tau(ma_table, read_time).astype('f4') - cdef int npixel = resultants.shape[1] - cdef int nramp = (np.sum(dq[0, :] == 0) + - np.sum((dq[:-1, :] != 0) & (dq[1:, :] == 0))) - cdef np.ndarray[float] slope = np.zeros(nramp, dtype='f4') - cdef np.ndarray[float] slopereadvar = np.zeros(nramp, dtype='f4') - cdef np.ndarray[float] slopepoissonvar = np.zeros(nramp, dtype='f4') - cdef np.ndarray[int] resstart = np.zeros(nramp, dtype='i4') - 1 - cdef np.ndarray[int] resend = np.zeros(nramp, dtype='i4') - 1 - cdef np.ndarray[int] pix = np.zeros(nramp, dtype='i4') - 1 + cdef int n_pixel = resultants.shape[1] + cdef int n_ramp = (np.sum(dq[0, :] == 0) + + np.sum((dq[:-1, :] != 0) & (dq[1:, :] == 0))) + + cdef np.ndarray[float] slope = np.zeros(n_ramp, dtype='f4') + cdef np.ndarray[float] slope_read_var = np.zeros(n_ramp, dtype='f4') + cdef np.ndarray[float] slope_poisson_var = np.zeros(n_ramp, dtype='f4') + + cdef np.ndarray[int] start = np.zeros(n_ramp, dtype='i4') - 1 + cdef np.ndarray[int] end = np.zeros(n_ramp, dtype='i4') - 1 + cdef np.ndarray[int] pix = np.zeros(n_ramp, dtype='i4') - 1 cdef int i, j - cdef int inramp = -1 - cdef int rampnum = 0 - for i in range(npixel): - inramp = 0 - for j in range(nresultant): - if (not inramp) and (dq[j, i] == 0): - inramp = 1 - pix[rampnum] = i - resstart[rampnum] = j - elif (not inramp) and (dq[j, i] != 0): + cdef int in_ramp = -1 + cdef int ramp_num = 0 + for i in range(n_pixel): + in_ramp = 0 + for j in range(n_resultants): + if (not in_ramp) and (dq[j, i] == 0): + in_ramp = 1 + pix[ramp_num] = i + start[ramp_num] = j + elif (not in_ramp) and (dq[j, i] != 0): continue - elif inramp and (dq[j, i] == 0): + elif in_ramp and (dq[j, i] == 0): continue - elif inramp and (dq[j, i] != 0): - inramp = 0 - resend[rampnum] = j - 1 - rampnum += 1 + elif in_ramp and (dq[j, i] != 0): + in_ramp = 0 + end[ramp_num] = j - 1 + ramp_num += 1 else: raise ValueError('unhandled case') - if inramp: - resend[rampnum] = j - rampnum += 1 + if in_ramp: + end[ramp_num] = j + ramp_num += 1 # we should have just filled out the starting and stopping locations # of each ramp. 
- for i in range(nramp): - slope[i], slopereadvar[i], slopepoissonvar[i] = make_ramp( + for i in range(n_ramp): + slope[i], slope_read_var[i], slope_poisson_var[i] = make_ramp( resultants[:, pix[i]], - resstart[i], resend[i], - read_noise[pix[i]], tbar, tau, nn).fit() + start[i], end[i], + read_noise[pix[i]], t_bar, tau, n_reads).fit() - return dict(slope=slope, slopereadvar=slopereadvar, - slopepoissonvar=slopepoissonvar, - pix=pix, resstart=resstart, resend=resend) + return dict(slope=slope, slopereadvar=slope_read_var, + slopepoissonvar=slope_poisson_var, + pix=pix, resstart=start, resend=end) From 99fabccbfe44d0aed24fab1a929d7b2e17a3aa0a Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 16 Aug 2023 14:44:24 -0400 Subject: [PATCH 09/90] Add vector support --- .gitignore | 1 + setup.py | 2 +- .../ramp_fitting/ols_cas22/_jump_detection.pyx | 17 +++++++++++------ 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index 72d64360..5f3f3884 100644 --- a/.gitignore +++ b/.gitignore @@ -140,6 +140,7 @@ dmypy.json # Cython debug symbols cython_debug/ src/stcal/ramp_fitting/ols_cas22/*.c +src/stcal/ramp_fitting/ols_cas22/*.cpp # setuptools-scm generated module src/stcal/_version.py diff --git a/setup.py b/setup.py index 8b6c7665..66f2bc58 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ 'stcal.ramp_fitting.ols_cas22._jump_detection', ['src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx'], include_dirs=[np.get_include()], - extra_compile_args=['-std=c99'] + language='c++' ), Extension( 'stcal.ramp_fitting.ols_cas22._ols_cas22', diff --git a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx index ea34fc51..16188943 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx @@ -1,7 +1,7 @@ import numpy as np cimport numpy as np -from libc.math cimport sqrt -from libc.stdlib cimport malloc +from libc.math cimport sqrt, log10 +from libcpp.vector cimport vector cimport cython @@ -57,18 +57,23 @@ cdef inline float stat( @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) -cdef inline float *statistics( +cdef inline float statistic( float [:] resultants, float [:] t_bar, float [:] tau, float [:] n_reads, float read_noise, float slope): cdef int n_stats = len(n_reads), i - cdef float *stats = malloc(n_stats * sizeof(float)) + cdef vector[float] stats = vector[float](n_stats) cdef float stat_1, stat_2 for i in range(n_stats): stat_1 = stat(i, i + 1, resultants, t_bar, tau, n_reads, read_noise, slope) stat_2 = stat(i, i + 2, resultants, t_bar, tau, n_reads, read_noise, slope) - stats[i] = np.max(stat_1, stat_2) + stats.insert(stats.begin() + i, max(stat_1, stat_2)) + + return max(stats) + + +cdef float threshold(float intercept, float constant, float slope): + return intercept - constant * log10(slope) - return stats From fa5a33c8823f37133c53cd62072a3437f40cda3b Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 16 Aug 2023 15:26:02 -0400 Subject: [PATCH 10/90] More vectors --- setup.py | 2 +- .../ols_cas22/_jump_detection.pyx | 3 +- .../ramp_fitting/ols_cas22/_ols_cas22.pyx | 74 +++++++++++-------- 3 files changed, 45 insertions(+), 34 deletions(-) diff --git a/setup.py b/setup.py index 66f2bc58..6c021239 100644 --- a/setup.py +++ b/setup.py @@ -23,7 +23,7 @@ 'stcal.ramp_fitting.ols_cas22._ols_cas22', ['src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx'], include_dirs=[np.get_include()], - 
extra_compile_args=['-std=c99'] + language='c++' ), ] diff --git a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx index 16188943..5b8283cd 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx @@ -69,11 +69,10 @@ cdef inline float statistic( stat_1 = stat(i, i + 1, resultants, t_bar, tau, n_reads, read_noise, slope) stat_2 = stat(i, i + 2, resultants, t_bar, tau, n_reads, read_noise, slope) - stats.insert(stats.begin() + i, max(stat_1, stat_2)) + stats[i] = max(stat_1, stat_2) return max(stats) cdef float threshold(float intercept, float constant, float slope): return intercept - constant * log10(slope) - diff --git a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx index 8be4589e..d88fde94 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx @@ -1,5 +1,6 @@ import numpy as np cimport numpy as np +from libcpp.vector cimport vector cimport cython from stcal.ramp_fitting.ols_cas22_util import ma_table_to_tau, ma_table_to_tbar @@ -7,6 +8,42 @@ from stcal.ramp_fitting.ols_cas22_util import ma_table_to_tau, ma_table_to_tbar from stcal.ramp_fitting.ols_cas22._core cimport make_ramp +cdef inline (vector[int], vector[int], vector[int]) end_points(int n_ramp, + int n_pixel, + int n_resultants, + int[:, :] dq): + + cdef vector[int] start = vector[int](n_ramp, -1) + cdef vector[int] end = vector[int](n_ramp, -1) + cdef vector[int] pix = vector[int](n_ramp, -1) + + cdef int i, j + cdef int in_ramp = -1 + cdef int ramp_num = 0 + for i in range(n_pixel): + in_ramp = 0 + for j in range(n_resultants): + if (not in_ramp) and (dq[j, i] == 0): + in_ramp = 1 + pix[ramp_num] = i + start[ramp_num] = j + elif (not in_ramp) and (dq[j, i] != 0): + continue + elif in_ramp and (dq[j, i] == 0): + continue + elif in_ramp and (dq[j, i] != 0): + in_ramp = 0 + end[ramp_num] = j - 1 + ramp_num += 1 + else: + raise ValueError('unhandled case') + if in_ramp: + end[ramp_num] = j + ramp_num += 1 + + return start, end, pix + + @cython.boundscheck(False) @cython.wraparound(False) def fit_ramps(np.ndarray[float, ndim=2] resultants, @@ -66,38 +103,13 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, cdef int n_ramp = (np.sum(dq[0, :] == 0) + np.sum((dq[:-1, :] != 0) & (dq[1:, :] == 0))) - cdef np.ndarray[float] slope = np.zeros(n_ramp, dtype='f4') - cdef np.ndarray[float] slope_read_var = np.zeros(n_ramp, dtype='f4') - cdef np.ndarray[float] slope_poisson_var = np.zeros(n_ramp, dtype='f4') + # numpy arrays so that we get numpy arrays out + cdef np.ndarray[float] slope = np.zeros(n_ramp, dtype=np.float32) + cdef np.ndarray[float] slope_read_var = np.zeros(n_ramp, dtype=np.float32) + cdef np.ndarray[float] slope_poisson_var = np.zeros(n_ramp, dtype=np.float32) - cdef np.ndarray[int] start = np.zeros(n_ramp, dtype='i4') - 1 - cdef np.ndarray[int] end = np.zeros(n_ramp, dtype='i4') - 1 - cdef np.ndarray[int] pix = np.zeros(n_ramp, dtype='i4') - 1 - cdef int i, j - cdef int in_ramp = -1 - cdef int ramp_num = 0 - for i in range(n_pixel): - in_ramp = 0 - for j in range(n_resultants): - if (not in_ramp) and (dq[j, i] == 0): - in_ramp = 1 - pix[ramp_num] = i - start[ramp_num] = j - elif (not in_ramp) and (dq[j, i] != 0): - continue - elif in_ramp and (dq[j, i] == 0): - continue - elif in_ramp and (dq[j, i] != 0): - in_ramp = 0 - end[ramp_num] = j - 1 - ramp_num += 1 - else: - raise 
ValueError('unhandled case') - if in_ramp: - end[ramp_num] = j - ramp_num += 1 - # we should have just filled out the starting and stopping locations - # of each ramp. + cdef vector[int] start, end, pix + start, end, pix = end_points(n_ramp, n_pixel, n_resultants, dq) for i in range(n_ramp): slope[i], slope_read_var[i], slope_poisson_var[i] = make_ramp( From abe53f916f2830fb30445e5fd37c5fcf1242099e Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 23 Aug 2023 11:08:34 -0400 Subject: [PATCH 11/90] Move entirely to the fit being in cython, and dynamically vectorized --- setup.py | 2 +- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 8 ++++-- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 21 ++++++++------ .../ramp_fitting/ols_cas22/_ols_cas22.pyx | 28 +++++++++++++++---- 4 files changed, 40 insertions(+), 19 deletions(-) diff --git a/setup.py b/setup.py index 6c021239..008781a5 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ 'stcal.ramp_fitting.ols_cas22._core', ['src/stcal/ramp_fitting/ols_cas22/_core.pyx'], include_dirs=[np.get_include()], - extra_compile_args=['-std=c99'] + language='c++' ), Extension( 'stcal.ramp_fitting.ols_cas22._jump_detection', diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index c023a212..2d2cecc3 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -1,12 +1,14 @@ +from libcpp.vector cimport vector cdef class Ramp: cdef public int start, end cdef public float read_noise - cdef public float [:] resultants, t_bar, tau - cdef public int [:] n_reads + cdef public float [:] resultants, + cdef public vector[float] t_bar, tau + cdef public vector[int] n_reads cdef (float, float, float) fit(Ramp self) cdef Ramp make_ramp( float [:] resultants, int start, int end, float read_noise, - float [:] t_bar, float [:] tau, int [:] n_reads) + vector[float] t_bar, vector[float] tau, vector[int] n_reads) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index fba24e5a..5abe4833 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -1,4 +1,5 @@ from libc.math cimport sqrt, fabs +from libcpp.vector cimport vector import numpy as np cimport numpy as np cimport cython @@ -28,17 +29,18 @@ cdef class Ramp: ---------- resultants : float [:] array of resultants for single pixel + - memoryview of a numpy array to avoid passing through Python start : int starting point of portion to fit within this pixel end : int ending point of portion to fit within this pixel read_noise : float read noise for this pixel - t_bar : float [:] + t_bar : vector[float] mean times of resultants - tau : float [:] + tau : vector[float] variance weighted mean times of resultants - n_reads : int [:] + n_reads : vector[int] number of reads contributing to reach resultant """ @@ -63,8 +65,8 @@ cdef class Ramp: # Else, do the fitting. 
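# Illustrative NumPy sketch (not from this patch) of the normal equations the
# fitting code below solves.  Given weights w_i and mean times t_bar_i
# (Casertano+22 Eq. 35 through 38), the coefficients are
# k_i = (F0 * t_bar_i - F1) * w_i / det and the slope is sum(k_i * resultants_i).
# Function and variable names here are illustrative only.
import numpy as np

def weighted_slope(resultants, t_bar, weights):
    f0 = weights.sum()                           # Eq. 35 sums
    f1 = (weights * t_bar).sum()
    f2 = (weights * t_bar ** 2).sum()
    det = f2 * f0 - f1 ** 2                      # Eq. 36; zero means a degenerate fit
    coeffs = (f0 * t_bar - f1) * weights / det   # Eq. 37
    return float((coeffs * resultants).sum())    # Eq. 38

# A noiseless ramp with unit weights recovers the true slope exactly.
t = np.array([1.0, 2.0, 3.0, 4.0])
assert np.isclose(weighted_slope(2.0 * t, t, np.ones_like(t)), 2.0)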
cdef int i = 0, j = 0 - cdef float weights[2048] - cdef float coeffs[2048] + cdef vector[float] weights = vector[float](n_resultants) + cdef vector[float] coeffs = vector[float](n_resultants) cdef float slope = 0, slope_read_var = 0, slope_poisson_var = 0 cdef float t_bar_mid = (self.t_bar[self.start] + self.t_bar[self.end]) / 2 @@ -126,7 +128,7 @@ cdef class Ramp: cdef inline Ramp make_ramp( float [:] resultants, int start, int end, float read_noise, - float [:] t_bar, float [:] tau, int [:] n_reads): + vector[float] t_bar, vector[float] tau, vector[int] n_reads): """ Fast constructor for the Ramp C class. @@ -138,17 +140,18 @@ cdef inline Ramp make_ramp( ---------- resultants : float [:] array of resultants for single pixel + - memoryview of a numpy array to avoid passing through Python start : int starting point of portion to fit within this pixel end : int ending point of portion to fit within this pixel read_noise : float read noise for this pixel - t_bar : float [:] + t_bar : vector[float] mean times of resultants - tau : float [:] + tau : vector[float] variance weighted mean times of resultants - n_reads : int [:] + n_reads : vector[int] number of reads contributing to reach resultant Return diff --git a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx index d88fde94..01941f6a 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx @@ -3,11 +3,28 @@ cimport numpy as np from libcpp.vector cimport vector cimport cython -from stcal.ramp_fitting.ols_cas22_util import ma_table_to_tau, ma_table_to_tbar - from stcal.ramp_fitting.ols_cas22._core cimport make_ramp +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline (vector[int], vector[float], vector[float]) read_ma_table(list[list[int]] ma_table, float read_time): + + cdef vector[int] n_reads = vector[int](len(ma_table)) + cdef vector[float] t_bar = vector[float](len(ma_table)) + cdef vector[float] tau = vector[float](len(ma_table)) + + for index, entry in enumerate(ma_table): + n_reads[index] = entry[1] + t_bar[index] = read_time *(entry[0] + (entry[1] - 1) / 2.0) + tau[index] = t_bar[index] - (entry[1] - 1) * (entry[1] + 1) * read_time / (6 * entry[1]) + + return n_reads, t_bar, tau + + + +@cython.boundscheck(False) +@cython.wraparound(False) cdef inline (vector[int], vector[int], vector[int]) end_points(int n_ramp, int n_pixel, int n_resultants, @@ -94,11 +111,10 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, raise RuntimeError(f'MA table length {n_resultants} does not ' f'match number of resultants {resultants.shape[0]}') - cdef np.ndarray[int] n_reads = np.array([x[1] for x in ma_table]).astype('i4') - # number of reads in each resultant + cdef vector[int] n_reads + cdef vector[float] t_bar, tau + n_reads, t_bar, tau = read_ma_table(ma_table, read_time) - cdef np.ndarray[float] t_bar = ma_table_to_tbar(ma_table, read_time).astype('f4') - cdef np.ndarray[float] tau = ma_table_to_tau(ma_table, read_time).astype('f4') cdef int n_pixel = resultants.shape[1] cdef int n_ramp = (np.sum(dq[0, :] == 0) + np.sum((dq[:-1, :] != 0) & (dq[1:, :] == 0))) From 5d139f24ddcb77b170cbd626c6206ffd6c886bd0 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 23 Aug 2023 11:11:47 -0400 Subject: [PATCH 12/90] Rename main fit file --- setup.py | 4 ++-- src/stcal/ramp_fitting/ols_cas22/__init__.py | 2 +- .../ramp_fitting/ols_cas22/{_ols_cas22.pyx => _fit_ramps.pyx} | 0 3 files changed, 3 insertions(+), 3 deletions(-) rename 
src/stcal/ramp_fitting/ols_cas22/{_ols_cas22.pyx => _fit_ramps.pyx} (100%) diff --git a/setup.py b/setup.py index 008781a5..93f40466 100644 --- a/setup.py +++ b/setup.py @@ -20,8 +20,8 @@ language='c++' ), Extension( - 'stcal.ramp_fitting.ols_cas22._ols_cas22', - ['src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx'], + 'stcal.ramp_fitting.ols_cas22._fit_ramps', + ['src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx'], include_dirs=[np.get_include()], language='c++' ), diff --git a/src/stcal/ramp_fitting/ols_cas22/__init__.py b/src/stcal/ramp_fitting/ols_cas22/__init__.py index 427965a9..a5d0f6f8 100644 --- a/src/stcal/ramp_fitting/ols_cas22/__init__.py +++ b/src/stcal/ramp_fitting/ols_cas22/__init__.py @@ -1,3 +1,3 @@ -from ._ols_cas22 import fit_ramps +from ._fit_ramps import fit_ramps __all__ = ['fit_ramps'] diff --git a/src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx similarity index 100% rename from src/stcal/ramp_fitting/ols_cas22/_ols_cas22.pyx rename to src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx From aabbe4e9d02944901b27610d553c91ffc55caf4b Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 23 Aug 2023 11:28:08 -0400 Subject: [PATCH 13/90] Convert jump into an object --- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 3 + .../ols_cas22/_jump_detection.pyx | 131 +++++++++--------- 2 files changed, 67 insertions(+), 67 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index 5abe4833..58998bcc 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -44,6 +44,9 @@ cdef class Ramp: number of reads contributing to reach resultant """ + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) cdef inline (float, float, float) fit(Ramp self): """Fit a portion of single ramp using the Casertano+22 algorithm. 
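The _jump_detection diff below converts the free statistic helpers into methods on a Jump class while keeping the threshold as a standalone function. A rough pure-Python sketch of the decision rule those pieces implement follows; the threshold form is taken from the code in this series, but the intercept and constant defaults and the function names are placeholders for illustration, not values from the patch.

from math import log10

def threshold(intercept, constant, slope):
    # Same form as the cdef threshold() defined earlier in this series.
    return intercept - constant * log10(slope)

def detect_jump(stats, slope, intercept=5.5, constant=1.0):
    """Return the resultant index to split at, or None if no jump is flagged.

    stats holds, per resultant, the larger of the single- and double-difference
    statistics; intercept and constant are illustrative placeholders.
    """
    if not stats or slope <= 0:
        return None
    peak = max(range(len(stats)), key=lambda i: stats[i])
    if stats[peak] > threshold(intercept, constant, slope):
        return peak
    return None

# Example: a clear outlier at index 2 is flagged as the split point.
assert detect_jump([0.5, 0.3, 9.0, 0.2], slope=10.0) == 2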
diff --git a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx index 5b8283cd..031a51b9 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx @@ -4,75 +4,72 @@ from libc.math cimport sqrt, log10 from libcpp.vector cimport vector cimport cython - -@cython.boundscheck(False) -@cython.wraparound(False) -@cython.cdivision(True) -cdef inline float correction_factor(int i, int j, float [:] t_bar): - """Compute the correction factor - - Parameters - ---------- - i : int - The index of the first read in the segment - j : int - The index of the last read in the segment - t_bar : float - """ - cdef float denom = t_bar[-1] - t_bar[0] - - if j - i == 1: - return (1 - (t_bar[i + 1] - t_bar[i]) / denom) ** 2 - else: - return (1 - 0.75 * (t_bar[i + 2] - t_bar[i]) / denom) ** 2 - - -@cython.boundscheck(False) -@cython.wraparound(False) -@cython.cdivision(True) -cdef inline float delta_var( - int i, int j, float [:] t_bar, float [:] tau, - float [:] n_reads, float read_noise, float slope): - - return ( - ( - read_noise * (1 / n_reads[i] + 1 / n_reads[j]) + - slope * (tau[i] + tau[j] - np.min(t_bar[i], t_bar[j])) * - correction_factor(i, j, t_bar) - ) / ((t_bar[j] - t_bar[i]) ** 2) - ) - - -@cython.boundscheck(False) -@cython.wraparound(False) -@cython.cdivision(True) -cdef inline float stat( - int i, int j, float[:] resultants, float [:] t_bar, float [:] tau, - float [:] n_reads, float read_noise, float slope): - cdef float delta = ((resultants[j] - resultants[i]) / (t_bar[j] - t_bar[i])) - slope - - return delta / sqrt(delta_var(i, j, t_bar, tau, n_reads, read_noise, slope)) +from stcal.ramp_fitting.ols_cas22._core cimport Ramp -@cython.boundscheck(False) -@cython.wraparound(False) -@cython.cdivision(True) -cdef inline float statistic( - float [:] resultants, float [:] t_bar, float [:] tau, float [:] n_reads, - float read_noise, float slope): - cdef int n_stats = len(n_reads), i - - cdef vector[float] stats = vector[float](n_stats) - cdef float stat_1, stat_2 - - for i in range(n_stats): - stat_1 = stat(i, i + 1, resultants, t_bar, tau, n_reads, read_noise, slope) - stat_2 = stat(i, i + 2, resultants, t_bar, tau, n_reads, read_noise, slope) - - stats[i] = max(stat_1, stat_2) +cdef float threshold(float intercept, float constant, float slope): + return intercept - constant * log10(slope) - return max(stats) +cdef class Jump(Ramp): + """ + Class to contain the data for a single ramp fit with jump detection + """ -cdef float threshold(float intercept, float constant, float slope): - return intercept - constant * log10(slope) + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef inline float correction_factor(Jump self, int i, int j): + """Compute the correction factor + + Parameters + ---------- + i : int + The index of the first read in the segment + j : int + The index of the last read in the segment + """ + cdef float denom = self.t_bar[-1] - self.t_bar[0] + + if j - i == 1: + return (1 - (self.t_bar[i + 1] - self.t_bar[i]) / denom) ** 2 + else: + return (1 - 0.75 * (self.t_bar[i + 2] - self.t_bar[i]) / denom) ** 2 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef inline float delta_var(Jump self, int i, int j, float slope): + + return ( + ( + self.read_noise * (1 / self.n_reads[i] + 1 / self.n_reads[j]) + + slope * (self.tau[i] + self.tau[j] - np.min(self.t_bar[i], self.t_bar[j])) * + 
self.correction_factor(i, j) + ) / ((self.t_bar[j] - self.t_bar[i]) ** 2) + ) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef inline float stat(Jump self, int i, int j, float slope): + cdef float delta = ((self.resultants[j] - self.resultants[i]) / (self.t_bar[j] - self.t_bar[i])) - slope + + return delta / sqrt(self.delta_var(i, j, slope)) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef inline float statistic(Jump self, float slope): + cdef int n_stats = len(self.n_reads), i + + cdef vector[float] stats = vector[float](n_stats) + cdef float stat_1, stat_2 + + for i in range(n_stats): + stat_1 = self.stat(i, i + 1, slope) + stat_2 = self.stat(i, i + 2, slope) + + stats[i] = max(stat_1, stat_2) + + return max(stats) From 76eef893e786c77438341d4a0a7b3f2ac603e310 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 23 Aug 2023 12:46:59 -0400 Subject: [PATCH 14/90] Initial full jump algorithm --- .../ols_cas22/_jump_detection.pyx | 132 +++++++++++++++++- 1 file changed, 126 insertions(+), 6 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx index 031a51b9..8c10bcca 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx @@ -7,10 +7,6 @@ cimport cython from stcal.ramp_fitting.ols_cas22._core cimport Ramp -cdef float threshold(float intercept, float constant, float slope): - return intercept - constant * log10(slope) - - cdef class Jump(Ramp): """ Class to contain the data for a single ramp fit with jump detection @@ -60,7 +56,7 @@ cdef class Jump(Ramp): @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef inline float statistic(Jump self, float slope): + cdef inline vector[float] statistic(Jump self, float slope): cdef int n_stats = len(self.n_reads), i cdef vector[float] stats = vector[float](n_stats) @@ -72,4 +68,128 @@ cdef class Jump(Ramp): stats[i] = max(stat_1, stat_2) - return max(stats) + return stats + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef inline (float, float, float, vector[float]) jump(Jump self): + cdef float slope, read_var, poisson_var + slope, read_var, poisson_var = self.fit() + + cdef vector[float] stats = self.statistic(slope) + + return slope, read_var, poisson_var, stats + + # @cython.boundscheck(False) + # @cython.wraparound(False) + # @cython.cdivision(True) + # cdef inline (Jump, Jump) split(Jump self, int split): + # cdef Jump jump_1 = make_jump( + # self.resultants, self.start, self.start + split, self.read_noise, + # self.t_bar, self.tau, self.n_reads) + + # cdef Jump jump_2 = make_jump( + # self.resultants, self.start + split + 2, self.end, self.read_noise, + # self.t_bar, self.tau, self.n_reads) + + # return jump_1, jump_2 + + + + +cdef float threshold(float intercept, float constant, float slope): + return intercept - constant * log10(slope) + + +cdef inline Jump make_jump( + float [:] resultants, int start, int end, float read_noise, + vector[float] t_bar, vector[float] tau, vector[int] n_reads): + + """ + Fast constructor for the Jump C class. + + This is signifantly faster than using the `__init__` or `__cinit__` + this is because this does not have to pass through the Python as part + of the construction. 
+ + Parameters + ---------- + resultants : float [:] + array of resultants for single pixel + - memoryview of a numpy array to avoid passing through Python + start : int + starting point of portion to fit within this pixel + end : int + ending point of portion to fit within this pixel + read_noise : float + read noise for this pixel + t_bar : vector[float] + mean times of resultants + tau : vector[float] + variance weighted mean times of resultants + n_reads : vector[int] + number of reads contributing to reach resultant + + Return + ------ + jump : Jump + Jump C-class object + """ + + cdef Jump jump = Jump() + + jump.start = start + jump.end = end + + jump.resultants = resultants + jump.t_bar = t_bar + jump.tau = tau + + jump.read_noise = read_noise + + jump.n_reads = n_reads + + return jump + + +cdef (vector[float], vector[float], vector[float]) fit( + float [:] resultants, int start, int end, float read_noise, + vector[float] t_bar, vector[float] tau, vector[int] n_reads, + float intercept, float constant): + + cdef vector[float] slopes + cdef vector[float] read_vars + cdef vector[float] poisson_vars + + cdef Jump jump, jump_1, jump_2 + cdef int split + cdef vector[float] stats + cdef float slope, read_var, poisson_var + + cdef list[Jump] jumps = [make_jump(resultants, start, end, read_noise, t_bar, tau, n_reads)] + while jumps: + jump = jumps.pop() + slope, read_var, poisson_var, stats = jump.jump() + + if max(stats) > threshold(intercept, constant, slope): + split = np.argmax(stats) + + jump_1 = make_jump( + jump.resultants, jump.start, jump.start + split, jump.read_noise, + jump.t_bar, jump.tau, jump.n_reads) + + jump_2 = make_jump( + jump.resultants, jump.start + split + 2, jump.end, jump.read_noise, + jump.t_bar, jump.tau, jump.n_reads) + + jumps.append(jump_1) + jumps.append(jump_2) + + else: + slopes.push_back(slope) + read_vars.push_back(read_var) + poisson_vars.push_back(poisson_var) + + + return stats, read_vars, poisson_vars From 52cc6ab2d5f1d1e46adcc63c5b94b34259fe04c7 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 23 Aug 2023 15:48:57 -0400 Subject: [PATCH 15/90] Create a fixed parameter object --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 43 +++++ src/stcal/ramp_fitting/ols_cas22/_core.pyx | 203 +++++++++++++++++++++ 2 files changed, 246 insertions(+) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 2d2cecc3..f8eb2791 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -1,4 +1,47 @@ +import numpy as np +cimport numpy as np from libcpp.vector cimport vector +from libcpp cimport bool + +cdef class Fixed: + # Fixed parameters for all pixels inpu t + cdef public bool use_jump + cdef public float read_noise + cdef public float[:] t_bar, tau + cdef public int[:] n_reads + + # Computed and cached values for jump detection + # single -> j = i + 1 + # double -> j = i + 2 + + # single and double differences of t_bar + # t_bar[j] - t_bar[i] + cdef public float[:] t_bar_1, t_bar_2 + + # squared single and double differences of t_bar + # (t_bar[j] - t_bar[i])**2 + cdef public float[:] t_bar_1_sq, t_bar_2_sq + + # single and double sigma values + # read_noise * ((1/n_reads[i]) + (1/n_reads[j])) + cdef public float[:] sigma_1, sigma_2 + + # single and double slope var terms + # (tau[i] + tau[j] - min(t_bar[i], t_bar[j])) * correction(i, j) + cdef public float[:] slope_var_1, slope_var_2 + + cdef float[:] t_bar_diff(Fixed self, int offset) + cdef float[:] 
t_bar_diff_sq(Fixed self, int offset) + cdef float[:] sigma_val(Fixed self, int offset) + cdef float[:] slope_var_val(Fixed self, int offset) + + cdef float correction(Fixed self, int i, int j) + + +cdef Fixed make_fixed( + float read_noise, float[:] t_bar, float[:] tau, int[:] n_reads, bool use_jump) + + cdef class Ramp: cdef public int start, end cdef public float read_noise diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index 58998bcc..cd1af573 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -1,11 +1,214 @@ from libc.math cimport sqrt, fabs from libcpp.vector cimport vector +from libcpp cimport bool import numpy as np cimport numpy as np cimport cython from stcal.ramp_fitting.ols_cas22._core cimport Ramp + +cdef class Fixed: + """ + Class to contain the data fixed for all pixels and commonly referenced + universal values for jump detection + + Parameters + ---------- + read_noise : float + read noise for this pixel + t_bar : vector[float] + mean times of resultants + tau : vector[float] + variance weighted mean times of resultants + n_reads : vector[int] + number of reads contributing to reach resultant + + t_bar_1 : vector[float] + single differences of t_bar (t_bar[i+1] - t_bar[i]) + t_bar_1_sq : vector[float] + squared single differences of t_bar (t_bar[i+1] - t_bar[i])**2 + t_bar_2 : vector[float] + double differences of t_bar (t_bar[i+2] - t_bar[i]) + t_bar_2_sq: vector[float] + squared double differences of t_bar (t_bar[i+2] - t_bar[i])**2 + sigma_1 : vector[float] + single of sigma term read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) + sigma_2 : vector[float] + double of sigma term read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) + slope_var_1 : vector[float] + single of slope variance term + ([tau[i] + tau[i+1] - min(t_bar[i], t_bar[i+1])) * correction(i, i+1) + slope_var_2 : vector[float] + double of slope variance term + ([tau[i] + tau[i+2] - min(t_bar[i], t_bar[i+2])) * correction(i, i+2) + """ + + cdef inline float[:] t_bar_diff(Fixed self, int offset): + """ + Compute the difference offset of t_bar + + Parameters + ---------- + offset : int + index offset to compute difference + """ + cdef int n_diff = len(self.t_bar) - offset + cdef float[:] diff = (np.roll(self.t_bar, -offset) - self.t_bar)[:n_diff] + + return diff + + cdef inline float[:] t_bar_diff_sq(Fixed self, int offset): + """ + Compute the square difference offset of t_bar + + Parameters + ---------- + offset : int + index offset to compute difference + """ + cdef int n_diff = len(self.t_bar) - offset + cdef float[:] diff = (np.roll(self.t_bar, -offset) - self.t_bar)[:n_diff] ** 2 + + return diff + + cdef inline float[:] sigma_val(Fixed self, int offset): + """ + Compute the sigma values + read_noise * (1/n_reads[i+offset] + 1/n_reads[i]) + + Parameters + ---------- + offset : int + index offset to compute difference + """ + cdef int n_diff = len(self.t_bar) - offset + + # cdef float[:] sig = self.read_noise * ( + # (1 / np.roll(self.n_reads, -offset) + 1 / np.array(self.n_reads))[:n_diff]).astype(float) + + cdef float[:] sig = (1 / np.roll(self.n_reads, -offset)).astype(np.float32) + + return sig + + cdef inline float correction(Fixed self, int i, int j): + """Compute the correction factor + + Parameters + ---------- + i : int + The index of the first read in the segment + j : int + The index of the last read in the segment + """ + cdef float denom = self.t_bar[self.n_reads[i] - 1] - self.t_bar[0] + 
+ if j - i == 1: + return (1 - (self.t_bar[i + 1] - self.t_bar[i]) / denom) ** 2 + else: + return (1 - 0.75 * (self.t_bar[i + 2] - self.t_bar[i]) / denom) ** 2 + + cdef inline float[:] slope_var_val(Fixed self, int offset): + """ + Compute the sigma values + (tau[i] + tau[i+offset] - min(t_bar[i], t_bar[i+offset])) * + correction(i, i+offset) + + Parameters + ---------- + offset : int + index offset to compute difference + """ + cdef int n_diff = len(self.t_bar) - offset + + cdef float[:] slope_var_val = ( + (self.tau + np.roll(self.tau, -offset) - + np.minimum(self.t_bar, np.roll(self.t_bar, -offset))) * + self.correction(0, offset))[:n_diff] + + return slope_var_val + + +cdef inline Fixed make_fixed( + float read_noise, float[:] t_bar, float[:] tau, int[:] n_reads, bool use_jump): + + cdef Fixed fixed = Fixed() + + fixed.use_jump = use_jump + fixed.read_noise = read_noise + fixed.t_bar = t_bar + fixed.tau = tau + fixed.n_reads = n_reads + + if use_jump: + fixed.t_bar_1 = fixed.t_bar_diff(1) + fixed.t_bar_2 = fixed.t_bar_diff(2) + + fixed.t_bar_1_sq = fixed.t_bar_diff_sq(1) + fixed.t_bar_2_sq = fixed.t_bar_diff_sq(2) + + fixed.sigma_1 = fixed.sigma_val(1) + fixed.sigma_2 = fixed.sigma_val(2) + + fixed.slope_var_1 = fixed.slope_var_val(1) + fixed.slope_var_2 = fixed.slope_var_val(2) + + return fixed + + + # cdef inline vector[float] t_bar_diff_sq(Fixed self, int offset): + # """ + # Compute the square difference offset of t_bar + + # Parameters + # ---------- + # offset : int + # index offset to compute difference + # """ + # cdef int n_diff = len(self.t_bar) - offset + # cdef vector[float] diff = vector[float](n_diff) + + # for i in range(n_diff): + # diff[i] = (self.t_bar[i + offset] - self.t_bar[i])**2 + + # return diff + + # cdef inline vector[float] sigma(Fixed, self, int offset): + # """ + # Compute + # read_noise * (1/n_reads[i+offset] + 1/n_reads[i]) + + # Parameters + # ---------- + # offset : int + # index offset to compute difference + # """ + # cdef int n_diff = len(self.t_bar) - offset + # cdef vector[float] sig = vector[float](n_diff) + + # for i in range(n_diff): + # sig[i] = read_noise * (1 / self.n_reads[i + offset] + 1 / self.n_reads[i]) + + # return sig + + # cdef inline vector[float] slope_var(Fixed, self, int offset): + # """ + # Compute + # read_noise * (1/n_reads[i+offset] + 1/n_reads[i]) + + # Parameters + # ---------- + # offset : int + # index offset to compute difference + # """ + # cdef int n_diff = len(self.t_bar) - offset + # cdef vector[float] sig = vector[float](n_diff) + + # for i in range(n_diff): + # sig[i] = read_noise * (1 / self.n_reads[i + offset] + 1 / self.n_reads[i]) + + # return sig + # Casertano+2022, Table 2 cdef float[2][6] PTABLE = [ [-np.inf, 5, 10, 20, 50, 100], From 62a3b440f56ed28cb70917a4ff050530aaccc37b Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 24 Aug 2023 11:14:41 -0400 Subject: [PATCH 16/90] Begin process of moving computations to outer most loops --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 39 ++-- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 193 ++++++------------ .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 15 +- .../ols_cas22/_jump_detection.pyx | 2 +- 4 files changed, 102 insertions(+), 147 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index f8eb2791..68f227c0 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -4,9 +4,8 @@ from libcpp.vector cimport vector from libcpp cimport bool cdef 
class Fixed: - # Fixed parameters for all pixels inpu t + # Fixed parameters for all pixels input cdef public bool use_jump - cdef public float read_noise cdef public float[:] t_bar, tau cdef public int[:] n_reads @@ -22,9 +21,9 @@ cdef class Fixed: # (t_bar[j] - t_bar[i])**2 cdef public float[:] t_bar_1_sq, t_bar_2_sq - # single and double sigma values - # read_noise * ((1/n_reads[i]) + (1/n_reads[j])) - cdef public float[:] sigma_1, sigma_2 + # single and double reciprical sum values + # ((1/n_reads[i]) + (1/n_reads[j])) + cdef public float[:] recip_1, recip_2 # single and double slope var terms # (tau[i] + tau[j] - min(t_bar[i], t_bar[j])) * correction(i, j) @@ -32,26 +31,34 @@ cdef class Fixed: cdef float[:] t_bar_diff(Fixed self, int offset) cdef float[:] t_bar_diff_sq(Fixed self, int offset) - cdef float[:] sigma_val(Fixed self, int offset) + cdef float[:] recip_val(Fixed self, int offset) cdef float[:] slope_var_val(Fixed self, int offset) cdef float correction(Fixed self, int i, int j) -cdef Fixed make_fixed( - float read_noise, float[:] t_bar, float[:] tau, int[:] n_reads, bool use_jump) +cdef Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, bool use_jump) cdef class Ramp: - cdef public int start, end + cdef Fixed fixed cdef public float read_noise - cdef public float [:] resultants, - cdef public vector[float] t_bar, tau - cdef public vector[int] n_reads + cdef public float [:] resultants + + # Computed and cached values for jump detection + # single -> j = i + 1 + # double -> j = i + 2 + + # single and double differences of resultants + # resultants[j] - resultants[i] + cdef public float[:] resultants_1, resultants_2 + + # single and double sigma terms + # read_noise * recip[i] + cdef public float[:] sigma_1, sigma_2 - cdef (float, float, float) fit(Ramp self) + cdef float[:] resultants_diff(Ramp self, int offset) + cdef (float, float, float) fit(Ramp self, int start, int end) -cdef Ramp make_ramp( - float [:] resultants, int start, int end, float read_noise, - vector[float] t_bar, vector[float] tau, vector[int] n_reads) +cdef Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index cd1af573..7fd0be2b 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -15,8 +15,6 @@ cdef class Fixed: Parameters ---------- - read_noise : float - read noise for this pixel t_bar : vector[float] mean times of resultants tau : vector[float] @@ -72,10 +70,10 @@ cdef class Fixed: return diff - cdef inline float[:] sigma_val(Fixed self, int offset): + cdef inline float[:] recip_val(Fixed self, int offset): """ - Compute the sigma values - read_noise * (1/n_reads[i+offset] + 1/n_reads[i]) + Compute the recip values + (1/n_reads[i+offset] + 1/n_reads[i]) Parameters ---------- @@ -83,13 +81,11 @@ cdef class Fixed: index offset to compute difference """ cdef int n_diff = len(self.t_bar) - offset - - # cdef float[:] sig = self.read_noise * ( - # (1 / np.roll(self.n_reads, -offset) + 1 / np.array(self.n_reads))[:n_diff]).astype(float) - cdef float[:] sig = (1 / np.roll(self.n_reads, -offset)).astype(np.float32) + cdef float[:] recip = ((1 / np.roll(self.n_reads, -offset)).astype(np.float32) + + (1 / np.array(self.n_reads)).astype(np.float32))[:n_diff] - return sig + return recip cdef inline float correction(Fixed self, int i, int j): """Compute the correction factor @@ -129,13 +125,11 @@ cdef class Fixed: return 
slope_var_val -cdef inline Fixed make_fixed( - float read_noise, float[:] t_bar, float[:] tau, int[:] n_reads, bool use_jump): +cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, bool use_jump): cdef Fixed fixed = Fixed() fixed.use_jump = use_jump - fixed.read_noise = read_noise fixed.t_bar = t_bar fixed.tau = tau fixed.n_reads = n_reads @@ -147,8 +141,8 @@ cdef inline Fixed make_fixed( fixed.t_bar_1_sq = fixed.t_bar_diff_sq(1) fixed.t_bar_2_sq = fixed.t_bar_diff_sq(2) - fixed.sigma_1 = fixed.sigma_val(1) - fixed.sigma_2 = fixed.sigma_val(2) + fixed.recip_1 = fixed.recip_val(1) + fixed.recip_2 = fixed.recip_val(2) fixed.slope_var_1 = fixed.slope_var_val(1) fixed.slope_var_2 = fixed.slope_var_val(2) @@ -156,59 +150,6 @@ cdef inline Fixed make_fixed( return fixed - # cdef inline vector[float] t_bar_diff_sq(Fixed self, int offset): - # """ - # Compute the square difference offset of t_bar - - # Parameters - # ---------- - # offset : int - # index offset to compute difference - # """ - # cdef int n_diff = len(self.t_bar) - offset - # cdef vector[float] diff = vector[float](n_diff) - - # for i in range(n_diff): - # diff[i] = (self.t_bar[i + offset] - self.t_bar[i])**2 - - # return diff - - # cdef inline vector[float] sigma(Fixed, self, int offset): - # """ - # Compute - # read_noise * (1/n_reads[i+offset] + 1/n_reads[i]) - - # Parameters - # ---------- - # offset : int - # index offset to compute difference - # """ - # cdef int n_diff = len(self.t_bar) - offset - # cdef vector[float] sig = vector[float](n_diff) - - # for i in range(n_diff): - # sig[i] = read_noise * (1 / self.n_reads[i + offset] + 1 / self.n_reads[i]) - - # return sig - - # cdef inline vector[float] slope_var(Fixed, self, int offset): - # """ - # Compute - # read_noise * (1/n_reads[i+offset] + 1/n_reads[i]) - - # Parameters - # ---------- - # offset : int - # index offset to compute difference - # """ - # cdef int n_diff = len(self.t_bar) - offset - # cdef vector[float] sig = vector[float](n_diff) - - # for i in range(n_diff): - # sig[i] = read_noise * (1 / self.n_reads[i + offset] + 1 / self.n_reads[i]) - - # return sig - # Casertano+2022, Table 2 cdef float[2][6] PTABLE = [ [-np.inf, 5, 10, 20, 50, 100], @@ -233,61 +174,79 @@ cdef class Ramp: resultants : float [:] array of resultants for single pixel - memoryview of a numpy array to avoid passing through Python - start : int - starting point of portion to fit within this pixel - end : int - ending point of portion to fit within this pixel - read_noise : float - read noise for this pixel - t_bar : vector[float] - mean times of resultants - tau : vector[float] - variance weighted mean times of resultants - n_reads : vector[int] - number of reads contributing to reach resultant """ + @cython.boundscheck(False) + @cython.wraparound(False) + cdef inline float[:] resultants_diff(Ramp self, int offset): + """ + Compute the difference offset of resultants + + Parameters + ---------- + offset : int + index offset to compute difference + """ + cdef int n_diff = len(self.resultants) - offset + cdef float[:] diff = (np.roll(self.resultants, -offset) - self.t_bar)[:n_diff] + + return diff + @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef inline (float, float, float) fit(Ramp self): + cdef inline (float, float, float) fit(Ramp self, int start, int end): """Fit a portion of single ramp using the Casertano+22 algorithm. 
+ Parameters + ---------- + start : int + Start of range to fit ramp + end : int + End of range to fit ramp + fixed : Fixed + Fixed values for all pixels Returns ------- slope : float fit slope - slope_read_var : float + read_var : float read noise induced variance in slope - slope_poisson_var : float + poisson_var : float coefficient of Poisson-noise induced variance in slope multiply by true flux to get actual Poisson variance. """ - cdef int n_resultants = self.end - self.start + 1 + cdef int n_resultants = end - start + 1 # Special case where there is no or one resultant, there is no fit. if n_resultants <= 1: return 0, 0, 0 + # Setup data for fitting (work over subset of data) + cdef float[:] resultants = self.fixed.resultants[start:end + 1] + cdef float[:] t_bar = self.fixed.t_bar[start:end + 1] + cdef float[:] tau = self.fixed.tau[start:end + 1] + cdef int[:] n_reads = self.fixed.n_reads[start:end + 1] + # Else, do the fitting. cdef int i = 0, j = 0 cdef vector[float] weights = vector[float](n_resultants) cdef vector[float] coeffs = vector[float](n_resultants) - cdef float slope = 0, slope_read_var = 0, slope_poisson_var = 0 - cdef float t_bar_mid = (self.t_bar[self.start] + self.t_bar[self.end]) / 2 + cdef float slope = 0, read_var = 0, poisson_var = 0 + cdef float t_bar_mid = (t_bar[0] + t_bar[- 1]) / 2 # Casertano+2022 Eq. 44 # Note we've departed from Casertano+22 slightly; # there s is just resultants[end]. But that doesn't seem good if, e.g., # a CR in the first resultant has boosted the whole ramp high but there # is no actual signal. - cdef float s = max(self.resultants[self.end] - self.resultants[self.start], 0) - s = s / sqrt(self.read_noise**2 + s) + cdef float s = max(resultants[-1] - resultants[0], 0) + s = s / sqrt(self.fixed.read_noise**2 + s) cdef float power = get_weight_power(s) # It's easy to use up a lot of dynamic range on something like # (tbar - tbarmid) ** 10. Rescale these. - cdef float t_scale = (self.t_bar[self.end] - self.t_bar[self.start]) / 2 + cdef float t_scale = (t_bar[-1] - t_bar[0]) / 2 t_scale = 1 if t_scale == 0 else t_scale cdef float f0 = 0, f1 = 0, f2 = 0 @@ -296,15 +255,13 @@ cdef class Ramp: with cython.cpow(True): for i in range(n_resultants): # Casertano+22, Eq. 45 - weights[i] = ((((1 + power) * self.n_reads[self.start + i]) / - (1 + power * self.n_reads[self.start + i])) * - fabs((self.t_bar[self.start + i] - t_bar_mid) / - t_scale) ** power) + weights[i] = ((((1 + power) * n_reads[i]) / (1 + power * n_reads[i])) * + fabs((t_bar[i] - t_bar_mid) / t_scale) ** power) # Casertano+22 Eq. 35 f0 += weights[i] - f1 += weights[i] * self.t_bar[self.start + i] - f2 += weights[i] * self.t_bar[self.start + i]**2 + f1 += weights[i] * t_bar[i] + f2 += weights[i] * t_bar[i]**2 # Casertano+22 Eq. 36 cdef float det = f2 * f0 - f1 ** 2 @@ -313,28 +270,24 @@ cdef class Ramp: for i in range(n_resultants): # Casertano+22 Eq. 37 - coeffs[i] = (f0 * self.t_bar[self.start + i] - f1) * weights[i] / det + coeffs[i] = (f0 * t_bar[i] - f1) * weights[i] / det for i in range(n_resultants): # Casertano+22 Eq. 38 - slope += coeffs[i] * self.resultants[self.start + i] + slope += coeffs[i] * resultants[i] # Casertano+22 Eq. 
39 - slope_read_var += (coeffs[i] ** 2 * self.read_noise ** 2 / - self.n_reads[self.start + i]) + read_var += (coeffs[i] ** 2 * self.fixed.read_noise ** 2 / n_reads[i]) # Casertano+22 Eq 40 - slope_poisson_var += coeffs[i] ** 2 * self.tau[self.start + i] + poisson_var += coeffs[i] ** 2 * tau[i] for j in range(i + 1, n_resultants): - slope_poisson_var += (2 * coeffs[i] * coeffs[j] * - self.t_bar[self.start + i]) + poisson_var += (2 * coeffs[i] * coeffs[j] * t_bar[i]) - return (slope, slope_read_var, slope_poisson_var) + return (slope, read_var, poisson_var) -cdef inline Ramp make_ramp( - float [:] resultants, int start, int end, float read_noise, - vector[float] t_bar, vector[float] tau, vector[int] n_reads): +cdef inline Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants): """ Fast constructor for the Ramp C class. @@ -344,21 +297,11 @@ cdef inline Ramp make_ramp( Parameters ---------- + fixed : Fixed + Fixed values for all pixels resultants : float [:] array of resultants for single pixel - memoryview of a numpy array to avoid passing through Python - start : int - starting point of portion to fit within this pixel - end : int - ending point of portion to fit within this pixel - read_noise : float - read noise for this pixel - t_bar : vector[float] - mean times of resultants - tau : vector[float] - variance weighted mean times of resultants - n_reads : vector[int] - number of reads contributing to reach resultant Return ------ @@ -368,15 +311,15 @@ cdef inline Ramp make_ramp( cdef Ramp ramp = Ramp() - ramp.start = start - ramp.end = end - + ramp.fixed = fixed + ramp.read_noise = read_noise ramp.resultants = resultants - ramp.t_bar = t_bar - ramp.tau = tau - ramp.read_noise = read_noise + if fixed.use_jump: + ramp.resultants_1 = ramp.resultants_diff(1) + ramp.resultants_2 = ramp.resultants_diff(2) - ramp.n_reads = n_reads + ramp.sigma_1 = read_noise * np.array(fixed.recip_1) + ramp.sigma_2 = read_noise * np.array(fixed.recip_2) return ramp diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 01941f6a..f17bb9f7 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -3,7 +3,7 @@ cimport numpy as np from libcpp.vector cimport vector cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport make_ramp +from stcal.ramp_fitting.ols_cas22._core cimport make_ramp, make_fixed, Fixed @cython.boundscheck(False) @@ -66,7 +66,8 @@ cdef inline (vector[int], vector[int], vector[int]) end_points(int n_ramp, def fit_ramps(np.ndarray[float, ndim=2] resultants, np.ndarray[int, ndim=2] dq, np.ndarray[float, ndim=1] read_noise, read_time, - ma_table): + ma_table, + int use_jumps=False): """Fit ramps using the Casertano+22 algorithm. 
This implementation fits all ramp segments between bad pixels @@ -115,6 +116,12 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, cdef vector[float] t_bar, tau n_reads, t_bar, tau = read_ma_table(ma_table, read_time) + cdef Fixed fixed = make_fixed( + t_bar.data(), + tau.data(), + n_reads.data(), + use_jumps) + cdef int n_pixel = resultants.shape[1] cdef int n_ramp = (np.sum(dq[0, :] == 0) + np.sum((dq[:-1, :] != 0) & (dq[1:, :] == 0))) @@ -129,9 +136,7 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, for i in range(n_ramp): slope[i], slope_read_var[i], slope_poisson_var[i] = make_ramp( - resultants[:, pix[i]], - start[i], end[i], - read_noise[pix[i]], t_bar, tau, n_reads).fit() + fixed, read_noise[pix[i]], resultants[:, pix[i]]).fit(start[i], end[i]) return dict(slope=slope, slopereadvar=slope_read_var, slopepoissonvar=slope_poisson_var, diff --git a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx index 8c10bcca..3d423ea0 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx @@ -75,7 +75,7 @@ cdef class Jump(Ramp): @cython.cdivision(True) cdef inline (float, float, float, vector[float]) jump(Jump self): cdef float slope, read_var, poisson_var - slope, read_var, poisson_var = self.fit() + # slope, read_var, poisson_var = self.fit() cdef vector[float] stats = self.statistic(slope) From 2a39a830423e89fd46a57e3c061730752e157850 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 24 Aug 2023 12:36:23 -0400 Subject: [PATCH 17/90] Combine jump and ramp --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 26 +- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 60 +++- .../ols_cas22/_jump_detection.pyx | 304 +++++++----------- 3 files changed, 190 insertions(+), 200 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 68f227c0..4e6c6466 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -1,8 +1,25 @@ import numpy as np cimport numpy as np from libcpp.vector cimport vector +from libcpp.stack cimport stack from libcpp cimport bool + +cdef struct RampIndex: + int start + int end + + +cdef struct Thresh: + float intercept + float constant + + +cdef struct Fit: + float slope + float read_var + float poisson_var + cdef class Fixed: # Fixed parameters for all pixels input cdef public bool use_jump @@ -49,9 +66,9 @@ cdef class Ramp: # single -> j = i + 1 # double -> j = i + 2 - # single and double differences of resultants - # resultants[j] - resultants[i] - cdef public float[:] resultants_1, resultants_2 + # single and double delta + slope + # (resultants[j] - resultants[i]/(t_bar[j] - t_bar[i]) + cdef public float[:] delta_1, delta_2 # single and double sigma terms # read_noise * recip[i] @@ -60,5 +77,8 @@ cdef class Ramp: cdef float[:] resultants_diff(Ramp self, int offset) cdef (float, float, float) fit(Ramp self, int start, int end) + cdef float[:] stats(Ramp self, float slope, int start, int end) + cdef (stack[float], stack[float], stack[float]) fits(Ramp self, stack[RampIndex] ramps, Thresh thresh) + cdef Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index 7fd0be2b..19e4fd38 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -1,11 +1,12 @@ -from libc.math cimport 
sqrt, fabs +from libc.math cimport sqrt, fabs, log10 from libcpp.vector cimport vector +from libcpp.stack cimport stack from libcpp cimport bool import numpy as np cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport Ramp +from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, Thresh, Fit, Fixed, Ramp cdef class Fixed: @@ -286,6 +287,57 @@ cdef class Ramp: return (slope, read_var, poisson_var) + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef inline float[:] stats(Ramp self, float slope, int start, int end): + cdef np.ndarray[float] delta_1 = np.array(self.delta_1[start:end-1]) - slope + cdef np.ndarray[float] delta_2 = np.array(self.delta_2[start:end-1]) - slope + + cdef np.ndarray[float] var_1 = ((np.array(self.sigma_1[start:end-1]) + + slope * np.array(self.slope_var_1[start:end-1])) / + self.fixed.t_bar_1_sq[start:end-1]).astype(np.float32) + cdef np.ndarray[float] var_2 = ((np.array(self.sigma_2[start:end-1]) + + slope * np.array(self.slope_var_2[start:end-1])) / + self.fixed.t_bar_2_sq[start:end-1]).astype(np.float32) + + cdef np.ndarray[float] stats_1 = delta_1 / sqrt(var_1) + cdef np.ndarray[float] stats_2 = delta_2 / sqrt(var_2) + + return np.maximum(stats_1, stats_2) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef inline (stack[float], stack[float], stack[float]) fits(Ramp self, stack[RampIndex] ramps, Thresh thresh): + cdef stack[float] slopes, read_vars, poisson_vars + cdef RampIndex ramp + cdef float slope = 0, read_var = 0, poisson_var = 0 + cdef float [:] stats + cdef int split + + while not ramps.empty(): + ramp = ramps.top() + ramps.pop() + slope, read_var, poisson_var = self.fit(ramp.start, ramp.end) + stats = self.stats(slope, ramp.start, ramp.end) + + if max(stats) > threshold(thresh, slope): + split = np.argmax(stats) + + ramps.push(RampIndex(ramp.start, ramp.start + split)) + ramps.push(RampIndex(ramp.start + split + 2, ramp.end)) + else: + slopes.push(slope) + read_vars.push(read_var) + poisson_vars.push(poisson_var) + + return slopes, read_vars, poisson_vars + + +cdef float threshold(Thresh thresh, float slope): + return thresh.intercept - thresh.constant * log10(slope) + cdef inline Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants): """ @@ -316,8 +368,8 @@ cdef inline Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants): ramp.resultants = resultants if fixed.use_jump: - ramp.resultants_1 = ramp.resultants_diff(1) - ramp.resultants_2 = ramp.resultants_diff(2) + ramp.delta_1 = (np.array(ramp.resultants_diff(1)) / np.array(fixed.t_bar_1)).astype(np.float32) + ramp.delta_2 = (np.array(ramp.resultants_diff(2)) / np.array(fixed.t_bar_2)).astype(np.float32) ramp.sigma_1 = read_noise * np.array(fixed.recip_1) ramp.sigma_2 = read_noise * np.array(fixed.recip_2) diff --git a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx index 3d423ea0..7aff64e6 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx @@ -1,195 +1,113 @@ -import numpy as np -cimport numpy as np -from libc.math cimport sqrt, log10 -from libcpp.vector cimport vector -cimport cython +# import numpy as np +# cimport numpy as np +# from libc.math cimport sqrt, log10 +# from libcpp.vector cimport vector +# from libcpp.stack cimport stack +# cimport cython + +# from stcal.ramp_fitting.ols_cas22._core cimport Ramp + + +# cdef struct 
RampIndex: +# int start +# int end + + +# cdef struct Thresh: +# float intercept +# float constant + + +# cdef class Jump(Ramp): + +# """ +# Class to contain the data for a single ramp fit with jump detection +# """ +# @cython.boundscheck(False) +# @cython.wraparound(False) +# @cython.cdivision(True) +# cdef inline float[:] stats(Jump self, float slope, int start, int end): +# cdef np.ndarray[float] delta_1 = np.array(self.delta_1[start:end-1]) - slope +# cdef np.ndarray[float] delta_2 = np.array(self.delta_2[start:end-1]) - slope + +# cdef np.ndarray[float] var_1 = ((np.array(self.sigma_1[start:end-1]) + +# slope * np.array(self.slope_var_1[start:end-1])) / +# self.fixed.t_bar_1_sq[start:end-1]).astype(np.float32) +# cdef np.ndarray[float] var_2 = ((np.array(self.sigma_2[start:end-1]) + +# slope * np.array(self.slope_var_2[start:end-1])) / +# self.fixed.t_bar_2_sq[start:end-1]).astype(np.float32) + +# cdef np.ndarray[float] stats_1 = delta_1 / sqrt(var_1) +# cdef np.ndarray[float] stats_2 = delta_2 / sqrt(var_2) + +# return np.maximum(stats_1, stats_2) + +# @cython.boundscheck(False) +# @cython.wraparound(False) +# @cython.cdivision(True) +# cdef inline (stack[float], stack[float], stack[float]) fits(Jump self, stack[RampIndex] ramps, Thresh thresh): +# cdef stack[float] slopes, read_vars, poisson_vars +# cdef RampIndex ramp +# cdef float slope = 0, read_var = 0, poisson_var = 0 +# cdef float [:] stats +# cdef int split + +# while not ramps.empty(): +# ramp = ramps.top() +# ramps.pop() +# slope, read_var, poisson_var = self.fit(ramp.start, ramp.end) +# stats = self.stats(slope, ramp.start, ramp.end) + +# if max(stats) > threshold(thresh, slope): +# split = np.argmax(stats) + +# ramps.push(RampIndex(ramp.start, ramp.start + split)) +# ramps.push(RampIndex(ramp.start + split + 2, ramp.end)) +# else: +# slopes.push(slope) +# read_vars.push(read_var) +# poisson_vars.push(poisson_var) + +# return slopes, read_vars, poisson_vars + + +# cdef float threshold(Thresh thresh, float slope): +# return thresh.intercept - thresh.constant * log10(slope) + + +# # cdef inline Jump make_ramp(Fixed fixed, float read_noise, float [:] resultants): +# # """ +# # Fast constructor for the Jump C class. + +# # This is signifantly faster than using the `__init__` or `__cinit__` +# # this is because this does not have to pass through the Python as part +# # of the construction. 
+ +# # Parameters +# # ---------- +# # fixed : Fixed +# # Fixed values for all pixels +# # resultants : float [:] +# # array of resultants for single pixel +# # - memoryview of a numpy array to avoid passing through Python + +# # Return +# # ------ +# # ramp : Jump +# # Jump C-class object +# # """ + +# # cdef Jump jump = Jump() + +# # jump.start = start +# # jump.end = end -from stcal.ramp_fitting.ols_cas22._core cimport Ramp +# # jump.resultants = resultants +# # jump.t_bar = t_bar +# # jump.tau = tau - -cdef class Jump(Ramp): - """ - Class to contain the data for a single ramp fit with jump detection - """ - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef inline float correction_factor(Jump self, int i, int j): - """Compute the correction factor - - Parameters - ---------- - i : int - The index of the first read in the segment - j : int - The index of the last read in the segment - """ - cdef float denom = self.t_bar[-1] - self.t_bar[0] - - if j - i == 1: - return (1 - (self.t_bar[i + 1] - self.t_bar[i]) / denom) ** 2 - else: - return (1 - 0.75 * (self.t_bar[i + 2] - self.t_bar[i]) / denom) ** 2 - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef inline float delta_var(Jump self, int i, int j, float slope): - - return ( - ( - self.read_noise * (1 / self.n_reads[i] + 1 / self.n_reads[j]) + - slope * (self.tau[i] + self.tau[j] - np.min(self.t_bar[i], self.t_bar[j])) * - self.correction_factor(i, j) - ) / ((self.t_bar[j] - self.t_bar[i]) ** 2) - ) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef inline float stat(Jump self, int i, int j, float slope): - cdef float delta = ((self.resultants[j] - self.resultants[i]) / (self.t_bar[j] - self.t_bar[i])) - slope - - return delta / sqrt(self.delta_var(i, j, slope)) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef inline vector[float] statistic(Jump self, float slope): - cdef int n_stats = len(self.n_reads), i - - cdef vector[float] stats = vector[float](n_stats) - cdef float stat_1, stat_2 - - for i in range(n_stats): - stat_1 = self.stat(i, i + 1, slope) - stat_2 = self.stat(i, i + 2, slope) - - stats[i] = max(stat_1, stat_2) - - return stats - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef inline (float, float, float, vector[float]) jump(Jump self): - cdef float slope, read_var, poisson_var - # slope, read_var, poisson_var = self.fit() - - cdef vector[float] stats = self.statistic(slope) - - return slope, read_var, poisson_var, stats - - # @cython.boundscheck(False) - # @cython.wraparound(False) - # @cython.cdivision(True) - # cdef inline (Jump, Jump) split(Jump self, int split): - # cdef Jump jump_1 = make_jump( - # self.resultants, self.start, self.start + split, self.read_noise, - # self.t_bar, self.tau, self.n_reads) - - # cdef Jump jump_2 = make_jump( - # self.resultants, self.start + split + 2, self.end, self.read_noise, - # self.t_bar, self.tau, self.n_reads) - - # return jump_1, jump_2 - - - - -cdef float threshold(float intercept, float constant, float slope): - return intercept - constant * log10(slope) - - -cdef inline Jump make_jump( - float [:] resultants, int start, int end, float read_noise, - vector[float] t_bar, vector[float] tau, vector[int] n_reads): - - """ - Fast constructor for the Jump C class. 
- - This is signifantly faster than using the `__init__` or `__cinit__` - this is because this does not have to pass through the Python as part - of the construction. - - Parameters - ---------- - resultants : float [:] - array of resultants for single pixel - - memoryview of a numpy array to avoid passing through Python - start : int - starting point of portion to fit within this pixel - end : int - ending point of portion to fit within this pixel - read_noise : float - read noise for this pixel - t_bar : vector[float] - mean times of resultants - tau : vector[float] - variance weighted mean times of resultants - n_reads : vector[int] - number of reads contributing to reach resultant - - Return - ------ - jump : Jump - Jump C-class object - """ - - cdef Jump jump = Jump() - - jump.start = start - jump.end = end - - jump.resultants = resultants - jump.t_bar = t_bar - jump.tau = tau - - jump.read_noise = read_noise - - jump.n_reads = n_reads - - return jump - - -cdef (vector[float], vector[float], vector[float]) fit( - float [:] resultants, int start, int end, float read_noise, - vector[float] t_bar, vector[float] tau, vector[int] n_reads, - float intercept, float constant): - - cdef vector[float] slopes - cdef vector[float] read_vars - cdef vector[float] poisson_vars - - cdef Jump jump, jump_1, jump_2 - cdef int split - cdef vector[float] stats - cdef float slope, read_var, poisson_var - - cdef list[Jump] jumps = [make_jump(resultants, start, end, read_noise, t_bar, tau, n_reads)] - while jumps: - jump = jumps.pop() - slope, read_var, poisson_var, stats = jump.jump() - - if max(stats) > threshold(intercept, constant, slope): - split = np.argmax(stats) - - jump_1 = make_jump( - jump.resultants, jump.start, jump.start + split, jump.read_noise, - jump.t_bar, jump.tau, jump.n_reads) - - jump_2 = make_jump( - jump.resultants, jump.start + split + 2, jump.end, jump.read_noise, - jump.t_bar, jump.tau, jump.n_reads) - - jumps.append(jump_1) - jumps.append(jump_2) - - else: - slopes.push_back(slope) - read_vars.push_back(read_var) - poisson_vars.push_back(poisson_var) - - - return stats, read_vars, poisson_vars +# # jump.read_noise = read_noise + +# # jump.n_reads = n_reads + +# # return jump From cb3b14a0970a1cb93f1d82b7fb02e6c158c3bc42 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 24 Aug 2023 12:50:29 -0400 Subject: [PATCH 18/90] Swap to passing structs --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 12 ++- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 78 +++++++++---------- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 11 ++- 3 files changed, 56 insertions(+), 45 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 4e6c6466..2e2a7313 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -20,6 +20,12 @@ cdef struct Fit: float read_var float poisson_var + +cdef struct Fits: + vector[float] slope + vector[float] read_var + vector[float] poisson_var + cdef class Fixed: # Fixed parameters for all pixels input cdef public bool use_jump @@ -75,10 +81,10 @@ cdef class Ramp: cdef public float[:] sigma_1, sigma_2 cdef float[:] resultants_diff(Ramp self, int offset) - cdef (float, float, float) fit(Ramp self, int start, int end) + cdef Fit fit(Ramp self, RampIndex ramp) - cdef float[:] stats(Ramp self, float slope, int start, int end) - cdef (stack[float], stack[float], stack[float]) fits(Ramp self, stack[RampIndex] ramps, Thresh thresh) + cdef float[:] stats(Ramp self, float 
slope, RampIndex ramp) + cdef Fits fits(Ramp self, stack[RampIndex] ramps, Thresh thresh) cdef Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index 19e4fd38..d72b5273 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -6,7 +6,7 @@ import numpy as np cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, Thresh, Fit, Fixed, Ramp +from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, Thresh, Fit, Fixed, Ramp, Fits cdef class Fixed: @@ -196,16 +196,10 @@ cdef class Ramp: @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef inline (float, float, float) fit(Ramp self, int start, int end): + cdef inline Fit fit(Ramp self, RampIndex ramp): """Fit a portion of single ramp using the Casertano+22 algorithm. Parameters ---------- - start : int - Start of range to fit ramp - end : int - End of range to fit ramp - fixed : Fixed - Fixed values for all pixels Returns ------- @@ -217,28 +211,29 @@ cdef class Ramp: coefficient of Poisson-noise induced variance in slope multiply by true flux to get actual Poisson variance. """ - cdef int n_resultants = end - start + 1 + cdef int n_resultants = ramp.end - ramp.start + 1 # Special case where there is no or one resultant, there is no fit. if n_resultants <= 1: return 0, 0, 0 # Setup data for fitting (work over subset of data) - cdef float[:] resultants = self.fixed.resultants[start:end + 1] - cdef float[:] t_bar = self.fixed.t_bar[start:end + 1] - cdef float[:] tau = self.fixed.tau[start:end + 1] - cdef int[:] n_reads = self.fixed.n_reads[start:end + 1] + cdef float[:] resultants = self.fixed.resultants[ramp.start:ramp.end + 1] + cdef float[:] t_bar = self.fixed.t_bar[ramp.start:ramp.end + 1] + cdef float[:] tau = self.fixed.tau[ramp.start:ramp.end + 1] + cdef int[:] n_reads = self.fixed.n_reads[ramp.start:ramp.end + 1] # Else, do the fitting. cdef int i = 0, j = 0 cdef vector[float] weights = vector[float](n_resultants) cdef vector[float] coeffs = vector[float](n_resultants) - cdef float slope = 0, read_var = 0, poisson_var = 0 + cdef Fit fit = Fit(0, 0, 0) + cdef float t_bar_mid = (t_bar[0] + t_bar[- 1]) / 2 # Casertano+2022 Eq. 44 # Note we've departed from Casertano+22 slightly; - # there s is just resultants[end]. But that doesn't seem good if, e.g., + # there s is just resultants[ramp.end]. But that doesn't seem good if, e.g., # a CR in the first resultant has boosted the whole ramp high but there # is no actual signal. cdef float s = max(resultants[-1] - resultants[0], 0) @@ -275,31 +270,31 @@ cdef class Ramp: for i in range(n_resultants): # Casertano+22 Eq. 38 - slope += coeffs[i] * resultants[i] + fit.slope += coeffs[i] * resultants[i] # Casertano+22 Eq. 
39 - read_var += (coeffs[i] ** 2 * self.fixed.read_noise ** 2 / n_reads[i]) + fit.read_var += (coeffs[i] ** 2 * self.fixed.read_noise ** 2 / n_reads[i]) # Casertano+22 Eq 40 - poisson_var += coeffs[i] ** 2 * tau[i] + fit.poisson_var += coeffs[i] ** 2 * tau[i] for j in range(i + 1, n_resultants): - poisson_var += (2 * coeffs[i] * coeffs[j] * t_bar[i]) + fit.poisson_var += (2 * coeffs[i] * coeffs[j] * t_bar[i]) - return (slope, read_var, poisson_var) + return fit @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef inline float[:] stats(Ramp self, float slope, int start, int end): - cdef np.ndarray[float] delta_1 = np.array(self.delta_1[start:end-1]) - slope - cdef np.ndarray[float] delta_2 = np.array(self.delta_2[start:end-1]) - slope + cdef inline float[:] stats(Ramp self, float slope, RampIndex ramp): + cdef np.ndarray[float] delta_1 = np.array(self.delta_1[ramp.start:ramp.end-1]) - slope + cdef np.ndarray[float] delta_2 = np.array(self.delta_2[ramp.start:ramp.end-1]) - slope - cdef np.ndarray[float] var_1 = ((np.array(self.sigma_1[start:end-1]) + - slope * np.array(self.slope_var_1[start:end-1])) / - self.fixed.t_bar_1_sq[start:end-1]).astype(np.float32) - cdef np.ndarray[float] var_2 = ((np.array(self.sigma_2[start:end-1]) + - slope * np.array(self.slope_var_2[start:end-1])) / - self.fixed.t_bar_2_sq[start:end-1]).astype(np.float32) + cdef np.ndarray[float] var_1 = ((np.array(self.sigma_1[ramp.start:ramp.end-1]) + + slope * np.array(self.slope_var_1[ramp.start:ramp.end-1])) / + self.fixed.t_bar_1_sq[ramp.start:ramp.end-1]).astype(np.float32) + cdef np.ndarray[float] var_2 = ((np.array(self.sigma_2[ramp.start:ramp.end-1]) + + slope * np.array(self.slope_var_2[ramp.start:ramp.end-1])) / + self.fixed.t_bar_2_sq[ramp.start:ramp.end-1]).astype(np.float32) cdef np.ndarray[float] stats_1 = delta_1 / sqrt(var_1) cdef np.ndarray[float] stats_2 = delta_2 / sqrt(var_2) @@ -309,36 +304,41 @@ cdef class Ramp: @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef inline (stack[float], stack[float], stack[float]) fits(Ramp self, stack[RampIndex] ramps, Thresh thresh): - cdef stack[float] slopes, read_vars, poisson_vars + cdef inline Fits fits(Ramp self, stack[RampIndex] ramps, Thresh thresh): + cdef Fits fits + cdef RampIndex ramp - cdef float slope = 0, read_var = 0, poisson_var = 0 + cdef Fit fit cdef float [:] stats cdef int split while not ramps.empty(): ramp = ramps.top() ramps.pop() - slope, read_var, poisson_var = self.fit(ramp.start, ramp.end) - stats = self.stats(slope, ramp.start, ramp.end) + fit = self.fit(ramp) + stats = self.stats(fit.slope, ramp) - if max(stats) > threshold(thresh, slope): + if max(stats) > threshold(thresh, fit.slope) and self.fixed.use_jump: split = np.argmax(stats) ramps.push(RampIndex(ramp.start, ramp.start + split)) ramps.push(RampIndex(ramp.start + split + 2, ramp.end)) else: - slopes.push(slope) - read_vars.push(read_var) - poisson_vars.push(poisson_var) + fits.slope.push_back(fit.slope) + fits.read_var.push_back(fit.read_var) + fits.poisson_var.push_back(fit.poisson_var) - return slopes, read_vars, poisson_vars + return reverse_fits(fits) cdef float threshold(Thresh thresh, float slope): return thresh.intercept - thresh.constant * log10(slope) +cdef Fits reverse_fits(Fits fits): + return Fits(fits.slope[::-1], fits.read_var[::-1], fits.poisson_var[::-1]) + + cdef inline Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants): """ Fast constructor for the Ramp C class. 
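# --- Editor's illustrative sketch (not part of the patch) ---------------------
# Patch 18 ("Swap to passing structs") replaces the (slope, read_var,
# poisson_var) tuples with a Fit struct for a single ramp and a Fits struct
# holding one vector per quantity for all ramps of a pixel, reversed once at
# the end via reverse_fits because the ramps are popped off a stack.  A rough
# Python analogue of the two containers (class and method names here are
# illustrative only, not part of the stcal API):

from dataclasses import dataclass, field

@dataclass
class FitSketch:
    # result of fitting one ramp segment
    slope: float = 0.0
    read_var: float = 0.0
    poisson_var: float = 0.0

@dataclass
class FitsSketch:
    # per-ramp results for a single pixel, one list entry per ramp segment
    slope: list = field(default_factory=list)
    read_var: list = field(default_factory=list)
    poisson_var: list = field(default_factory=list)

    def reversed(self):
        # ramps are processed from a stack in reverse time order, so the
        # accumulated lists are flipped once at the end (cf. reverse_fits),
        # which is cheaper than prepending to a vector for every ramp
        return FitsSketch(self.slope[::-1], self.read_var[::-1],
                          self.poisson_var[::-1])
# -------------------------------------------------------------------------------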
diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index f17bb9f7..e996feb5 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -3,7 +3,7 @@ cimport numpy as np from libcpp.vector cimport vector cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport make_ramp, make_fixed, Fixed +from stcal.ramp_fitting.ols_cas22._core cimport make_ramp, make_fixed, Fixed, Fit, RampIndex @cython.boundscheck(False) @@ -130,13 +130,18 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, cdef np.ndarray[float] slope = np.zeros(n_ramp, dtype=np.float32) cdef np.ndarray[float] slope_read_var = np.zeros(n_ramp, dtype=np.float32) cdef np.ndarray[float] slope_poisson_var = np.zeros(n_ramp, dtype=np.float32) + cdef Fit fit cdef vector[int] start, end, pix start, end, pix = end_points(n_ramp, n_pixel, n_resultants, dq) for i in range(n_ramp): - slope[i], slope_read_var[i], slope_poisson_var[i] = make_ramp( - fixed, read_noise[pix[i]], resultants[:, pix[i]]).fit(start[i], end[i]) + fit = make_ramp( + fixed, read_noise[pix[i]], resultants[:, pix[i]]).fit(RampIndex(start[i], end[i])) + + slope[i] = fit.slope + slope_read_var[i] = fit.read_var + slope_poisson_var[i] = fit.poisson_var return dict(slope=slope, slopereadvar=slope_read_var, slopepoissonvar=slope_poisson_var, From 12aab35a0fd8ae643a2ab627f22c34a519a8c96e Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 24 Aug 2023 13:36:00 -0400 Subject: [PATCH 19/90] Move fixed into its own file --- setup.py | 4 +- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 37 +---- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 145 +----------------- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 3 +- src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 37 +++++ src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 145 ++++++++++++++++++ 6 files changed, 189 insertions(+), 182 deletions(-) create mode 100644 src/stcal/ramp_fitting/ols_cas22/_fixed.pxd create mode 100644 src/stcal/ramp_fitting/ols_cas22/_fixed.pyx diff --git a/setup.py b/setup.py index 93f40466..81fedac5 100644 --- a/setup.py +++ b/setup.py @@ -14,8 +14,8 @@ language='c++' ), Extension( - 'stcal.ramp_fitting.ols_cas22._jump_detection', - ['src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx'], + 'stcal.ramp_fitting.ols_cas22._fixed', + ['src/stcal/ramp_fitting/ols_cas22/_fixed.pyx'], include_dirs=[np.get_include()], language='c++' ), diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 2e2a7313..1982a99a 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -4,6 +4,7 @@ from libcpp.vector cimport vector from libcpp.stack cimport stack from libcpp cimport bool +from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed cdef struct RampIndex: int start @@ -26,42 +27,6 @@ cdef struct Fits: vector[float] read_var vector[float] poisson_var -cdef class Fixed: - # Fixed parameters for all pixels input - cdef public bool use_jump - cdef public float[:] t_bar, tau - cdef public int[:] n_reads - - # Computed and cached values for jump detection - # single -> j = i + 1 - # double -> j = i + 2 - - # single and double differences of t_bar - # t_bar[j] - t_bar[i] - cdef public float[:] t_bar_1, t_bar_2 - - # squared single and double differences of t_bar - # (t_bar[j] - t_bar[i])**2 - cdef public float[:] t_bar_1_sq, t_bar_2_sq - - # single and double reciprical sum values - # ((1/n_reads[i]) + 
(1/n_reads[j])) - cdef public float[:] recip_1, recip_2 - - # single and double slope var terms - # (tau[i] + tau[j] - min(t_bar[i], t_bar[j])) * correction(i, j) - cdef public float[:] slope_var_1, slope_var_2 - - cdef float[:] t_bar_diff(Fixed self, int offset) - cdef float[:] t_bar_diff_sq(Fixed self, int offset) - cdef float[:] recip_val(Fixed self, int offset) - cdef float[:] slope_var_val(Fixed self, int offset) - - cdef float correction(Fixed self, int i, int j) - - -cdef Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, bool use_jump) - cdef class Ramp: cdef Fixed fixed diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index d72b5273..93ff1bb8 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -6,149 +6,8 @@ import numpy as np cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, Thresh, Fit, Fixed, Ramp, Fits - - -cdef class Fixed: - """ - Class to contain the data fixed for all pixels and commonly referenced - universal values for jump detection - - Parameters - ---------- - t_bar : vector[float] - mean times of resultants - tau : vector[float] - variance weighted mean times of resultants - n_reads : vector[int] - number of reads contributing to reach resultant - - t_bar_1 : vector[float] - single differences of t_bar (t_bar[i+1] - t_bar[i]) - t_bar_1_sq : vector[float] - squared single differences of t_bar (t_bar[i+1] - t_bar[i])**2 - t_bar_2 : vector[float] - double differences of t_bar (t_bar[i+2] - t_bar[i]) - t_bar_2_sq: vector[float] - squared double differences of t_bar (t_bar[i+2] - t_bar[i])**2 - sigma_1 : vector[float] - single of sigma term read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) - sigma_2 : vector[float] - double of sigma term read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) - slope_var_1 : vector[float] - single of slope variance term - ([tau[i] + tau[i+1] - min(t_bar[i], t_bar[i+1])) * correction(i, i+1) - slope_var_2 : vector[float] - double of slope variance term - ([tau[i] + tau[i+2] - min(t_bar[i], t_bar[i+2])) * correction(i, i+2) - """ - - cdef inline float[:] t_bar_diff(Fixed self, int offset): - """ - Compute the difference offset of t_bar - - Parameters - ---------- - offset : int - index offset to compute difference - """ - cdef int n_diff = len(self.t_bar) - offset - cdef float[:] diff = (np.roll(self.t_bar, -offset) - self.t_bar)[:n_diff] - - return diff - - cdef inline float[:] t_bar_diff_sq(Fixed self, int offset): - """ - Compute the square difference offset of t_bar - - Parameters - ---------- - offset : int - index offset to compute difference - """ - cdef int n_diff = len(self.t_bar) - offset - cdef float[:] diff = (np.roll(self.t_bar, -offset) - self.t_bar)[:n_diff] ** 2 - - return diff - - cdef inline float[:] recip_val(Fixed self, int offset): - """ - Compute the recip values - (1/n_reads[i+offset] + 1/n_reads[i]) - - Parameters - ---------- - offset : int - index offset to compute difference - """ - cdef int n_diff = len(self.t_bar) - offset - - cdef float[:] recip = ((1 / np.roll(self.n_reads, -offset)).astype(np.float32) + - (1 / np.array(self.n_reads)).astype(np.float32))[:n_diff] - - return recip - - cdef inline float correction(Fixed self, int i, int j): - """Compute the correction factor - - Parameters - ---------- - i : int - The index of the first read in the segment - j : int - The index of the last read in the segment - """ - cdef float denom = 
self.t_bar[self.n_reads[i] - 1] - self.t_bar[0] - - if j - i == 1: - return (1 - (self.t_bar[i + 1] - self.t_bar[i]) / denom) ** 2 - else: - return (1 - 0.75 * (self.t_bar[i + 2] - self.t_bar[i]) / denom) ** 2 - - cdef inline float[:] slope_var_val(Fixed self, int offset): - """ - Compute the sigma values - (tau[i] + tau[i+offset] - min(t_bar[i], t_bar[i+offset])) * - correction(i, i+offset) - - Parameters - ---------- - offset : int - index offset to compute difference - """ - cdef int n_diff = len(self.t_bar) - offset - - cdef float[:] slope_var_val = ( - (self.tau + np.roll(self.tau, -offset) - - np.minimum(self.t_bar, np.roll(self.t_bar, -offset))) * - self.correction(0, offset))[:n_diff] - - return slope_var_val - - -cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, bool use_jump): - - cdef Fixed fixed = Fixed() - - fixed.use_jump = use_jump - fixed.t_bar = t_bar - fixed.tau = tau - fixed.n_reads = n_reads - - if use_jump: - fixed.t_bar_1 = fixed.t_bar_diff(1) - fixed.t_bar_2 = fixed.t_bar_diff(2) - - fixed.t_bar_1_sq = fixed.t_bar_diff_sq(1) - fixed.t_bar_2_sq = fixed.t_bar_diff_sq(2) - - fixed.recip_1 = fixed.recip_val(1) - fixed.recip_2 = fixed.recip_val(2) - - fixed.slope_var_1 = fixed.slope_var_val(1) - fixed.slope_var_2 = fixed.slope_var_val(2) - - return fixed +from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, Thresh, Fit, Ramp, Fits +from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed # Casertano+2022, Table 2 diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index e996feb5..f87f3938 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -3,7 +3,8 @@ cimport numpy as np from libcpp.vector cimport vector cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport make_ramp, make_fixed, Fixed, Fit, RampIndex +from stcal.ramp_fitting.ols_cas22._core cimport make_ramp, Fit, RampIndex +from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed, Fixed @cython.boundscheck(False) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd new file mode 100644 index 00000000..25125e21 --- /dev/null +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -0,0 +1,37 @@ +from libcpp cimport bool + +cdef class Fixed: + # Fixed parameters for all pixels input + cdef public bool use_jump + cdef public float[:] t_bar, tau + cdef public int[:] n_reads + + # Computed and cached values for jump detection + # single -> j = i + 1 + # double -> j = i + 2 + + # single and double differences of t_bar + # t_bar[j] - t_bar[i] + cdef public float[:] t_bar_1, t_bar_2 + + # squared single and double differences of t_bar + # (t_bar[j] - t_bar[i])**2 + cdef public float[:] t_bar_1_sq, t_bar_2_sq + + # single and double reciprical sum values + # ((1/n_reads[i]) + (1/n_reads[j])) + cdef public float[:] recip_1, recip_2 + + # single and double slope var terms + # (tau[i] + tau[j] - min(t_bar[i], t_bar[j])) * correction(i, j) + cdef public float[:] slope_var_1, slope_var_2 + + cdef float[:] t_bar_diff(Fixed self, int offset) + cdef float[:] t_bar_diff_sq(Fixed self, int offset) + cdef float[:] recip_val(Fixed self, int offset) + cdef float[:] slope_var_val(Fixed self, int offset) + + cdef float correction(Fixed self, int i, int j) + + +cdef Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, bool use_jump) \ No newline at end of file diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx 
b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx new file mode 100644 index 00000000..58c90f33 --- /dev/null +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -0,0 +1,145 @@ +import numpy as np +cimport numpy as np + +from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed + +cdef class Fixed: + """ + Class to contain the data fixed for all pixels and commonly referenced + universal values for jump detection + + Parameters + ---------- + t_bar : vector[float] + mean times of resultants + tau : vector[float] + variance weighted mean times of resultants + n_reads : vector[int] + number of reads contributing to reach resultant + + t_bar_1 : vector[float] + single differences of t_bar (t_bar[i+1] - t_bar[i]) + t_bar_1_sq : vector[float] + squared single differences of t_bar (t_bar[i+1] - t_bar[i])**2 + t_bar_2 : vector[float] + double differences of t_bar (t_bar[i+2] - t_bar[i]) + t_bar_2_sq: vector[float] + squared double differences of t_bar (t_bar[i+2] - t_bar[i])**2 + sigma_1 : vector[float] + single of sigma term read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) + sigma_2 : vector[float] + double of sigma term read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) + slope_var_1 : vector[float] + single of slope variance term + ([tau[i] + tau[i+1] - min(t_bar[i], t_bar[i+1])) * correction(i, i+1) + slope_var_2 : vector[float] + double of slope variance term + ([tau[i] + tau[i+2] - min(t_bar[i], t_bar[i+2])) * correction(i, i+2) + """ + + cdef inline float[:] t_bar_diff(Fixed self, int offset): + """ + Compute the difference offset of t_bar + + Parameters + ---------- + offset : int + index offset to compute difference + """ + cdef int n_diff = len(self.t_bar) - offset + cdef float[:] diff = (np.roll(self.t_bar, -offset) - self.t_bar)[:n_diff] + + return diff + + cdef inline float[:] t_bar_diff_sq(Fixed self, int offset): + """ + Compute the square difference offset of t_bar + + Parameters + ---------- + offset : int + index offset to compute difference + """ + cdef int n_diff = len(self.t_bar) - offset + cdef float[:] diff = (np.roll(self.t_bar, -offset) - self.t_bar)[:n_diff] ** 2 + + return diff + + cdef inline float[:] recip_val(Fixed self, int offset): + """ + Compute the recip values + (1/n_reads[i+offset] + 1/n_reads[i]) + + Parameters + ---------- + offset : int + index offset to compute difference + """ + cdef int n_diff = len(self.t_bar) - offset + + cdef float[:] recip = ((1 / np.roll(self.n_reads, -offset)).astype(np.float32) + + (1 / np.array(self.n_reads)).astype(np.float32))[:n_diff] + + return recip + + cdef inline float correction(Fixed self, int i, int j): + """Compute the correction factor + + Parameters + ---------- + i : int + The index of the first read in the segment + j : int + The index of the last read in the segment + """ + cdef float denom = self.t_bar[self.n_reads[i] - 1] - self.t_bar[0] + + if j - i == 1: + return (1 - (self.t_bar[i + 1] - self.t_bar[i]) / denom) ** 2 + else: + return (1 - 0.75 * (self.t_bar[i + 2] - self.t_bar[i]) / denom) ** 2 + + cdef inline float[:] slope_var_val(Fixed self, int offset): + """ + Compute the sigma values + (tau[i] + tau[i+offset] - min(t_bar[i], t_bar[i+offset])) * + correction(i, i+offset) + + Parameters + ---------- + offset : int + index offset to compute difference + """ + cdef int n_diff = len(self.t_bar) - offset + + cdef float[:] slope_var_val = ( + (self.tau + np.roll(self.tau, -offset) - + np.minimum(self.t_bar, np.roll(self.t_bar, -offset))) * + self.correction(0, offset))[:n_diff] + + return slope_var_val + + +cdef inline 
Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, bool use_jump): + + cdef Fixed fixed = Fixed() + + fixed.use_jump = use_jump + fixed.t_bar = t_bar + fixed.tau = tau + fixed.n_reads = n_reads + + if use_jump: + fixed.t_bar_1 = fixed.t_bar_diff(1) + fixed.t_bar_2 = fixed.t_bar_diff(2) + + fixed.t_bar_1_sq = fixed.t_bar_diff_sq(1) + fixed.t_bar_2_sq = fixed.t_bar_diff_sq(2) + + fixed.recip_1 = fixed.recip_val(1) + fixed.recip_2 = fixed.recip_val(2) + + fixed.slope_var_1 = fixed.slope_var_val(1) + fixed.slope_var_2 = fixed.slope_var_val(2) + + return fixed \ No newline at end of file From 5df44fac783b06c4c7be3579c83d0bba1451850c Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 24 Aug 2023 13:54:42 -0400 Subject: [PATCH 20/90] Move ramp into its own file --- setup.py | 6 + src/stcal/ramp_fitting/ols_cas22/_core.pxd | 29 +-- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 207 +---------------- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 3 +- src/stcal/ramp_fitting/ols_cas22/_ramp.pxd | 30 +++ src/stcal/ramp_fitting/ols_cas22/_ramp.pyx | 216 ++++++++++++++++++ 6 files changed, 258 insertions(+), 233 deletions(-) create mode 100644 src/stcal/ramp_fitting/ols_cas22/_ramp.pxd create mode 100644 src/stcal/ramp_fitting/ols_cas22/_ramp.pyx diff --git a/setup.py b/setup.py index 81fedac5..38937221 100644 --- a/setup.py +++ b/setup.py @@ -19,6 +19,12 @@ include_dirs=[np.get_include()], language='c++' ), + Extension( + 'stcal.ramp_fitting.ols_cas22._ramp', + ['src/stcal/ramp_fitting/ols_cas22/_ramp.pyx'], + include_dirs=[np.get_include()], + language='c++' + ), Extension( 'stcal.ramp_fitting.ols_cas22._fit_ramps', ['src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx'], diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 1982a99a..6cb59f96 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -27,29 +27,6 @@ cdef struct Fits: vector[float] read_var vector[float] poisson_var - -cdef class Ramp: - cdef Fixed fixed - cdef public float read_noise - cdef public float [:] resultants - - # Computed and cached values for jump detection - # single -> j = i + 1 - # double -> j = i + 2 - - # single and double delta + slope - # (resultants[j] - resultants[i]/(t_bar[j] - t_bar[i]) - cdef public float[:] delta_1, delta_2 - - # single and double sigma terms - # read_noise * recip[i] - cdef public float[:] sigma_1, sigma_2 - - cdef float[:] resultants_diff(Ramp self, int offset) - cdef Fit fit(Ramp self, RampIndex ramp) - - cdef float[:] stats(Ramp self, float slope, RampIndex ramp) - cdef Fits fits(Ramp self, stack[RampIndex] ramps, Thresh thresh) - - -cdef Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants) +cdef Fits reverse_fits(Fits fits) +cdef float threshold(Thresh thresh, float slope) +cdef float get_weight_power(float s) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index 93ff1bb8..bafadab7 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -6,8 +6,7 @@ import numpy as np cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, Thresh, Fit, Ramp, Fits -from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed +from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, Thresh, Fit, Fits # Casertano+2022, Table 2 @@ -23,172 +22,6 @@ cdef inline float get_weight_power(float s): return PTABLE[1][i - 1] return PTABLE[1][i] -cdef 
class Ramp: - """ - Class to contain the data for a single pixel ramp to be fit - - This has to be a class rather than a struct in order to contain memory views - - Parameters - ---------- - resultants : float [:] - array of resultants for single pixel - - memoryview of a numpy array to avoid passing through Python - """ - - @cython.boundscheck(False) - @cython.wraparound(False) - cdef inline float[:] resultants_diff(Ramp self, int offset): - """ - Compute the difference offset of resultants - - Parameters - ---------- - offset : int - index offset to compute difference - """ - cdef int n_diff = len(self.resultants) - offset - cdef float[:] diff = (np.roll(self.resultants, -offset) - self.t_bar)[:n_diff] - - return diff - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef inline Fit fit(Ramp self, RampIndex ramp): - """Fit a portion of single ramp using the Casertano+22 algorithm. - Parameters - ---------- - - Returns - ------- - slope : float - fit slope - read_var : float - read noise induced variance in slope - poisson_var : float - coefficient of Poisson-noise induced variance in slope - multiply by true flux to get actual Poisson variance. - """ - cdef int n_resultants = ramp.end - ramp.start + 1 - - # Special case where there is no or one resultant, there is no fit. - if n_resultants <= 1: - return 0, 0, 0 - - # Setup data for fitting (work over subset of data) - cdef float[:] resultants = self.fixed.resultants[ramp.start:ramp.end + 1] - cdef float[:] t_bar = self.fixed.t_bar[ramp.start:ramp.end + 1] - cdef float[:] tau = self.fixed.tau[ramp.start:ramp.end + 1] - cdef int[:] n_reads = self.fixed.n_reads[ramp.start:ramp.end + 1] - - # Else, do the fitting. - cdef int i = 0, j = 0 - cdef vector[float] weights = vector[float](n_resultants) - cdef vector[float] coeffs = vector[float](n_resultants) - cdef Fit fit = Fit(0, 0, 0) - - cdef float t_bar_mid = (t_bar[0] + t_bar[- 1]) / 2 - - # Casertano+2022 Eq. 44 - # Note we've departed from Casertano+22 slightly; - # there s is just resultants[ramp.end]. But that doesn't seem good if, e.g., - # a CR in the first resultant has boosted the whole ramp high but there - # is no actual signal. - cdef float s = max(resultants[-1] - resultants[0], 0) - s = s / sqrt(self.fixed.read_noise**2 + s) - cdef float power = get_weight_power(s) - - # It's easy to use up a lot of dynamic range on something like - # (tbar - tbarmid) ** 10. Rescale these. - cdef float t_scale = (t_bar[-1] - t_bar[0]) / 2 - t_scale = 1 if t_scale == 0 else t_scale - - cdef float f0 = 0, f1 = 0, f2 = 0 - - # Issue when tbar[] == tbarmid causes exception otherwise - with cython.cpow(True): - for i in range(n_resultants): - # Casertano+22, Eq. 45 - weights[i] = ((((1 + power) * n_reads[i]) / (1 + power * n_reads[i])) * - fabs((t_bar[i] - t_bar_mid) / t_scale) ** power) - - # Casertano+22 Eq. 35 - f0 += weights[i] - f1 += weights[i] * t_bar[i] - f2 += weights[i] * t_bar[i]**2 - - # Casertano+22 Eq. 36 - cdef float det = f2 * f0 - f1 ** 2 - if det == 0: - return (0.0, 0.0, 0.0) - - for i in range(n_resultants): - # Casertano+22 Eq. 37 - coeffs[i] = (f0 * t_bar[i] - f1) * weights[i] / det - - for i in range(n_resultants): - # Casertano+22 Eq. 38 - fit.slope += coeffs[i] * resultants[i] - - # Casertano+22 Eq. 
39 - fit.read_var += (coeffs[i] ** 2 * self.fixed.read_noise ** 2 / n_reads[i]) - - # Casertano+22 Eq 40 - fit.poisson_var += coeffs[i] ** 2 * tau[i] - for j in range(i + 1, n_resultants): - fit.poisson_var += (2 * coeffs[i] * coeffs[j] * t_bar[i]) - - return fit - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef inline float[:] stats(Ramp self, float slope, RampIndex ramp): - cdef np.ndarray[float] delta_1 = np.array(self.delta_1[ramp.start:ramp.end-1]) - slope - cdef np.ndarray[float] delta_2 = np.array(self.delta_2[ramp.start:ramp.end-1]) - slope - - cdef np.ndarray[float] var_1 = ((np.array(self.sigma_1[ramp.start:ramp.end-1]) + - slope * np.array(self.slope_var_1[ramp.start:ramp.end-1])) / - self.fixed.t_bar_1_sq[ramp.start:ramp.end-1]).astype(np.float32) - cdef np.ndarray[float] var_2 = ((np.array(self.sigma_2[ramp.start:ramp.end-1]) + - slope * np.array(self.slope_var_2[ramp.start:ramp.end-1])) / - self.fixed.t_bar_2_sq[ramp.start:ramp.end-1]).astype(np.float32) - - cdef np.ndarray[float] stats_1 = delta_1 / sqrt(var_1) - cdef np.ndarray[float] stats_2 = delta_2 / sqrt(var_2) - - return np.maximum(stats_1, stats_2) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef inline Fits fits(Ramp self, stack[RampIndex] ramps, Thresh thresh): - cdef Fits fits - - cdef RampIndex ramp - cdef Fit fit - cdef float [:] stats - cdef int split - - while not ramps.empty(): - ramp = ramps.top() - ramps.pop() - fit = self.fit(ramp) - stats = self.stats(fit.slope, ramp) - - if max(stats) > threshold(thresh, fit.slope) and self.fixed.use_jump: - split = np.argmax(stats) - - ramps.push(RampIndex(ramp.start, ramp.start + split)) - ramps.push(RampIndex(ramp.start + split + 2, ramp.end)) - else: - fits.slope.push_back(fit.slope) - fits.read_var.push_back(fit.read_var) - fits.poisson_var.push_back(fit.poisson_var) - - return reverse_fits(fits) - cdef float threshold(Thresh thresh, float slope): return thresh.intercept - thresh.constant * log10(slope) @@ -196,41 +29,3 @@ cdef float threshold(Thresh thresh, float slope): cdef Fits reverse_fits(Fits fits): return Fits(fits.slope[::-1], fits.read_var[::-1], fits.poisson_var[::-1]) - - -cdef inline Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants): - """ - Fast constructor for the Ramp C class. - - This is signifantly faster than using the `__init__` or `__cinit__` - this is because this does not have to pass through the Python as part - of the construction. 
- - Parameters - ---------- - fixed : Fixed - Fixed values for all pixels - resultants : float [:] - array of resultants for single pixel - - memoryview of a numpy array to avoid passing through Python - - Return - ------ - ramp : Ramp - Ramp C-class object - """ - - cdef Ramp ramp = Ramp() - - ramp.fixed = fixed - ramp.read_noise = read_noise - ramp.resultants = resultants - - if fixed.use_jump: - ramp.delta_1 = (np.array(ramp.resultants_diff(1)) / np.array(fixed.t_bar_1)).astype(np.float32) - ramp.delta_2 = (np.array(ramp.resultants_diff(2)) / np.array(fixed.t_bar_2)).astype(np.float32) - - ramp.sigma_1 = read_noise * np.array(fixed.recip_1) - ramp.sigma_2 = read_noise * np.array(fixed.recip_2) - - return ramp diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index f87f3938..d962e5ed 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -3,8 +3,9 @@ cimport numpy as np from libcpp.vector cimport vector cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport make_ramp, Fit, RampIndex +from stcal.ramp_fitting.ols_cas22._core cimport Fit, RampIndex from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed, Fixed +from stcal.ramp_fitting.ols_cas22._ramp cimport make_ramp @cython.boundscheck(False) diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd b/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd new file mode 100644 index 00000000..34309cc1 --- /dev/null +++ b/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd @@ -0,0 +1,30 @@ +from libcpp.stack cimport stack + +from stcal.ramp_fitting.ols_cas22._core cimport Fit, Fits, RampIndex, Thresh +from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed + +cdef class Ramp: + cdef Fixed fixed + cdef public float read_noise + cdef public float [:] resultants + + # Computed and cached values for jump detection + # single -> j = i + 1 + # double -> j = i + 2 + + # single and double delta + slope + # (resultants[j] - resultants[i]/(t_bar[j] - t_bar[i]) + cdef public float[:] delta_1, delta_2 + + # single and double sigma terms + # read_noise * recip[i] + cdef public float[:] sigma_1, sigma_2 + + cdef float[:] resultants_diff(Ramp self, int offset) + cdef Fit fit(Ramp self, RampIndex ramp) + + cdef float[:] stats(Ramp self, float slope, RampIndex ramp) + cdef Fits fits(Ramp self, stack[RampIndex] ramps, Thresh thresh) + + +cdef Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants) \ No newline at end of file diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx new file mode 100644 index 00000000..6dd662a4 --- /dev/null +++ b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx @@ -0,0 +1,216 @@ +from libc.math cimport sqrt, fabs, log10 +from libcpp.vector cimport vector +from libcpp.stack cimport stack + +import numpy as np +cimport numpy as np +cimport cython + + +from stcal.ramp_fitting.ols_cas22._core cimport get_weight_power, reverse_fits, threshold, Fit, Fits, RampIndex, Thresh +from stcal.ramp_fitting.ols_cas22._ramp cimport make_ramp, Ramp + + +cdef class Ramp: + """ + Class to contain the data for a single pixel ramp to be fit + + This has to be a class rather than a struct in order to contain memory views + + Parameters + ---------- + resultants : float [:] + array of resultants for single pixel + - memoryview of a numpy array to avoid passing through Python + """ + + @cython.boundscheck(False) + @cython.wraparound(False) + cdef inline float[:] resultants_diff(Ramp self, 
int offset): + """ + Compute the difference offset of resultants + + Parameters + ---------- + offset : int + index offset to compute difference + """ + cdef int n_diff = len(self.resultants) - offset + cdef float[:] diff = (np.roll(self.resultants, -offset) - self.t_bar)[:n_diff] + + return diff + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef inline Fit fit(Ramp self, RampIndex ramp): + """Fit a portion of single ramp using the Casertano+22 algorithm. + Parameters + ---------- + + Returns + ------- + slope : float + fit slope + read_var : float + read noise induced variance in slope + poisson_var : float + coefficient of Poisson-noise induced variance in slope + multiply by true flux to get actual Poisson variance. + """ + cdef int n_resultants = ramp.end - ramp.start + 1 + + # Special case where there is no or one resultant, there is no fit. + if n_resultants <= 1: + return 0, 0, 0 + + # Setup data for fitting (work over subset of data) + cdef float[:] resultants = self.fixed.resultants[ramp.start:ramp.end + 1] + cdef float[:] t_bar = self.fixed.t_bar[ramp.start:ramp.end + 1] + cdef float[:] tau = self.fixed.tau[ramp.start:ramp.end + 1] + cdef int[:] n_reads = self.fixed.n_reads[ramp.start:ramp.end + 1] + + # Else, do the fitting. + cdef int i = 0, j = 0 + cdef vector[float] weights = vector[float](n_resultants) + cdef vector[float] coeffs = vector[float](n_resultants) + cdef Fit fit = Fit(0, 0, 0) + + cdef float t_bar_mid = (t_bar[0] + t_bar[- 1]) / 2 + + # Casertano+2022 Eq. 44 + # Note we've departed from Casertano+22 slightly; + # there s is just resultants[ramp.end]. But that doesn't seem good if, e.g., + # a CR in the first resultant has boosted the whole ramp high but there + # is no actual signal. + cdef float s = max(resultants[-1] - resultants[0], 0) + s = s / sqrt(self.fixed.read_noise**2 + s) + cdef float power = get_weight_power(s) + + # It's easy to use up a lot of dynamic range on something like + # (tbar - tbarmid) ** 10. Rescale these. + cdef float t_scale = (t_bar[-1] - t_bar[0]) / 2 + t_scale = 1 if t_scale == 0 else t_scale + + cdef float f0 = 0, f1 = 0, f2 = 0 + + # Issue when tbar[] == tbarmid causes exception otherwise + with cython.cpow(True): + for i in range(n_resultants): + # Casertano+22, Eq. 45 + weights[i] = ((((1 + power) * n_reads[i]) / (1 + power * n_reads[i])) * + fabs((t_bar[i] - t_bar_mid) / t_scale) ** power) + + # Casertano+22 Eq. 35 + f0 += weights[i] + f1 += weights[i] * t_bar[i] + f2 += weights[i] * t_bar[i]**2 + + # Casertano+22 Eq. 36 + cdef float det = f2 * f0 - f1 ** 2 + if det == 0: + return (0.0, 0.0, 0.0) + + for i in range(n_resultants): + # Casertano+22 Eq. 37 + coeffs[i] = (f0 * t_bar[i] - f1) * weights[i] / det + + for i in range(n_resultants): + # Casertano+22 Eq. 38 + fit.slope += coeffs[i] * resultants[i] + + # Casertano+22 Eq. 
39 + fit.read_var += (coeffs[i] ** 2 * self.fixed.read_noise ** 2 / n_reads[i]) + + # Casertano+22 Eq 40 + fit.poisson_var += coeffs[i] ** 2 * tau[i] + for j in range(i + 1, n_resultants): + fit.poisson_var += (2 * coeffs[i] * coeffs[j] * t_bar[i]) + + return fit + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef inline float[:] stats(Ramp self, float slope, RampIndex ramp): + cdef np.ndarray[float] delta_1 = np.array(self.delta_1[ramp.start:ramp.end-1]) - slope + cdef np.ndarray[float] delta_2 = np.array(self.delta_2[ramp.start:ramp.end-1]) - slope + + cdef np.ndarray[float] var_1 = ((np.array(self.sigma_1[ramp.start:ramp.end-1]) + + slope * np.array(self.slope_var_1[ramp.start:ramp.end-1])) / + self.fixed.t_bar_1_sq[ramp.start:ramp.end-1]).astype(np.float32) + cdef np.ndarray[float] var_2 = ((np.array(self.sigma_2[ramp.start:ramp.end-1]) + + slope * np.array(self.slope_var_2[ramp.start:ramp.end-1])) / + self.fixed.t_bar_2_sq[ramp.start:ramp.end-1]).astype(np.float32) + + cdef np.ndarray[float] stats_1 = delta_1 / sqrt(var_1) + cdef np.ndarray[float] stats_2 = delta_2 / sqrt(var_2) + + return np.maximum(stats_1, stats_2) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef inline Fits fits(Ramp self, stack[RampIndex] ramps, Thresh thresh): + cdef Fits fits + + cdef RampIndex ramp + cdef Fit fit + cdef float [:] stats + cdef int split + + while not ramps.empty(): + ramp = ramps.top() + ramps.pop() + fit = self.fit(ramp) + stats = self.stats(fit.slope, ramp) + + if max(stats) > threshold(thresh, fit.slope) and self.fixed.use_jump: + split = np.argmax(stats) + + ramps.push(RampIndex(ramp.start, ramp.start + split)) + ramps.push(RampIndex(ramp.start + split + 2, ramp.end)) + else: + fits.slope.push_back(fit.slope) + fits.read_var.push_back(fit.read_var) + fits.poisson_var.push_back(fit.poisson_var) + + return reverse_fits(fits) + + +cdef inline Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants): + """ + Fast constructor for the Ramp C class. + + This is signifantly faster than using the `__init__` or `__cinit__` + this is because this does not have to pass through the Python as part + of the construction. 
+ + Parameters + ---------- + fixed : Fixed + Fixed values for all pixels + resultants : float [:] + array of resultants for single pixel + - memoryview of a numpy array to avoid passing through Python + + Return + ------ + ramp : Ramp + Ramp C-class object + """ + + cdef Ramp ramp = Ramp() + + ramp.fixed = fixed + ramp.read_noise = read_noise + ramp.resultants = resultants + + if fixed.use_jump: + ramp.delta_1 = (np.array(ramp.resultants_diff(1)) / np.array(fixed.t_bar_1)).astype(np.float32) + ramp.delta_2 = (np.array(ramp.resultants_diff(2)) / np.array(fixed.t_bar_2)).astype(np.float32) + + ramp.sigma_1 = read_noise * np.array(fixed.recip_1) + ramp.sigma_2 = read_noise * np.array(fixed.recip_2) + + return ramp \ No newline at end of file From 97cd2372d3cfdd732a16700ef266c4173ab1bb40 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 24 Aug 2023 13:55:02 -0400 Subject: [PATCH 21/90] Remove jump file --- .../ols_cas22/_jump_detection.pyx | 113 ------------------ 1 file changed, 113 deletions(-) delete mode 100644 src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx diff --git a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx b/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx deleted file mode 100644 index 7aff64e6..00000000 --- a/src/stcal/ramp_fitting/ols_cas22/_jump_detection.pyx +++ /dev/null @@ -1,113 +0,0 @@ -# import numpy as np -# cimport numpy as np -# from libc.math cimport sqrt, log10 -# from libcpp.vector cimport vector -# from libcpp.stack cimport stack -# cimport cython - -# from stcal.ramp_fitting.ols_cas22._core cimport Ramp - - -# cdef struct RampIndex: -# int start -# int end - - -# cdef struct Thresh: -# float intercept -# float constant - - -# cdef class Jump(Ramp): - -# """ -# Class to contain the data for a single ramp fit with jump detection -# """ -# @cython.boundscheck(False) -# @cython.wraparound(False) -# @cython.cdivision(True) -# cdef inline float[:] stats(Jump self, float slope, int start, int end): -# cdef np.ndarray[float] delta_1 = np.array(self.delta_1[start:end-1]) - slope -# cdef np.ndarray[float] delta_2 = np.array(self.delta_2[start:end-1]) - slope - -# cdef np.ndarray[float] var_1 = ((np.array(self.sigma_1[start:end-1]) + -# slope * np.array(self.slope_var_1[start:end-1])) / -# self.fixed.t_bar_1_sq[start:end-1]).astype(np.float32) -# cdef np.ndarray[float] var_2 = ((np.array(self.sigma_2[start:end-1]) + -# slope * np.array(self.slope_var_2[start:end-1])) / -# self.fixed.t_bar_2_sq[start:end-1]).astype(np.float32) - -# cdef np.ndarray[float] stats_1 = delta_1 / sqrt(var_1) -# cdef np.ndarray[float] stats_2 = delta_2 / sqrt(var_2) - -# return np.maximum(stats_1, stats_2) - -# @cython.boundscheck(False) -# @cython.wraparound(False) -# @cython.cdivision(True) -# cdef inline (stack[float], stack[float], stack[float]) fits(Jump self, stack[RampIndex] ramps, Thresh thresh): -# cdef stack[float] slopes, read_vars, poisson_vars -# cdef RampIndex ramp -# cdef float slope = 0, read_var = 0, poisson_var = 0 -# cdef float [:] stats -# cdef int split - -# while not ramps.empty(): -# ramp = ramps.top() -# ramps.pop() -# slope, read_var, poisson_var = self.fit(ramp.start, ramp.end) -# stats = self.stats(slope, ramp.start, ramp.end) - -# if max(stats) > threshold(thresh, slope): -# split = np.argmax(stats) - -# ramps.push(RampIndex(ramp.start, ramp.start + split)) -# ramps.push(RampIndex(ramp.start + split + 2, ramp.end)) -# else: -# slopes.push(slope) -# read_vars.push(read_var) -# poisson_vars.push(poisson_var) - -# return slopes, read_vars, 
poisson_vars - - -# cdef float threshold(Thresh thresh, float slope): -# return thresh.intercept - thresh.constant * log10(slope) - - -# # cdef inline Jump make_ramp(Fixed fixed, float read_noise, float [:] resultants): -# # """ -# # Fast constructor for the Jump C class. - -# # This is signifantly faster than using the `__init__` or `__cinit__` -# # this is because this does not have to pass through the Python as part -# # of the construction. - -# # Parameters -# # ---------- -# # fixed : Fixed -# # Fixed values for all pixels -# # resultants : float [:] -# # array of resultants for single pixel -# # - memoryview of a numpy array to avoid passing through Python - -# # Return -# # ------ -# # ramp : Jump -# # Jump C-class object -# # """ - -# # cdef Jump jump = Jump() - -# # jump.start = start -# # jump.end = end - -# # jump.resultants = resultants -# # jump.t_bar = t_bar -# # jump.tau = tau - -# # jump.read_noise = read_noise - -# # jump.n_reads = n_reads - -# # return jump From c0fe85f6403a89d0f3f42ada6c0348c2fba42fb0 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 24 Aug 2023 14:28:13 -0400 Subject: [PATCH 22/90] Add core documentation --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 5 +- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 93 ++++++++++++++++++++-- src/stcal/ramp_fitting/ols_cas22/_ramp.pyx | 4 +- 3 files changed, 92 insertions(+), 10 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 6cb59f96..cafe54ce 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -27,6 +27,7 @@ cdef struct Fits: vector[float] read_var vector[float] poisson_var -cdef Fits reverse_fits(Fits fits) +cdef float get_power(float s) cdef float threshold(Thresh thresh, float slope) -cdef float get_weight_power(float s) +cdef Thresh make_thresh(float intercept, float constant) +cdef Fits reverse_fits(Fits fits) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index bafadab7..b4cbc78b 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -1,3 +1,23 @@ +""" +Define the basic types and functions for the CAS22 algorithm with jump detection + +Structs: +------- + RampIndex + int start: starting index of the ramp in the resultants + int end: ending index of the ramp in the resultants + Thresh + float intercept: intercept of the threshold + float constant: constant of the threshold + Fit + float slope: slope of a single ramp + float read_var: read noise variance of a single ramp + float poisson_var: poisson noise variance of single ramp + Fits + vector[float] slope: slopes of the ramps for a single pixel + vector[float] read_var: read noise variances of the ramps for a single pixel + vector[float] poisson_var: poisson noise variances of the ramps for a single pixel +""" from libc.math cimport sqrt, fabs, log10 from libcpp.vector cimport vector from libcpp.stack cimport stack @@ -6,26 +26,87 @@ import numpy as np cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, Thresh, Fit, Fits +from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, Thresh, Fit, Fits, get_power, threshold, reverse_fits # Casertano+2022, Table 2 cdef float[2][6] PTABLE = [ [-np.inf, 5, 10, 20, 50, 100], [0, 0.4, 1, 3, 6, 10]] -cdef int PTABLE_LENGTH = 6 -cdef inline float get_weight_power(float s): + +cdef inline float get_power(float s): + """ + Return the power 
from Casertano+22, Table 2 + + Parameters + ---------- + s: float + signal from the resultants + + Returns + ------- + signal power from Table 2 + """ cdef int i - for i in range(PTABLE_LENGTH): + for i in range(6): if s < PTABLE[0][i]: return PTABLE[1][i - 1] + return PTABLE[1][i] -cdef float threshold(Thresh thresh, float slope): +cdef inline float threshold(Thresh thresh, float slope): + """ + Compute jump threshold + + Parameters + ---------- + thresh : Thresh + threshold parameters struct: + slope : float + slope of the ramp in question + + Returns + ------- + intercept - constant * log10(slope) + """ return thresh.intercept - thresh.constant * log10(slope) -cdef Fits reverse_fits(Fits fits): +cdef inline Thresh make_thresh(float intercept, float constant): + """ + Make a threshold parameters struct: + intercept - constant * log10(slope) + + Parameters + ---------- + intercept : float + intercept of the threshold + constant : float + constant of the threshold + + Returns + ------- + threshold parameters struct + """ + return Thresh(intercept, constant) + + +cdef inline Fits reverse_fits(Fits fits): + """ + Reverse a Fits struct + The jump detection step computes the ramps in reverse time order for each pixel. + This reverses the results of the fit to match the original time order, which is + much faster than prepending to a C++ vector. + + Parameters + ---------- + fits : Fits + fits struct to reverse + + Returns + ------- + reversed fits struct + """ return Fits(fits.slope[::-1], fits.read_var[::-1], fits.poisson_var[::-1]) diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx index 6dd662a4..e8a2b483 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx @@ -7,7 +7,7 @@ cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport get_weight_power, reverse_fits, threshold, Fit, Fits, RampIndex, Thresh +from stcal.ramp_fitting.ols_cas22._core cimport get_power, reverse_fits, threshold, Fit, Fits, RampIndex, Thresh from stcal.ramp_fitting.ols_cas22._ramp cimport make_ramp, Ramp @@ -85,7 +85,7 @@ cdef class Ramp: # is no actual signal. cdef float s = max(resultants[-1] - resultants[0], 0) s = s / sqrt(self.fixed.read_noise**2 + s) - cdef float power = get_weight_power(s) + cdef float power = get_power(s) # It's easy to use up a lot of dynamic range on something like # (tbar - tbarmid) ** 10. Rescale these. 
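# --- Editor's illustrative sketch (not part of the patch) ---------------------
# Patch 22 documents the helpers kept in _core.pyx: get_power, which looks up
# the Casertano+22 Table 2 weighting exponent for a signal estimate s, and
# threshold, which evaluates intercept - constant * log10(slope) for jump
# detection.  A rough NumPy equivalent of the two lookups (function names are
# illustrative only; the intercept/constant values below are arbitrary example
# numbers, not stcal defaults):

import numpy as np

TABLE2_SIGNAL = [-np.inf, 5, 10, 20, 50, 100]   # Casertano+22 Table 2 bins
TABLE2_POWER = [0, 0.4, 1, 3, 6, 10]            # weighting power per bin

def get_power_sketch(s):
    """Weighting power for signal estimate s (step function of Table 2)."""
    # index of the bin whose lower edge is the largest one not exceeding s
    i = int(np.searchsorted(TABLE2_SIGNAL, s, side="right")) - 1
    return TABLE2_POWER[min(i, len(TABLE2_POWER) - 1)]

def threshold_sketch(intercept, constant, slope):
    """Jump-detection threshold as a function of the fitted slope."""
    return intercept - constant * np.log10(slope)

# e.g. get_power_sketch(12.0) == 1, threshold_sketch(5.5, 0.33, 100.0) == 4.84
# -------------------------------------------------------------------------------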
From 43f08e3778ff7865d37ec7f8ee9ed130bac8b29b Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 24 Aug 2023 14:55:59 -0400 Subject: [PATCH 23/90] Document the fixed data structure --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 1 - src/stcal/ramp_fitting/ols_cas22/_core.pyx | 34 ++--- src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 16 --- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 146 +++++++++++++++----- 4 files changed, 119 insertions(+), 78 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index cafe54ce..7037f701 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -29,5 +29,4 @@ cdef struct Fits: cdef float get_power(float s) cdef float threshold(Thresh thresh, float slope) -cdef Thresh make_thresh(float intercept, float constant) cdef Fits reverse_fits(Fits fits) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index b4cbc78b..b23eb0ae 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -17,14 +17,19 @@ Structs: vector[float] slope: slopes of the ramps for a single pixel vector[float] read_var: read noise variances of the ramps for a single pixel vector[float] poisson_var: poisson noise variances of the ramps for a single pixel + +Functions: +---------- + get_power + Return the power from Casertano+22, Table 2 + threshold + Compute jump threshold + reverse_fits + Reverse a Fits struct """ -from libc.math cimport sqrt, fabs, log10 -from libcpp.vector cimport vector -from libcpp.stack cimport stack -from libcpp cimport bool +from libc.math cimport log10 import numpy as np cimport numpy as np -cimport cython from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, Thresh, Fit, Fits, get_power, threshold, reverse_fits @@ -74,25 +79,6 @@ cdef inline float threshold(Thresh thresh, float slope): return thresh.intercept - thresh.constant * log10(slope) -cdef inline Thresh make_thresh(float intercept, float constant): - """ - Make a threshold parameters struct: - intercept - constant * log10(slope) - - Parameters - ---------- - intercept : float - intercept of the threshold - constant : float - constant of the threshold - - Returns - ------- - threshold parameters struct - """ - return Thresh(intercept, constant) - - cdef inline Fits reverse_fits(Fits fits): """ Reverse a Fits struct diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd index 25125e21..ba3066bd 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -1,29 +1,13 @@ from libcpp cimport bool cdef class Fixed: - # Fixed parameters for all pixels input cdef public bool use_jump cdef public float[:] t_bar, tau cdef public int[:] n_reads - # Computed and cached values for jump detection - # single -> j = i + 1 - # double -> j = i + 2 - - # single and double differences of t_bar - # t_bar[j] - t_bar[i] cdef public float[:] t_bar_1, t_bar_2 - - # squared single and double differences of t_bar - # (t_bar[j] - t_bar[i])**2 cdef public float[:] t_bar_1_sq, t_bar_2_sq - - # single and double reciprical sum values - # ((1/n_reads[i]) + (1/n_reads[j])) cdef public float[:] recip_1, recip_2 - - # single and double slope var terms - # (tau[i] + tau[j] - min(t_bar[i], t_bar[j])) * correction(i, j) cdef public float[:] slope_var_1, slope_var_2 cdef float[:] t_bar_diff(Fixed self, int offset) diff --git 
a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index 58c90f33..b90dbef7 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -1,3 +1,17 @@ +""" +Define the data which is fixed for all pixels to compute the CAS22 algorithm with jump detection + +Objects +------- +Fixed : class + Class to contain the data fixed for all pixels and commonly referenced + universal values for jump detection + +Functions +--------- +make_fixed : function + Fast constructor for Fixed class +""" import numpy as np cimport numpy as np @@ -10,31 +24,50 @@ cdef class Fixed: Parameters ---------- - t_bar : vector[float] - mean times of resultants - tau : vector[float] - variance weighted mean times of resultants - n_reads : vector[int] - number of reads contributing to reach resultant - - t_bar_1 : vector[float] - single differences of t_bar (t_bar[i+1] - t_bar[i]) - t_bar_1_sq : vector[float] - squared single differences of t_bar (t_bar[i+1] - t_bar[i])**2 - t_bar_2 : vector[float] - double differences of t_bar (t_bar[i+2] - t_bar[i]) - t_bar_2_sq: vector[float] - squared double differences of t_bar (t_bar[i+2] - t_bar[i])**2 - sigma_1 : vector[float] - single of sigma term read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) - sigma_2 : vector[float] - double of sigma term read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) + t_bar : float[:] + mean times of resultants (data input) + tau : float[:] + variance weighted mean times of resultants (data input) + n_reads : float[:] + number of reads contributing to reach resultant (data input) + + use_jump : bool + flag to indicate whether to use jump detection (user input) + + t_bar_1 : float[:] + single differences of t_bar: + (t_bar[i+1] - t_bar[i]) + t_bar_1_sq : float[:] + squared single differences of t_bar: + (t_bar[i+1] - t_bar[i])**2 + t_bar_2 : float[:] + double differences of t_bar: + (t_bar[i+2] - t_bar[i]) + t_bar_2_sq: float[:] + squared double differences of t_bar: + (t_bar[i+2] - t_bar[i])**2 + recip_1 : vector[float] + single sum of reciprocal n_reads: + ((1/n_reads[i+1]) + (1/n_reads[i])) + recip_2 : vector[float] + double sum of reciprocal n_reads: + ((1/n_reads[i+2]) + (1/n_reads[i])) slope_var_1 : vector[float] - single of slope variance term - ([tau[i] + tau[i+1] - min(t_bar[i], t_bar[i+1])) * correction(i, i+1) + single of slope variance term: + ([tau[i] + tau[i+1] - min(t_bar[i], t_bar[i+1])) * correction(i, i+1) slope_var_2 : vector[float] - double of slope variance term - ([tau[i] + tau[i+2] - min(t_bar[i], t_bar[i+2])) * correction(i, i+2) + double of slope variance term: + ([tau[i] + tau[i+2] - min(t_bar[i], t_bar[i+2])) * correction(i, i+2) + + Notes + ----- + - t_bar_*, t_bar_*_sq, recip_*, slope_var_* are only computed if use_jump is True. + These values represent reused computations for jump detection which are used by + every pixel for jump detection. They are computed once and stored in the Fixed + for reuse by all pixels. + - The computations are done using vectorized operations for some performance + increases. However, this is marginal compaired with the performance increase + from precomputing the values and reusing them. 
""" cdef inline float[:] t_bar_diff(Fixed self, int offset): @@ -45,6 +78,10 @@ cdef class Fixed: ---------- offset : int index offset to compute difference + + Returns + ------- + t_bar[i+offset] - t_bar[i] """ cdef int n_diff = len(self.t_bar) - offset cdef float[:] diff = (np.roll(self.t_bar, -offset) - self.t_bar)[:n_diff] @@ -58,12 +95,13 @@ cdef class Fixed: Parameters ---------- offset : int - index offset to compute difference - """ - cdef int n_diff = len(self.t_bar) - offset - cdef float[:] diff = (np.roll(self.t_bar, -offset) - self.t_bar)[:n_diff] ** 2 + index offset - return diff + Returns + ------- + (t_bar[i+offset] - t_bar[i])**2 + """ + return np.array(self.t_bar_diff(offset)) ** 2 cdef inline float[:] recip_val(Fixed self, int offset): """ @@ -73,7 +111,11 @@ cdef class Fixed: Parameters ---------- offset : int - index offset to compute difference + index offset + + Returns + ------- + (1/n_reads[i+offset] + 1/n_reads[i]) """ cdef int n_diff = len(self.t_bar) - offset @@ -91,10 +133,14 @@ cdef class Fixed: The index of the first read in the segment j : int The index of the last read in the segment + + Returns + ------- + the correction factor f_corr for a single term """ cdef float denom = self.t_bar[self.n_reads[i] - 1] - self.t_bar[0] - if j - i == 1: + if i - j == 1: return (1 - (self.t_bar[i + 1] - self.t_bar[i]) / denom) ** 2 else: return (1 - 0.75 * (self.t_bar[i + 2] - self.t_bar[i]) / denom) ** 2 @@ -102,26 +148,51 @@ cdef class Fixed: cdef inline float[:] slope_var_val(Fixed self, int offset): """ Compute the sigma values - (tau[i] + tau[i+offset] - min(t_bar[i], t_bar[i+offset])) * - correction(i, i+offset) Parameters ---------- offset : int - index offset to compute difference + index offset + + Returns + ------- + (tau[i] + tau[i+offset] - min(t_bar[i], t_bar[i+offset])) * correction(i, i+offset) """ cdef int n_diff = len(self.t_bar) - offset - cdef float[:] slope_var_val = ( - (self.tau + np.roll(self.tau, -offset) - - np.minimum(self.t_bar, np.roll(self.t_bar, -offset))) * - self.correction(0, offset))[:n_diff] + # Comput correction factor vector + cdef int i + cdef np.ndarray[float] f_corr = np.zeros(n_diff, dtype=np.float32) + for i in range(n_diff): + f_corr[i] = self.correction(i, i + offset) + + cdef float[:] slope_var_val = ((self.tau + np.roll(self.tau, -offset) - + np.minimum(self.t_bar, np.roll(self.t_bar, -offset)))[:n_diff]) * f_corr return slope_var_val cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, bool use_jump): + """ + Fast constructor for Fixed class + Use this instead of an __init__ because it does not incure the overhead of + switching back and forth to python + Parameters + ---------- + t_bar : float[:] + mean times of resultants (data input) + tau : float[:] + variance weighted mean times of resultants (data input) + n_reads : float[:] + number of reads contributing to reach resultant (data input) + use_jump : bool + flag to indicate whether to use jump detection (user input) + + Returns + ------- + Fixed parameters object (with precomputed values if use_jump is True) + """ cdef Fixed fixed = Fixed() fixed.use_jump = use_jump @@ -129,6 +200,7 @@ cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, bool fixed.tau = tau fixed.n_reads = n_reads + # Precompute jump detection computations shared by all pixels if use_jump: fixed.t_bar_1 = fixed.t_bar_diff(1) fixed.t_bar_2 = fixed.t_bar_diff(2) From 97adae8ba099a3b9036541b0ccda826658bc5e1f Mon Sep 17 00:00:00 2001 From: William Jamieson Date: 
Thu, 24 Aug 2023 15:32:53 -0400 Subject: [PATCH 24/90] Document the ramp object --- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 7 +- src/stcal/ramp_fitting/ols_cas22/_ramp.pxd | 9 - src/stcal/ramp_fitting/ols_cas22/_ramp.pyx | 173 ++++++++++++++++---- 3 files changed, 142 insertions(+), 47 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index b90dbef7..d7ca3008 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -67,7 +67,7 @@ cdef class Fixed: for reuse by all pixels. - The computations are done using vectorized operations for some performance increases. However, this is marginal compaired with the performance increase - from precomputing the values and reusing them. + from pre-computing the values and reusing them. """ cdef inline float[:] t_bar_diff(Fixed self, int offset): @@ -191,16 +191,17 @@ cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, bool Returns ------- - Fixed parameters object (with precomputed values if use_jump is True) + Fixed parameters object (with pre-computed values if use_jump is True) """ cdef Fixed fixed = Fixed() + # Fill in input information for all pixels fixed.use_jump = use_jump fixed.t_bar = t_bar fixed.tau = tau fixed.n_reads = n_reads - # Precompute jump detection computations shared by all pixels + # Pre-compute jump detection computations shared by all pixels if use_jump: fixed.t_bar_1 = fixed.t_bar_diff(1) fixed.t_bar_2 = fixed.t_bar_diff(2) diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd b/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd index 34309cc1..42634417 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd @@ -8,16 +8,7 @@ cdef class Ramp: cdef public float read_noise cdef public float [:] resultants - # Computed and cached values for jump detection - # single -> j = i + 1 - # double -> j = i + 2 - - # single and double delta + slope - # (resultants[j] - resultants[i]/(t_bar[j] - t_bar[i]) cdef public float[:] delta_1, delta_2 - - # single and double sigma terms - # read_noise * recip[i] cdef public float[:] sigma_1, sigma_2 cdef float[:] resultants_diff(Ramp self, int offset) diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx index e8a2b483..d930a2c2 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx @@ -1,3 +1,17 @@ +""" +Define the C class for the CAS22 algorithm for fitting ramps with jump detection + +Objects +------- +Ramp : class + Class to handle ramp fit with jump detection for a single pixel + Provides fits method which fits all the ramps for a single pixel + +Functions +--------- +make_ramp : function + Fast constructor for the Ramp class +""" from libc.math cimport sqrt, fabs, log10 from libcpp.vector cimport vector from libcpp.stack cimport stack @@ -13,15 +27,48 @@ from stcal.ramp_fitting.ols_cas22._ramp cimport make_ramp, Ramp cdef class Ramp: """ - Class to contain the data for a single pixel ramp to be fit - - This has to be a class rather than a struct in order to contain memory views - + Class to contain the data to fit ramps for a single pixel. + This data is drawn from for all ramps for a single pixel. + This class pre-computes jump detection values shared by all ramps + for a given pixel. 
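The per-pixel quantities listed in the Parameters below (delta_* and sigma_*) follow the same pattern as the Fixed pre-computations, scaled by the pixel's resultants and read noise. A minimal sketch, assuming plain 1-D arrays and a scalar read noise (pixel_precompute is an illustrative name only):

import numpy as np

def pixel_precompute(resultants, t_bar, n_reads, read_noise, offset):
    # offset is 1 for single differences, 2 for double differences
    resultants = np.asarray(resultants, dtype=float)
    t_bar = np.asarray(t_bar, dtype=float)
    n_reads = np.asarray(n_reads, dtype=float)
    n_diff = len(resultants) - offset
    delta = ((np.roll(resultants, -offset) - resultants)[:n_diff] /
             (np.roll(t_bar, -offset) - t_bar)[:n_diff])             # local slope estimate
    sigma = read_noise * (1 / np.roll(n_reads, -offset) + 1 / n_reads)[:n_diff]
    return delta, sigma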
+ Parameters ---------- + fixed : Fixed + Fixed values for all pixels (pre-computed data) + read_noise : float + The read noise for the given pixel (data input) resultants : float [:] - array of resultants for single pixel - - memoryview of a numpy array to avoid passing through Python + array of resultants for single pixel (data input) + + delta_1 : float [:] + single difference delta+slope: + (resultants[i+1] - resultants[i]) / (t_bar[i+1] - t_bar[i]) + delta_2 : float [:] + double difference delta+slope: + (resultants[i+2] - resultants[i]) / (t_bar[i+2] - t_bar[i]) + sigma_1 : float [:] + single difference "sigma": + read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) + sigma_2 : float [:] + double difference "sigma": + read_noise * ((1/n_reads[i+2]) + (1/n_reads[i])) + + Notes + ----- + - delta_*, sigma_* are only computed if use_jump is True. These values + represent reused computations for jump detection which are used by every + ramp for the given pixel for jump detection. They are computed once and + stored for reuse by all ramp computations for the pixel. + - The computations are done using vectorized operations for some performance + increases. However, this is marginal compaired with the performance increase + from pre-computing the values and reusing them. + + Methods + ------- + fits (ramp_stack) : method + Compute all the ramps for a single pixel using the Casertano+22 algorithm + with jump detection. """ @cython.boundscheck(False) @@ -34,6 +81,9 @@ cdef class Ramp: ---------- offset : int index offset to compute difference + Returns + ------- + (resultants[i+offset] - resultants[i]) """ cdef int n_diff = len(self.resultants) - offset cdef float[:] diff = (np.roll(self.resultants, -offset) - self.t_bar)[:n_diff] @@ -44,25 +94,25 @@ cdef class Ramp: @cython.wraparound(False) @cython.cdivision(True) cdef inline Fit fit(Ramp self, RampIndex ramp): - """Fit a portion of single ramp using the Casertano+22 algorithm. + """ + Fit a single ramp using Casertano+22 algorithm. + Parameters ---------- + ramp : RampIndex + Struct for start and end of ramp to fit Returns ------- - slope : float - fit slope - read_var : float - read noise induced variance in slope - poisson_var : float - coefficient of Poisson-noise induced variance in slope - multiply by true flux to get actual Poisson variance. + Fit struct of slope, read_var, poisson_var """ cdef int n_resultants = ramp.end - ramp.start + 1 + cdef Fit fit = Fit(0, 0, 0) # Special case where there is no or one resultant, there is no fit. if n_resultants <= 1: - return 0, 0, 0 + return fit + # Else, do the fitting. # Setup data for fitting (work over subset of data) cdef float[:] resultants = self.fixed.resultants[ramp.start:ramp.end + 1] @@ -70,12 +120,10 @@ cdef class Ramp: cdef float[:] tau = self.fixed.tau[ramp.start:ramp.end + 1] cdef int[:] n_reads = self.fixed.n_reads[ramp.start:ramp.end + 1] - # Else, do the fitting. + # initalize fit cdef int i = 0, j = 0 cdef vector[float] weights = vector[float](n_resultants) cdef vector[float] coeffs = vector[float](n_resultants) - cdef Fit fit = Fit(0, 0, 0) - cdef float t_bar_mid = (t_bar[0] + t_bar[- 1]) / 2 # Casertano+2022 Eq. 44 @@ -93,7 +141,6 @@ cdef class Ramp: t_scale = 1 if t_scale == 0 else t_scale cdef float f0 = 0, f1 = 0, f2 = 0 - # Issue when tbar[] == tbarmid causes exception otherwise with cython.cpow(True): for i in range(n_resultants): @@ -109,7 +156,7 @@ cdef class Ramp: # Casertano+22 Eq. 
36 cdef float det = f2 * f0 - f1 ** 2 if det == 0: - return (0.0, 0.0, 0.0) + return fit for i in range(n_resultants): # Casertano+22 Eq. 37 @@ -133,6 +180,27 @@ cdef class Ramp: @cython.wraparound(False) @cython.cdivision(True) cdef inline float[:] stats(Ramp self, float slope, RampIndex ramp): + """ + Compute fit statistics for jump detection on a single ramp + Computed using: + + var_1[i] = ((sigma_1[i] + slope * slope_var_1[i]) / t_bar_1_sq[i]) + var_2[i] = ((sigma_2[i] + slope * slope_var_2[i]) / t_bar_2_sq[i]) + + s_1[i] = (delta_1[i] - slope) / sqrt(var_1[i]) + s_2[i] = (delta_2[i] - slope) / sqrt(var_2[i]) + + stats[i] = max(s_1[i], s_2[i]) + Parameters + ---------- + ramp : RampIndex + Struct for start and end of ramp to fit + + Returns + ------- + list of statistics for each resultant + except for the last 2 due to single/double difference due to indexing + """ cdef np.ndarray[float] delta_1 = np.array(self.delta_1[ramp.start:ramp.end-1]) - slope cdef np.ndarray[float] delta_2 = np.array(self.delta_2[ramp.start:ramp.end-1]) - slope @@ -143,8 +211,8 @@ cdef class Ramp: slope * np.array(self.slope_var_2[ramp.start:ramp.end-1])) / self.fixed.t_bar_2_sq[ramp.start:ramp.end-1]).astype(np.float32) - cdef np.ndarray[float] stats_1 = delta_1 / sqrt(var_1) - cdef np.ndarray[float] stats_2 = delta_2 / sqrt(var_2) + cdef np.ndarray[float] stats_1 = (delta_1 / np.sqrt(var_1, dtype=np.float32)).astype(np.float32) + cdef np.ndarray[float] stats_2 = (delta_2 / np.sqrt(var_2, dtype=np.float32)).astype(np.float32) return np.maximum(stats_1, stats_2) @@ -152,29 +220,64 @@ cdef class Ramp: @cython.wraparound(False) @cython.cdivision(True) cdef inline Fits fits(Ramp self, stack[RampIndex] ramps, Thresh thresh): - cdef Fits fits + """ + Compute all the ramps for a single pixel using the Casertano+22 algorithm + with jump detection. + + Note: This algorithm computes the ramps for the pixel in reverse time order + so that the last uncomputed ramp in time is always on top of the stack. + This means we compute the slopes in reverse time order, so we have to + reverse the order of the output data to be consistent with user + expectations. 
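The splitting strategy described in this note can be summarized with a short pure-Python sketch. Here fit_one, stats_for and threshold are stand-ins for the Cython fit, stats and threshold machinery, and ramps is a list of (start, end) index pairs used as a stack; this is a simplified illustration, not the module's API:

import numpy as np

def fit_with_jump_detection(ramps, fit_one, stats_for, threshold):
    slopes = []
    while ramps:
        start, end = ramps.pop()                    # last ramp in time is on top of the stack
        slope = fit_one(start, end)
        stats = stats_for(slope, start, end)
        if len(stats) and max(stats) > threshold(slope):
            split = int(np.argmax(stats))           # most discrepant single/double difference
            ramps.append((start, start + split))    # ramp before the suspected jump
            ramps.append((start + split + 2, end))  # ramp after it, skipping the jump pair
            continue
        slopes.append(slope)
    return slopes[::-1]                             # restore time order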
+ Parameters + ---------- + ramps : stack[RampIndex] + Stack of initial ramps to fit for a single pixel + multiple ramps are possible due to dq flags + thresh : Thresh + Thresholds struct passed in for jump detection + + Returns + ------- + Fits struct of all the fits for a single pixel + """ + # Setup algorithm + cdef Fits fits cdef RampIndex ramp cdef Fit fit cdef float [:] stats cdef int split + # Run while the stack is non-empty while not ramps.empty(): + # Remove top ramp of the stack to use ramp = ramps.top() ramps.pop() + + # Compute fit fit = self.fit(ramp) - stats = self.stats(fit.slope, ramp) + + if self.fixed.use_jump: + stats = self.stats(fit.slope, ramp) - if max(stats) > threshold(thresh, fit.slope) and self.fixed.use_jump: - split = np.argmax(stats) + if max(stats) > threshold(thresh, fit.slope): + # Compute split point to create two new ramps + split = np.argmax(stats) + + # add ramps so last ramp in time is on top of stack + ramps.push(RampIndex(ramp.start, ramp.start + split)) + ramps.push(RampIndex(ramp.start + split + 2, ramp.end)) - ramps.push(RampIndex(ramp.start, ramp.start + split)) - ramps.push(RampIndex(ramp.start + split + 2, ramp.end)) - else: - fits.slope.push_back(fit.slope) - fits.read_var.push_back(fit.read_var) - fits.poisson_var.push_back(fit.poisson_var) + # Return to top of loop to fit new ramps (without adding to fits) + continue + # Add fit to fits if no jump detection or stats are less than threshold + fits.slope.push_back(fit.slope) + fits.read_var.push_back(fit.read_var) + fits.poisson_var.push_back(fit.poisson_var) + + # Reverse the slope data return reverse_fits(fits) @@ -196,16 +299,16 @@ cdef inline Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants): Return ------ - ramp : Ramp - Ramp C-class object + Ramp C-class object (with pre-computed values if use_jump is True) """ - cdef Ramp ramp = Ramp() + # Fill in input information for pixel ramp.fixed = fixed ramp.read_noise = read_noise ramp.resultants = resultants + # Pre-compute values for jump detection shared by all ramps for this pixel if fixed.use_jump: ramp.delta_1 = (np.array(ramp.resultants_diff(1)) / np.array(fixed.t_bar_1)).astype(np.float32) ramp.delta_2 = (np.array(ramp.resultants_diff(2)) / np.array(fixed.t_bar_2)).astype(np.float32) From 8be95bbdbaf03b4093d998fcaf70b33396ae898f Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 24 Aug 2023 16:19:35 -0400 Subject: [PATCH 25/90] Fold theshold into an object so it doesn't have to be passed around --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 14 ++-- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 66 +++++++++++++------ .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 3 +- src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 8 ++- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 7 +- src/stcal/ramp_fitting/ols_cas22/_ramp.pxd | 2 +- src/stcal/ramp_fitting/ols_cas22/_ramp.pyx | 8 +-- 7 files changed, 72 insertions(+), 36 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 7037f701..db504b7b 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -11,11 +11,6 @@ cdef struct RampIndex: int end -cdef struct Thresh: - float intercept - float constant - - cdef struct Fit: float slope float read_var @@ -27,6 +22,13 @@ cdef struct Fits: vector[float] read_var vector[float] poisson_var + +cdef class Thresh: + cdef float intercept + cdef float constant + + cdef float run(Thresh self, float slope) + +cdef Thresh 
make_threshold(float intercept, float constant) cdef float get_power(float s) -cdef float threshold(Thresh thresh, float slope) cdef Fits reverse_fits(Fits fits) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index b23eb0ae..2188aa18 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -6,9 +6,6 @@ Structs: RampIndex int start: starting index of the ramp in the resultants int end: ending index of the ramp in the resultants - Thresh - float intercept: intercept of the threshold - float constant: constant of the threshold Fit float slope: slope of a single ramp float read_var: read noise variance of a single ramp @@ -18,6 +15,11 @@ Structs: vector[float] read_var: read noise variances of the ramps for a single pixel vector[float] poisson_var: poisson noise variances of the ramps for a single pixel +Objects +------- + Thresh : class + Hold the threshold parameters and compute the threshold + Functions: ---------- get_power @@ -31,7 +33,7 @@ from libc.math cimport log10 import numpy as np cimport numpy as np -from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, Thresh, Fit, Fits, get_power, threshold, reverse_fits +from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, Thresh, Fit, Fits, get_power, reverse_fits, make_threshold # Casertano+2022, Table 2 @@ -61,38 +63,60 @@ cdef inline float get_power(float s): return PTABLE[1][i] -cdef inline float threshold(Thresh thresh, float slope): +cdef inline Fits reverse_fits(Fits fits): """ - Compute jump threshold + Reverse a Fits struct + The jump detection step computes the ramps in reverse time order for each pixel. + This reverses the results of the fit to match the original time order, which is + much faster than prepending to a C++ vector. Parameters ---------- - thresh : Thresh - threshold parameters struct: - slope : float - slope of the ramp in question + fits : Fits + fits struct to reverse Returns ------- - intercept - constant * log10(slope) + reversed fits struct """ - return thresh.intercept - thresh.constant * log10(slope) + return Fits(fits.slope[::-1], fits.read_var[::-1], fits.poisson_var[::-1]) -cdef inline Fits reverse_fits(Fits fits): +cdef class Thresh: + cdef inline float run(Thresh self, float slope): + """ + Compute jump threshold + + Parameters + ---------- + slope : float + slope of the ramp in question + + Returns + ------- + intercept - constant * log10(slope) + """ + return self.intercept - self.constant * log10(slope) + + +cdef Thresh make_threshold(float intercept, float constant): """ - Reverse a Fits struct - The jump detection step computes the ramps in reverse time order for each pixel. - This reverses the results of the fit to match the original time order, which is - much faster than prepending to a C++ vector. 
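In plain Python the threshold behaviour being introduced here amounts to the following sketch (the class name Threshold is illustrative; the 5.5 and 1/3 defaults shown are the values fit_ramps passes to make_threshold later in this series):

from math import log10

class Threshold:
    def __init__(self, intercept=5.5, constant=1 / 3.0):
        self.intercept = intercept
        self.constant = constant

    def run(self, slope):
        # a ramp is split when a fit statistic exceeds this value
        return self.intercept - self.constant * log10(slope)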
+ Create a Thresh object Parameters ---------- - fits : Fits - fits struct to reverse + intercept : float + intercept of the threshold + constant : float + constant of the threshold Returns ------- - reversed fits struct + Thresh object """ - return Fits(fits.slope[::-1], fits.read_var[::-1], fits.poisson_var[::-1]) + + thresh = Thresh() + thresh.intercept = intercept + thresh.constant = constant + + return thresh diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index d962e5ed..cd358e30 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -3,7 +3,7 @@ cimport numpy as np from libcpp.vector cimport vector cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport Fit, RampIndex +from stcal.ramp_fitting.ols_cas22._core cimport Fit, RampIndex, make_threshold from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed, Fixed from stcal.ramp_fitting.ols_cas22._ramp cimport make_ramp @@ -122,6 +122,7 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, t_bar.data(), tau.data(), n_reads.data(), + make_threshold(5.5, 1/3.0), use_jumps) cdef int n_pixel = resultants.shape[1] diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd index ba3066bd..7e74c118 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -1,10 +1,16 @@ from libcpp cimport bool +from stcal.ramp_fitting.ols_cas22._core cimport Thresh + + cdef class Fixed: cdef public bool use_jump + cdef public float[:] t_bar, tau cdef public int[:] n_reads + cdef Thresh threshold + cdef public float[:] t_bar_1, t_bar_2 cdef public float[:] t_bar_1_sq, t_bar_2_sq cdef public float[:] recip_1, recip_2 @@ -18,4 +24,4 @@ cdef class Fixed: cdef float correction(Fixed self, int i, int j) -cdef Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, bool use_jump) \ No newline at end of file +cdef Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, Thresh threshold, bool use_jump) \ No newline at end of file diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index d7ca3008..253e0cba 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -15,6 +15,7 @@ make_fixed : function import numpy as np cimport numpy as np +from stcal.ramp_fitting.ols_cas22._core cimport Thresh from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed cdef class Fixed: @@ -172,7 +173,7 @@ cdef class Fixed: return slope_var_val -cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, bool use_jump): +cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, Thresh threshold, bool use_jump): """ Fast constructor for Fixed class Use this instead of an __init__ because it does not incure the overhead of @@ -186,6 +187,8 @@ cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, bool variance weighted mean times of resultants (data input) n_reads : float[:] number of reads contributing to reach resultant (data input) + threshold : Thresh + threshold object (user input) use_jump : bool flag to indicate whether to use jump detection (user input) @@ -197,6 +200,8 @@ cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, bool # Fill in input information for all pixels fixed.use_jump = use_jump + fixed.threshold = threshold + fixed.t_bar = t_bar fixed.tau = tau 
fixed.n_reads = n_reads diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd b/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd index 42634417..9f14d9e0 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd @@ -15,7 +15,7 @@ cdef class Ramp: cdef Fit fit(Ramp self, RampIndex ramp) cdef float[:] stats(Ramp self, float slope, RampIndex ramp) - cdef Fits fits(Ramp self, stack[RampIndex] ramps, Thresh thresh) + cdef Fits fits(Ramp self, stack[RampIndex] ramps) cdef Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants) \ No newline at end of file diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx index d930a2c2..57b6bea0 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx @@ -21,7 +21,7 @@ cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport get_power, reverse_fits, threshold, Fit, Fits, RampIndex, Thresh +from stcal.ramp_fitting.ols_cas22._core cimport get_power, reverse_fits, Fit, Fits, RampIndex from stcal.ramp_fitting.ols_cas22._ramp cimport make_ramp, Ramp @@ -219,7 +219,7 @@ cdef class Ramp: @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef inline Fits fits(Ramp self, stack[RampIndex] ramps, Thresh thresh): + cdef inline Fits fits(Ramp self, stack[RampIndex] ramps): """ Compute all the ramps for a single pixel using the Casertano+22 algorithm with jump detection. @@ -235,8 +235,6 @@ cdef class Ramp: ramps : stack[RampIndex] Stack of initial ramps to fit for a single pixel multiple ramps are possible due to dq flags - thresh : Thresh - Thresholds struct passed in for jump detection Returns ------- @@ -261,7 +259,7 @@ cdef class Ramp: if self.fixed.use_jump: stats = self.stats(fit.slope, ramp) - if max(stats) > threshold(thresh, fit.slope): + if max(stats) > self.threshold.run(fit.slope): # Compute split point to create two new ramps split = np.argmax(stats) From 56d4f45209b1868923a94590e91eeeda4db14ddc Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 24 Aug 2023 16:22:39 -0400 Subject: [PATCH 26/90] Drop public attributes as the things in question don't have to be accessed from python it self --- src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 16 +++++++--------- src/stcal/ramp_fitting/ols_cas22/_ramp.pxd | 8 ++++---- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd index 7e74c118..9a8cc5b7 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -4,17 +4,15 @@ from stcal.ramp_fitting.ols_cas22._core cimport Thresh cdef class Fixed: - cdef public bool use_jump - - cdef public float[:] t_bar, tau - cdef public int[:] n_reads - + cdef bool use_jump + cdef float[:] t_bar, tau + cdef int[:] n_reads cdef Thresh threshold - cdef public float[:] t_bar_1, t_bar_2 - cdef public float[:] t_bar_1_sq, t_bar_2_sq - cdef public float[:] recip_1, recip_2 - cdef public float[:] slope_var_1, slope_var_2 + cdef float[:] t_bar_1, t_bar_2 + cdef float[:] t_bar_1_sq, t_bar_2_sq + cdef float[:] recip_1, recip_2 + cdef float[:] slope_var_1, slope_var_2 cdef float[:] t_bar_diff(Fixed self, int offset) cdef float[:] t_bar_diff_sq(Fixed self, int offset) diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd b/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd index 9f14d9e0..ea21f614 100644 --- 
a/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd @@ -5,11 +5,11 @@ from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed cdef class Ramp: cdef Fixed fixed - cdef public float read_noise - cdef public float [:] resultants + cdef float read_noise + cdef float [:] resultants - cdef public float[:] delta_1, delta_2 - cdef public float[:] sigma_1, sigma_2 + cdef float[:] delta_1, delta_2 + cdef float[:] sigma_1, sigma_2 cdef float[:] resultants_diff(Ramp self, int offset) cdef Fit fit(Ramp self, RampIndex ramp) From 7adc4b530fb80d0443414a5410c6076edabc2397 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 24 Aug 2023 19:49:28 -0400 Subject: [PATCH 27/90] Fix cython lint issues --- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 8 +-- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 14 ++--- src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 3 +- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 22 +++++--- src/stcal/ramp_fitting/ols_cas22/_ramp.pxd | 4 +- src/stcal/ramp_fitting/ols_cas22/_ramp.pyx | 54 +++++++++++-------- 6 files changed, 64 insertions(+), 41 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index 2188aa18..08ce7bbb 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -12,8 +12,10 @@ Structs: float poisson_var: poisson noise variance of single ramp Fits vector[float] slope: slopes of the ramps for a single pixel - vector[float] read_var: read noise variances of the ramps for a single pixel - vector[float] poisson_var: poisson noise variances of the ramps for a single pixel + vector[float] read_var: read noise variances of the ramps for a single + pixel + vector[float] poisson_var: poisson noise variances of the ramps for a + single pixel Objects ------- @@ -33,7 +35,7 @@ from libc.math cimport log10 import numpy as np cimport numpy as np -from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, Thresh, Fit, Fits, get_power, reverse_fits, make_threshold +from stcal.ramp_fitting.ols_cas22._core cimport Thresh, Fits # Casertano+2022, Table 2 diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index cd358e30..3b32f6bf 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -10,7 +10,9 @@ from stcal.ramp_fitting.ols_cas22._ramp cimport make_ramp @cython.boundscheck(False) @cython.wraparound(False) -cdef inline (vector[int], vector[float], vector[float]) read_ma_table(list[list[int]] ma_table, float read_time): +cdef inline (vector[int], vector[float], vector[float]) read_ma_table(list[list[int]] + ma_table, + float read_time): cdef vector[int] n_reads = vector[int](len(ma_table)) cdef vector[float] t_bar = vector[float](len(ma_table)) @@ -19,12 +21,12 @@ cdef inline (vector[int], vector[float], vector[float]) read_ma_table(list[list[ for index, entry in enumerate(ma_table): n_reads[index] = entry[1] t_bar[index] = read_time *(entry[0] + (entry[1] - 1) / 2.0) - tau[index] = t_bar[index] - (entry[1] - 1) * (entry[1] + 1) * read_time / (6 * entry[1]) + tau[index] = t_bar[index] - (entry[1] - 1) * ((entry[1] + 1) * read_time / + (6 * entry[1])) return n_reads, t_bar, tau - @cython.boundscheck(False) @cython.wraparound(False) cdef inline (vector[int], vector[int], vector[int]) end_points(int n_ramp, @@ -139,9 +141,9 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, start, end, pix = end_points(n_ramp, 
n_pixel, n_resultants, dq) for i in range(n_ramp): - fit = make_ramp( - fixed, read_noise[pix[i]], resultants[:, pix[i]]).fit(RampIndex(start[i], end[i])) - + fit = make_ramp(fixed, read_noise[pix[i]], resultants[:, pix[i]] + ).fit(RampIndex(start[i], end[i])) + slope[i] = fit.slope slope_read_var[i] = fit.read_var slope_poisson_var[i] = fit.poisson_var diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd index 9a8cc5b7..0d5c2543 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -22,4 +22,5 @@ cdef class Fixed: cdef float correction(Fixed self, int i, int j) -cdef Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, Thresh threshold, bool use_jump) \ No newline at end of file +cdef Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, + Thresh threshold, bool use_jump) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index 253e0cba..a8cdc3f0 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -1,5 +1,6 @@ """ -Define the data which is fixed for all pixels to compute the CAS22 algorithm with jump detection +Define the data which is fixed for all pixels to compute the CAS22 algorithm with + jump detection Objects ------- @@ -119,7 +120,6 @@ cdef class Fixed: (1/n_reads[i+offset] + 1/n_reads[i]) """ cdef int n_diff = len(self.t_bar) - offset - cdef float[:] recip = ((1 / np.roll(self.n_reads, -offset)).astype(np.float32) + (1 / np.array(self.n_reads)).astype(np.float32))[:n_diff] @@ -157,7 +157,8 @@ cdef class Fixed: Returns ------- - (tau[i] + tau[i+offset] - min(t_bar[i], t_bar[i+offset])) * correction(i, i+offset) + (tau[i] + tau[i+offset] - min(t_bar[i], t_bar[i+offset])) * + correction(i, i+offset) """ cdef int n_diff = len(self.t_bar) - offset @@ -167,13 +168,20 @@ cdef class Fixed: for i in range(n_diff): f_corr[i] = self.correction(i, i + offset) - cdef float[:] slope_var_val = ((self.tau + np.roll(self.tau, -offset) - - np.minimum(self.t_bar, np.roll(self.t_bar, -offset)))[:n_diff]) * f_corr + # Compute rolls to the correct shapes + cdef t_bar_1 = np.array(self.t_bar, dtype=np.float32)[:n_diff] + cdef t_bar_2 = np.array(np.roll(self.t_bar, -offset), dtype=np.float32)[:n_diff] + cdef tau_1 = np.array(self.tau, dtype=np.float32)[:n_diff] + cdef tau_2 = np.array(np.roll(self.tau, -offset), dtype=np.float32)[:n_diff] + + cdef float[:] slope_var_val = (tau_1 + tau_2 - np.minimum(t_bar_1, t_bar_2) + ) * f_corr return slope_var_val -cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, Thresh threshold, bool use_jump): +cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, + Thresh threshold, bool use_jump): """ Fast constructor for Fixed class Use this instead of an __init__ because it does not incure the overhead of @@ -220,4 +228,4 @@ cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, Thres fixed.slope_var_1 = fixed.slope_var_val(1) fixed.slope_var_2 = fixed.slope_var_val(2) - return fixed \ No newline at end of file + return fixed diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd b/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd index ea21f614..fe577d6f 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd @@ -8,7 +8,7 @@ cdef class Ramp: cdef float read_noise cdef float [:] resultants - cdef float[:] delta_1, delta_2 + cdef float[:] 
delta_1, delta_2 cdef float[:] sigma_1, sigma_2 cdef float[:] resultants_diff(Ramp self, int offset) @@ -18,4 +18,4 @@ cdef class Ramp: cdef Fits fits(Ramp self, stack[RampIndex] ramps) -cdef Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants) \ No newline at end of file +cdef Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants) diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx index 57b6bea0..04582c05 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx @@ -12,7 +12,7 @@ Functions make_ramp : function Fast constructor for the Ramp class """ -from libc.math cimport sqrt, fabs, log10 +from libc.math cimport sqrt, fabs from libcpp.vector cimport vector from libcpp.stack cimport stack @@ -21,8 +21,9 @@ cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport get_power, reverse_fits, Fit, Fits, RampIndex -from stcal.ramp_fitting.ols_cas22._ramp cimport make_ramp, Ramp +from stcal.ramp_fitting.ols_cas22._core cimport ( + get_power, reverse_fits, Fit, Fits, RampIndex) +from stcal.ramp_fitting.ols_cas22._ramp cimport Ramp cdef class Ramp: @@ -31,7 +32,7 @@ cdef class Ramp: This data is drawn from for all ramps for a single pixel. This class pre-computes jump detection values shared by all ramps for a given pixel. - + Parameters ---------- fixed : Fixed @@ -198,21 +199,28 @@ cdef class Ramp: Returns ------- - list of statistics for each resultant + list of statistics for each resultant except for the last 2 due to single/double difference due to indexing """ - cdef np.ndarray[float] delta_1 = np.array(self.delta_1[ramp.start:ramp.end-1]) - slope - cdef np.ndarray[float] delta_2 = np.array(self.delta_2[ramp.start:ramp.end-1]) - slope - - cdef np.ndarray[float] var_1 = ((np.array(self.sigma_1[ramp.start:ramp.end-1]) + - slope * np.array(self.slope_var_1[ramp.start:ramp.end-1])) / - self.fixed.t_bar_1_sq[ramp.start:ramp.end-1]).astype(np.float32) - cdef np.ndarray[float] var_2 = ((np.array(self.sigma_2[ramp.start:ramp.end-1]) + - slope * np.array(self.slope_var_2[ramp.start:ramp.end-1])) / - self.fixed.t_bar_2_sq[ramp.start:ramp.end-1]).astype(np.float32) - - cdef np.ndarray[float] stats_1 = (delta_1 / np.sqrt(var_1, dtype=np.float32)).astype(np.float32) - cdef np.ndarray[float] stats_2 = (delta_2 / np.sqrt(var_2, dtype=np.float32)).astype(np.float32) + cdef int start = ramp.start + cdef int end = ramp.end - 1 + + cdef np.ndarray[float] delta_1 = np.array(self.delta_1[start:end]) - slope + cdef np.ndarray[float] delta_2 = np.array(self.delta_2[start:end]) - slope + + cdef np.ndarray[float] var_1 = ((np.array(self.sigma_1[start:end]) + slope * + np.array(self.slope_var_1[start:end])) / + self.fixed.t_bar_1_sq[start:end] + ).astype(np.float32) + cdef np.ndarray[float] var_2 = ((np.array(self.sigma_2[start:end]) + slope * + np.array(self.slope_var_2[start:end])) / + self.fixed.t_bar_2_sq[start:end] + ).astype(np.float32) + + cdef np.ndarray[float] stats_1 = (delta_1 / np.sqrt(var_1, dtype=np.float32) + ).astype(np.float32) + cdef np.ndarray[float] stats_2 = (delta_2 / np.sqrt(var_2, dtype=np.float32) + ).astype(np.float32) return np.maximum(stats_1, stats_2) @@ -235,7 +243,7 @@ cdef class Ramp: ramps : stack[RampIndex] Stack of initial ramps to fit for a single pixel multiple ramps are possible due to dq flags - + Returns ------- Fits struct of all the fits for a single pixel @@ -258,7 +266,7 @@ cdef class Ramp: if self.fixed.use_jump: stats = 
self.stats(fit.slope, ramp) - + if max(stats) > self.threshold.run(fit.slope): # Compute split point to create two new ramps split = np.argmax(stats) @@ -308,10 +316,12 @@ cdef inline Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants): # Pre-compute values for jump detection shared by all ramps for this pixel if fixed.use_jump: - ramp.delta_1 = (np.array(ramp.resultants_diff(1)) / np.array(fixed.t_bar_1)).astype(np.float32) - ramp.delta_2 = (np.array(ramp.resultants_diff(2)) / np.array(fixed.t_bar_2)).astype(np.float32) + ramp.delta_1 = (np.array(ramp.resultants_diff(1)) / + np.array(fixed.t_bar_1)).astype(np.float32) + ramp.delta_2 = (np.array(ramp.resultants_diff(2)) / + np.array(fixed.t_bar_2)).astype(np.float32) ramp.sigma_1 = read_noise * np.array(fixed.recip_1) ramp.sigma_2 = read_noise * np.array(fixed.recip_2) - return ramp \ No newline at end of file + return ramp From 89d8332a8544f0a5f3cdb2bf81989ddda8c4b67a Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 24 Aug 2023 20:31:11 -0400 Subject: [PATCH 28/90] Add initial ramps creator --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 1 + src/stcal/ramp_fitting/ols_cas22/_core.pyx | 68 +++++++++++++++++++ .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 4 +- 3 files changed, 71 insertions(+), 2 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index db504b7b..0e1cb426 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -32,3 +32,4 @@ cdef class Thresh: cdef Thresh make_threshold(float intercept, float constant) cdef float get_power(float s) cdef Fits reverse_fits(Fits fits) +cdef vector[stack[RampIndex]] init_ramps(int[:, :] dq) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index 08ce7bbb..f85bb4d9 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -122,3 +122,71 @@ cdef Thresh make_threshold(float intercept, float constant): thresh.constant = constant return thresh + + +cdef inline vector[stack[RampIndex]] init_ramps(int[:, :] dq): + """ + Create the initial ramp stack for each pixel + if dq[index_resultant, index_pixel] == 0, then the resultant is in a ramp + otherwise, the resultant is not in a ramp + + Parameters + ---------- + dq : int[n_resultants, n_pixel] + DQ array + + Returns + ------- + Vector of stacks of RampIndex objects + - Vector with entry for each pixel + - Stack with entry for each ramp found (top of stack is last ramp found) + - RampIndex with start and end indices of the ramp in the resultants + """ + cdef int n_pixel, n_resultants + + n_resultants, n_pixel = np.array(dq).shape + cdef vector[stack[RampIndex]] pixel_ramps = vector[stack[RampIndex]](n_pixel) + + cdef int index_resultant, index_pixel + cdef stack[RampIndex] ramps + cdef RampIndex ramp + + for index_pixel in range(n_pixel): + ramps = stack[RampIndex]() + + # Note: if start/end are -1, then no value has been assigned + # ramp.start == -1 means we have not started a ramp + # dq[index_resultant, index_pixel] == 0 means resultant is in ramp + ramp = RampIndex(-1, -1) + for index_resultant in range(n_resultants): + if ramp.start == -1: + # Looking for the start of a ramp + if dq[index_resultant, index_pixel] == 0: + # We have found the start of a ramp! 
+ ramp.start = index_resultant + else: + # This is not the start of the ramp yet + continue + else: + # Looking for the end of a ramp + if dq[index_resultant, index_pixel] == 0: + # This pixel is in the ramp do nothing + continue + else: + # This pixel is not in the ramp => index_resultant - 1 is the end of the ramp + ramp.end = index_resultant - 1 + + # Add completed ramp to stack and reset ramp + ramps.push(ramp) + ramp = RampIndex(-1, -1) + + # Handle case where last resultant is in ramp (so no end has been set) + if ramp.start != -1 and ramp.end == -1: + # Last resultant is end of the ramp => set then add to stack + ramp.end = n_resultants - 1 + ramps.push(ramp) + + # Add ramp stack for pixel to vector + pixel_ramps.push_back(ramps) + + return pixel_ramps diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 3b32f6bf..6fb8a57b 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -82,9 +82,9 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, Parameters ---------- - resultants : np.ndarry[nresultants, npixel] + resultants : np.ndarry[n_resultants, n_pixel] the resultants in electrons - dq : np.ndarry[nresultants, npixel] + dq : np.ndarry[n_resultants, n_pixel] the dq array. dq != 0 implies bad pixel / CR. read noise : float the read noise in electrons From b5bede360283798d6b91e9eccd707a277ea22330 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 24 Aug 2023 21:10:05 -0400 Subject: [PATCH 29/90] Add MA table reader --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 7 +++ src/stcal/ramp_fitting/ols_cas22/_core.pyx | 53 +++++++++++++++++-- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 35 ++---------- src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 5 +- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 19 +++---- 5 files changed, 71 insertions(+), 48 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 0e1cb426..bbdff550 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -23,6 +23,12 @@ cdef struct Fits: vector[float] poisson_var +cdef struct DerivedData: + vector[float] t_bar + vector[float] tau + vector[int] n_reads + + cdef class Thresh: cdef float intercept cdef float constant @@ -33,3 +39,4 @@ cdef Thresh make_threshold(float intercept, float constant) cdef float get_power(float s) cdef Fits reverse_fits(Fits fits) cdef vector[stack[RampIndex]] init_ramps(int[:, :] dq) +cdef DerivedData read_data(list[list[int]] ma_table, float read_time) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index f85bb4d9..9518edf8 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -16,6 +16,10 @@ Structs: pixel vector[float] poisson_var: poisson noise variances of the ramps for a single pixel + DerivedData + vector[float] t_bar: mean time of each resultant + vector[float] tau: variance time of each resultant + vector[int] n_reads: number of reads in each resultant Objects ------- @@ -30,12 +34,16 @@ Functions: Compute jump threshold reverse_fits Reverse a Fits struct + init_ramps + Find initial ramps for each pixel + read_ma_table + Read the MA table and Derive the necessary data from it """ from libc.math cimport log10 import numpy as np cimport numpy as np -from stcal.ramp_fitting.ols_cas22._core cimport Thresh, Fits +from 
stcal.ramp_fitting.ols_cas22._core cimport Thresh, Fits, DerivedData # Casertano+2022, Table 2 @@ -159,7 +167,7 @@ cdef inline vector[stack[RampIndex]] init_ramps(int[:, :] dq): # dq[index_resultant, index_pixel] == 0 means resultant is in ramp ramp = RampIndex(-1, -1) for index_resultant in range(n_resultants): - if ramp.start == -1: + if ramp.start == -1: # Looking for the start of a ramp if dq[index_resultant, index_pixel] == 0: # We have found the start of a ramp! @@ -173,7 +181,8 @@ cdef inline vector[stack[RampIndex]] init_ramps(int[:, :] dq): # This pixel is in the ramp do nothing continue else: - # This pixel is not in the ramp => index_resultant - 1 is the end of the ramp + # This pixel is not in the ramp + # => index_resultant - 1 is the end of the ramp ramp.end = index_resultant - 1 # Add completed ramp to stack and reset ramp @@ -190,3 +199,41 @@ cdef inline vector[stack[RampIndex]] init_ramps(int[:, :] dq): pixel_ramps.push_back(ramps) return pixel_ramps + + +cdef inline DerivedData read_data(list[list[int]] ma_table, float read_time): + """ + Derive the input data from the MA table + + Note the MA table is a list of pairs of ints for each resultant: + (first read index, number of reads in resultant) + + Parameters + ---------- + ma_table : list[list[int]] + MA table + read_time : float + Time to perform a readout. + + Returns + ------- + DerivedData struct: + vector[float] t_bar: mean time of each resultant + vector[float] tau: variance time of each resultant + vector[int] n_reads: number of reads in each resultant + """ + cdef int n_resultants = len(ma_table) + cdef DerivedData data = DerivedData(vector[float](n_resultants), + vector[float](n_resultants), + vector[int](n_resultants)) + + cdef int index + cdef list[int] entry + for index, entry in enumerate(ma_table): + data.n_reads[index] = entry[1] + data.t_bar[index] = read_time *(entry[0] + (entry[1] - 1) / 2.0) + data.tau[index] = data.t_bar[index] - (entry[1] - 1) * ((entry[1] + 1) * + read_time / + (6 * entry[1])) + + return data diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 6fb8a57b..6e6e55e5 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -3,30 +3,12 @@ cimport numpy as np from libcpp.vector cimport vector cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport Fit, RampIndex, make_threshold +from stcal.ramp_fitting.ols_cas22._core cimport ( + Fit, RampIndex, make_threshold, read_data) from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed, Fixed from stcal.ramp_fitting.ols_cas22._ramp cimport make_ramp -@cython.boundscheck(False) -@cython.wraparound(False) -cdef inline (vector[int], vector[float], vector[float]) read_ma_table(list[list[int]] - ma_table, - float read_time): - - cdef vector[int] n_reads = vector[int](len(ma_table)) - cdef vector[float] t_bar = vector[float](len(ma_table)) - cdef vector[float] tau = vector[float](len(ma_table)) - - for index, entry in enumerate(ma_table): - n_reads[index] = entry[1] - t_bar[index] = read_time *(entry[0] + (entry[1] - 1) / 2.0) - tau[index] = t_bar[index] - (entry[1] - 1) * ((entry[1] + 1) * read_time / - (6 * entry[1])) - - return n_reads, t_bar, tau - - @cython.boundscheck(False) @cython.wraparound(False) cdef inline (vector[int], vector[int], vector[int]) end_points(int n_ramp, @@ -116,16 +98,9 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, raise RuntimeError(f'MA table length {n_resultants} does not ' 
f'match number of resultants {resultants.shape[0]}') - cdef vector[int] n_reads - cdef vector[float] t_bar, tau - n_reads, t_bar, tau = read_ma_table(ma_table, read_time) - - cdef Fixed fixed = make_fixed( - t_bar.data(), - tau.data(), - n_reads.data(), - make_threshold(5.5, 1/3.0), - use_jumps) + cdef Fixed fixed = make_fixed(read_data(ma_table, read_time), + make_threshold(5.5, 1/3.0), + use_jumps) cdef int n_pixel = resultants.shape[1] cdef int n_ramp = (np.sum(dq[0, :] == 0) + diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd index 0d5c2543..eb194ead 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -1,6 +1,6 @@ from libcpp cimport bool -from stcal.ramp_fitting.ols_cas22._core cimport Thresh +from stcal.ramp_fitting.ols_cas22._core cimport Thresh, DerivedData cdef class Fixed: @@ -22,5 +22,4 @@ cdef class Fixed: cdef float correction(Fixed self, int i, int j) -cdef Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, - Thresh threshold, bool use_jump) +cdef Fixed make_fixed(DerivedData data, Thresh threshold, bool use_jump) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index a8cdc3f0..002799d8 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -16,7 +16,7 @@ make_fixed : function import numpy as np cimport numpy as np -from stcal.ramp_fitting.ols_cas22._core cimport Thresh +from stcal.ramp_fitting.ols_cas22._core cimport Thresh, DerivedData from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed cdef class Fixed: @@ -180,8 +180,7 @@ cdef class Fixed: return slope_var_val -cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, - Thresh threshold, bool use_jump): +cdef inline Fixed make_fixed(DerivedData data, Thresh threshold, bool use_jump): """ Fast constructor for Fixed class Use this instead of an __init__ because it does not incure the overhead of @@ -189,12 +188,8 @@ cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, Parameters ---------- - t_bar : float[:] - mean times of resultants (data input) - tau : float[:] - variance weighted mean times of resultants (data input) - n_reads : float[:] - number of reads contributing to reach resultant (data input) + data : DerivedData + derived data object created from MA table (input data) threshold : Thresh threshold object (user input) use_jump : bool @@ -210,9 +205,9 @@ cdef inline Fixed make_fixed(float[:] t_bar, float[:] tau, int[:] n_reads, fixed.use_jump = use_jump fixed.threshold = threshold - fixed.t_bar = t_bar - fixed.tau = tau - fixed.n_reads = n_reads + fixed.t_bar = data.t_bar.data() + fixed.tau = data.tau.data() + fixed.n_reads = data.n_reads.data() # Pre-compute jump detection computations shared by all pixels if use_jump: From ade66d6b5c5fb37c38292f90f296c986e1b6b1b5 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 25 Aug 2023 10:20:27 -0400 Subject: [PATCH 30/90] Swap over to using "jump" for all the ramps --- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 93 ++++++------------- 1 file changed, 30 insertions(+), 63 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 6e6e55e5..214dab1f 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -1,52 +1,15 @@ import numpy as np cimport numpy as np from 
libcpp.vector cimport vector +from libcpp.stack cimport stack cimport cython from stcal.ramp_fitting.ols_cas22._core cimport ( - Fit, RampIndex, make_threshold, read_data) + Fits, RampIndex, make_threshold, read_data, init_ramps) from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed, Fixed from stcal.ramp_fitting.ols_cas22._ramp cimport make_ramp -@cython.boundscheck(False) -@cython.wraparound(False) -cdef inline (vector[int], vector[int], vector[int]) end_points(int n_ramp, - int n_pixel, - int n_resultants, - int[:, :] dq): - - cdef vector[int] start = vector[int](n_ramp, -1) - cdef vector[int] end = vector[int](n_ramp, -1) - cdef vector[int] pix = vector[int](n_ramp, -1) - - cdef int i, j - cdef int in_ramp = -1 - cdef int ramp_num = 0 - for i in range(n_pixel): - in_ramp = 0 - for j in range(n_resultants): - if (not in_ramp) and (dq[j, i] == 0): - in_ramp = 1 - pix[ramp_num] = i - start[ramp_num] = j - elif (not in_ramp) and (dq[j, i] != 0): - continue - elif in_ramp and (dq[j, i] == 0): - continue - elif in_ramp and (dq[j, i] != 0): - in_ramp = 0 - end[ramp_num] = j - 1 - ramp_num += 1 - else: - raise ValueError('unhandled case') - if in_ramp: - end[ramp_num] = j - ramp_num += 1 - - return start, end, pix - - @cython.boundscheck(False) @cython.wraparound(False) def fit_ramps(np.ndarray[float, ndim=2] resultants, @@ -98,31 +61,35 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, raise RuntimeError(f'MA table length {n_resultants} does not ' f'match number of resultants {resultants.shape[0]}') + # Pre-compute data for all pixels cdef Fixed fixed = make_fixed(read_data(ma_table, read_time), make_threshold(5.5, 1/3.0), use_jumps) - cdef int n_pixel = resultants.shape[1] - cdef int n_ramp = (np.sum(dq[0, :] == 0) + - np.sum((dq[:-1, :] != 0) & (dq[1:, :] == 0))) - - # numpy arrays so that we get numpy arrays out - cdef np.ndarray[float] slope = np.zeros(n_ramp, dtype=np.float32) - cdef np.ndarray[float] slope_read_var = np.zeros(n_ramp, dtype=np.float32) - cdef np.ndarray[float] slope_poisson_var = np.zeros(n_ramp, dtype=np.float32) - cdef Fit fit - - cdef vector[int] start, end, pix - start, end, pix = end_points(n_ramp, n_pixel, n_resultants, dq) - - for i in range(n_ramp): - fit = make_ramp(fixed, read_noise[pix[i]], resultants[:, pix[i]] - ).fit(RampIndex(start[i], end[i])) - - slope[i] = fit.slope - slope_read_var[i] = fit.read_var - slope_poisson_var[i] = fit.poisson_var - - return dict(slope=slope, slopereadvar=slope_read_var, - slopepoissonvar=slope_poisson_var, - pix=pix, resstart=start, resend=end) + # Compute all the initial sets of ramps + cdef vector[stack[RampIndex]] pixel_ramps = init_ramps(dq) + + # Set up the output lists + # Thes are python lists because cython does not support templating + # types baised on Python types like what numpy arrays are. + # This is an annoying limitation. 
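Taken together, the dq-based segmentation from init_ramps and the per-pixel loop that follows behave roughly like this pure-Python sketch, where fit_pixel stands in for the per-pixel fits call and is not part of the module:

import numpy as np

def fit_all_pixels(resultants, dq, fit_pixel):
    n_resultants, n_pixels = resultants.shape
    results = []
    for index in range(n_pixels):
        good = dq[:, index] == 0                   # resultant is usable when dq == 0
        ramps, start = [], None                    # contiguous runs of good resultants
        for j in range(n_resultants):
            if good[j] and start is None:
                start = j
            elif not good[j] and start is not None:
                ramps.append((start, j - 1))
                start = None
        if start is not None:
            ramps.append((start, n_resultants - 1))
        results.append(fit_pixel(resultants[:, index], ramps))
    return results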
+ slopes = [] + read_vars = [] + poisson_vars = [] + + # Perform all of the fits + cdef Fits fits + cdef int index + for index in range(n_resultants): + # Fit all the ramps for the given pixel + fits = make_ramp(fixed, read_noise, + resultants[:, index]).fits(pixel_ramps[index]) + + # Cast into numpy arrays for output + slopes.append(np.array( fits.slope.data())) + read_vars.append(np.array( fits.read_var.data())) + poisson_vars.append(np.array( + fits.poisson_var.data())) + + return dict(slope=slopes, slopereadvar=read_vars, + slopepoissonvar=poisson_vars) From 9d9fa15a098761618a8eb32684432ea7b4cf0b20 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 25 Aug 2023 10:47:26 -0400 Subject: [PATCH 31/90] Move from Ramp to Pixel This is a better description since it handles the fit for all ramps for a pixel --- setup.py | 4 +- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 6 +-- .../ols_cas22/{_ramp.pxd => _pixel.pxd} | 12 ++--- .../ols_cas22/{_ramp.pyx => _pixel.pyx} | 46 +++++++++---------- 4 files changed, 34 insertions(+), 34 deletions(-) rename src/stcal/ramp_fitting/ols_cas22/{_ramp.pxd => _pixel.pxd} (50%) rename src/stcal/ramp_fitting/ols_cas22/{_ramp.pyx => _pixel.pyx} (89%) diff --git a/setup.py b/setup.py index 38937221..b34e7dfa 100644 --- a/setup.py +++ b/setup.py @@ -20,8 +20,8 @@ language='c++' ), Extension( - 'stcal.ramp_fitting.ols_cas22._ramp', - ['src/stcal/ramp_fitting/ols_cas22/_ramp.pyx'], + 'stcal.ramp_fitting.ols_cas22._pixel', + ['src/stcal/ramp_fitting/ols_cas22/_pixel.pyx'], include_dirs=[np.get_include()], language='c++' ), diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 214dab1f..8b96853d 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -7,7 +7,7 @@ cimport cython from stcal.ramp_fitting.ols_cas22._core cimport ( Fits, RampIndex, make_threshold, read_data, init_ramps) from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed, Fixed -from stcal.ramp_fitting.ols_cas22._ramp cimport make_ramp +from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel @cython.boundscheck(False) @@ -82,8 +82,8 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, cdef int index for index in range(n_resultants): # Fit all the ramps for the given pixel - fits = make_ramp(fixed, read_noise, - resultants[:, index]).fits(pixel_ramps[index]) + fits = make_pixel(fixed, read_noise, + resultants[:, index]).fits(pixel_ramps[index]) # Cast into numpy arrays for output slopes.append(np.array( fits.slope.data())) diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd similarity index 50% rename from src/stcal/ramp_fitting/ols_cas22/_ramp.pxd rename to src/stcal/ramp_fitting/ols_cas22/_pixel.pxd index fe577d6f..c222aa20 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ramp.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd @@ -3,7 +3,7 @@ from libcpp.stack cimport stack from stcal.ramp_fitting.ols_cas22._core cimport Fit, Fits, RampIndex, Thresh from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed -cdef class Ramp: +cdef class Pixel: cdef Fixed fixed cdef float read_noise cdef float [:] resultants @@ -11,11 +11,11 @@ cdef class Ramp: cdef float[:] delta_1, delta_2 cdef float[:] sigma_1, sigma_2 - cdef float[:] resultants_diff(Ramp self, int offset) - cdef Fit fit(Ramp self, RampIndex ramp) + cdef float[:] resultants_diff(Pixel self, int offset) + cdef Fit fit(Pixel self, RampIndex ramp) - cdef float[:] 
stats(Ramp self, float slope, RampIndex ramp) - cdef Fits fits(Ramp self, stack[RampIndex] ramps) + cdef float[:] stats(Pixel self, float slope, RampIndex ramp) + cdef Fits fits(Pixel self, stack[RampIndex] ramps) -cdef Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants) +cdef Pixel make_pixel(Fixed fixed, float read_noise, float [:] resultants) diff --git a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx similarity index 89% rename from src/stcal/ramp_fitting/ols_cas22/_ramp.pyx rename to src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 04582c05..1e916901 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_ramp.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -3,14 +3,14 @@ Define the C class for the CAS22 algorithm for fitting ramps with jump detection Objects ------- -Ramp : class +Pixel : class Class to handle ramp fit with jump detection for a single pixel Provides fits method which fits all the ramps for a single pixel Functions --------- make_ramp : function - Fast constructor for the Ramp class + Fast constructor for the Pixel class """ from libc.math cimport sqrt, fabs from libcpp.vector cimport vector @@ -23,10 +23,10 @@ cimport cython from stcal.ramp_fitting.ols_cas22._core cimport ( get_power, reverse_fits, Fit, Fits, RampIndex) -from stcal.ramp_fitting.ols_cas22._ramp cimport Ramp +from stcal.ramp_fitting.ols_cas22._pixel cimport Pixel -cdef class Ramp: +cdef class Pixel: """ Class to contain the data to fit ramps for a single pixel. This data is drawn from for all ramps for a single pixel. @@ -74,7 +74,7 @@ cdef class Ramp: @cython.boundscheck(False) @cython.wraparound(False) - cdef inline float[:] resultants_diff(Ramp self, int offset): + cdef inline float[:] resultants_diff(Pixel self, int offset): """ Compute the difference offset of resultants @@ -94,7 +94,7 @@ cdef class Ramp: @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef inline Fit fit(Ramp self, RampIndex ramp): + cdef inline Fit fit(Pixel self, RampIndex ramp): """ Fit a single ramp using Casertano+22 algorithm. @@ -180,7 +180,7 @@ cdef class Ramp: @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef inline float[:] stats(Ramp self, float slope, RampIndex ramp): + cdef inline float[:] stats(Pixel self, float slope, RampIndex ramp): """ Compute fit statistics for jump detection on a single ramp Computed using: @@ -227,7 +227,7 @@ cdef class Ramp: @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef inline Fits fits(Ramp self, stack[RampIndex] ramps): + cdef inline Fits fits(Pixel self, stack[RampIndex] ramps): """ Compute all the ramps for a single pixel using the Casertano+22 algorithm with jump detection. @@ -287,9 +287,9 @@ cdef class Ramp: return reverse_fits(fits) -cdef inline Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants): +cdef inline Pixel make_pixel(Fixed fixed, float read_noise, float [:] resultants): """ - Fast constructor for the Ramp C class. + Fast constructor for the Pixel C class. 
This is signifantly faster than using the `__init__` or `__cinit__` this is because this does not have to pass through the Python as part @@ -305,23 +305,23 @@ cdef inline Ramp make_ramp(Fixed fixed, float read_noise, float [:] resultants): Return ------ - Ramp C-class object (with pre-computed values if use_jump is True) + Pixel C-class object (with pre-computed values if use_jump is True) """ - cdef Ramp ramp = Ramp() + cdef Pixel pixel = Pixel() # Fill in input information for pixel - ramp.fixed = fixed - ramp.read_noise = read_noise - ramp.resultants = resultants + pixel.fixed = fixed + pixel.read_noise = read_noise + pixel.resultants = resultants - # Pre-compute values for jump detection shared by all ramps for this pixel + # Pre-compute values for jump detection shared by all pixels for this pixel if fixed.use_jump: - ramp.delta_1 = (np.array(ramp.resultants_diff(1)) / - np.array(fixed.t_bar_1)).astype(np.float32) - ramp.delta_2 = (np.array(ramp.resultants_diff(2)) / - np.array(fixed.t_bar_2)).astype(np.float32) + pixel.delta_1 = (np.array(pixel.resultants_diff(1)) / + np.array(fixed.t_bar_1)).astype(np.float32) + pixel.delta_2 = (np.array(pixel.resultants_diff(2)) / + np.array(fixed.t_bar_2)).astype(np.float32) - ramp.sigma_1 = read_noise * np.array(fixed.recip_1) - ramp.sigma_2 = read_noise * np.array(fixed.recip_2) + pixel.sigma_1 = read_noise * np.array(fixed.recip_1) + pixel.sigma_2 = read_noise * np.array(fixed.recip_2) - return ramp + return pixel From 0e064c887bfba5e598e71dee25f32fd8b10a4de3 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 25 Aug 2023 11:07:37 -0400 Subject: [PATCH 32/90] "fit" to "ramp_fit" "fit" was a bit to vague explicitly calling it ramp_fit makes it easier to follow --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 6 +-- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 16 ++++--- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 18 ++++---- src/stcal/ramp_fitting/ols_cas22/_pixel.pxd | 6 +-- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 45 ++++++++++--------- 5 files changed, 48 insertions(+), 43 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index bbdff550..8377193e 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -11,13 +11,13 @@ cdef struct RampIndex: int end -cdef struct Fit: +cdef struct RampFit: float slope float read_var float poisson_var -cdef struct Fits: +cdef struct RampFits: vector[float] slope vector[float] read_var vector[float] poisson_var @@ -37,6 +37,6 @@ cdef class Thresh: cdef Thresh make_threshold(float intercept, float constant) cdef float get_power(float s) -cdef Fits reverse_fits(Fits fits) +cdef RampFits reverse_fits(RampFits ramp_fits) cdef vector[stack[RampIndex]] init_ramps(int[:, :] dq) cdef DerivedData read_data(list[list[int]] ma_table, float read_time) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index 9518edf8..61922ade 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -6,11 +6,11 @@ Structs: RampIndex int start: starting index of the ramp in the resultants int end: ending index of the ramp in the resultants - Fit + RampFit float slope: slope of a single ramp float read_var: read noise variance of a single ramp float poisson_var: poisson noise variance of single ramp - Fits + RampFits vector[float] slope: slopes of the ramps for a single pixel vector[float] read_var: read noise variances 
of the ramps for a single pixel @@ -43,7 +43,7 @@ from libc.math cimport log10 import numpy as np cimport numpy as np -from stcal.ramp_fitting.ols_cas22._core cimport Thresh, Fits, DerivedData +from stcal.ramp_fitting.ols_cas22._core cimport Thresh, RampFits, DerivedData # Casertano+2022, Table 2 @@ -73,23 +73,25 @@ cdef inline float get_power(float s): return PTABLE[1][i] -cdef inline Fits reverse_fits(Fits fits): +cdef inline RampFits reverse_fits(RampFits ramp_fits): """ - Reverse a Fits struct + Reverse a RampFits struct The jump detection step computes the ramps in reverse time order for each pixel. This reverses the results of the fit to match the original time order, which is much faster than prepending to a C++ vector. Parameters ---------- - fits : Fits + ramp_fits : RampFits fits struct to reverse Returns ------- reversed fits struct """ - return Fits(fits.slope[::-1], fits.read_var[::-1], fits.poisson_var[::-1]) + return RampFits(ramp_fits.slope[::-1], + ramp_fits.read_var[::-1], + ramp_fits.poisson_var[::-1]) cdef class Thresh: diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 8b96853d..a209b679 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -5,7 +5,7 @@ from libcpp.stack cimport stack cimport cython from stcal.ramp_fitting.ols_cas22._core cimport ( - Fits, RampIndex, make_threshold, read_data, init_ramps) + RampFits, RampIndex, make_threshold, read_data, init_ramps) from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed, Fixed from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel @@ -78,18 +78,20 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, poisson_vars = [] # Perform all of the fits - cdef Fits fits + cdef RampFits ramp_fits cdef int index for index in range(n_resultants): # Fit all the ramps for the given pixel - fits = make_pixel(fixed, read_noise, - resultants[:, index]).fits(pixel_ramps[index]) + ramp_fits = make_pixel(fixed, read_noise, + resultants[:, index]).fit_ramps(pixel_ramps[index]) # Cast into numpy arrays for output - slopes.append(np.array( fits.slope.data())) - read_vars.append(np.array( fits.read_var.data())) - poisson_vars.append(np.array( - fits.poisson_var.data())) + slopes.append(np.array( + ramp_fits.slope.data())) + read_vars.append(np.array( + ramp_fits.read_var.data())) + poisson_vars.append(np.array( + ramp_fits.poisson_var.data())) return dict(slope=slopes, slopereadvar=read_vars, slopepoissonvar=poisson_vars) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd index c222aa20..1189f8c1 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd @@ -1,6 +1,6 @@ from libcpp.stack cimport stack -from stcal.ramp_fitting.ols_cas22._core cimport Fit, Fits, RampIndex, Thresh +from stcal.ramp_fitting.ols_cas22._core cimport RampFit, RampFits, RampIndex, Thresh from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed cdef class Pixel: @@ -12,10 +12,10 @@ cdef class Pixel: cdef float[:] sigma_1, sigma_2 cdef float[:] resultants_diff(Pixel self, int offset) - cdef Fit fit(Pixel self, RampIndex ramp) + cdef RampFit fit_ramp(Pixel self, RampIndex ramp) cdef float[:] stats(Pixel self, float slope, RampIndex ramp) - cdef Fits fits(Pixel self, stack[RampIndex] ramps) + cdef RampFits fit_ramps(Pixel self, stack[RampIndex] ramps) cdef Pixel make_pixel(Fixed fixed, float read_noise, float [:] resultants) diff --git 
a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 1e916901..3c6d0469 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -22,7 +22,7 @@ cimport cython from stcal.ramp_fitting.ols_cas22._core cimport ( - get_power, reverse_fits, Fit, Fits, RampIndex) + get_power, reverse_fits, RampFit, RampFits, RampIndex) from stcal.ramp_fitting.ols_cas22._pixel cimport Pixel @@ -94,7 +94,7 @@ cdef class Pixel: @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef inline Fit fit(Pixel self, RampIndex ramp): + cdef inline RampFit fit_ramp(Pixel self, RampIndex ramp): """ Fit a single ramp using Casertano+22 algorithm. @@ -105,14 +105,14 @@ cdef class Pixel: Returns ------- - Fit struct of slope, read_var, poisson_var + RampFit struct of slope, read_var, poisson_var """ cdef int n_resultants = ramp.end - ramp.start + 1 - cdef Fit fit = Fit(0, 0, 0) + cdef RampFit ramp_fit = RampFit(0, 0, 0) # Special case where there is no or one resultant, there is no fit. if n_resultants <= 1: - return fit + return ramp_fit # Else, do the fitting. # Setup data for fitting (work over subset of data) @@ -157,7 +157,7 @@ cdef class Pixel: # Casertano+22 Eq. 36 cdef float det = f2 * f0 - f1 ** 2 if det == 0: - return fit + return ramp_fit for i in range(n_resultants): # Casertano+22 Eq. 37 @@ -165,17 +165,18 @@ cdef class Pixel: for i in range(n_resultants): # Casertano+22 Eq. 38 - fit.slope += coeffs[i] * resultants[i] + ramp_fit.slope += coeffs[i] * resultants[i] # Casertano+22 Eq. 39 - fit.read_var += (coeffs[i] ** 2 * self.fixed.read_noise ** 2 / n_reads[i]) + ramp_fit.read_var += (coeffs[i] ** 2 * self.fixed.read_noise ** 2 / + n_reads[i]) # Casertano+22 Eq 40 - fit.poisson_var += coeffs[i] ** 2 * tau[i] + ramp_fit.poisson_var += coeffs[i] ** 2 * tau[i] for j in range(i + 1, n_resultants): - fit.poisson_var += (2 * coeffs[i] * coeffs[j] * t_bar[i]) + ramp_fit.poisson_var += (2 * coeffs[i] * coeffs[j] * t_bar[i]) - return fit + return ramp_fit @cython.boundscheck(False) @cython.wraparound(False) @@ -227,7 +228,7 @@ cdef class Pixel: @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef inline Fits fits(Pixel self, stack[RampIndex] ramps): + cdef inline RampFits fit_ramps(Pixel self, stack[RampIndex] ramps): """ Compute all the ramps for a single pixel using the Casertano+22 algorithm with jump detection. 
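Note: the hunks below rename the pieces of the split-and-refit loop at the heart of jump detection. Each ramp is popped off the per-pixel stack, fit, and, if the largest jump statistic exceeds the threshold, split at the argmax of the statistics into two new ramps that are pushed back for refitting; otherwise the fit is kept and the collected results are reversed into time order at the end. A minimal plain-Python sketch of that control flow follows; the helper callables (fit_one, jump_stats, threshold) are illustrative stand-ins for the Cython methods, and the exact split boundaries are an assumption, not the implementation.

    import numpy as np

    def fit_ramps_sketch(resultants, ramps, fit_one, jump_stats, threshold):
        # ramps: list of (start, end) index pairs, last ramp found on top
        fits = []
        while ramps:
            start, end = ramps.pop()
            fit = fit_one(resultants, start, end)
            stats = np.asarray(jump_stats(resultants, fit, start, end))
            if stats.size and stats.max() > threshold(fit):
                # Split at the suspected jump and refit both halves.
                split = int(np.argmax(stats))
                # Illustrative split indices only; the Cython code defines
                # the exact boundaries around the jump.
                ramps.append((start, start + split))
                ramps.append((start + split + 2, end))
                continue
            fits.append(fit)
        # Fits were collected in reverse time order (cf. reverse_fits below).
        return fits[::-1]
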
@@ -246,12 +247,12 @@ cdef class Pixel: Returns ------- - Fits struct of all the fits for a single pixel + RampFits struct of all the fits for a single pixel """ # Setup algorithm - cdef Fits fits + cdef RampFits ramp_fits cdef RampIndex ramp - cdef Fit fit + cdef RampFit ramp_fit cdef float [:] stats cdef int split @@ -262,12 +263,12 @@ cdef class Pixel: ramps.pop() # Compute fit - fit = self.fit(ramp) + ramp_fit = self.ramp_fit(ramp) if self.fixed.use_jump: - stats = self.stats(fit.slope, ramp) + stats = self.stats(ramp_fit.slope, ramp) - if max(stats) > self.threshold.run(fit.slope): + if max(stats) > self.threshold.run(ramp_fit.slope): # Compute split point to create two new ramps split = np.argmax(stats) @@ -279,12 +280,12 @@ cdef class Pixel: continue # Add fit to fits if no jump detection or stats are less than threshold - fits.slope.push_back(fit.slope) - fits.read_var.push_back(fit.read_var) - fits.poisson_var.push_back(fit.poisson_var) + ramp_fits.slope.push_back(ramp_fit.slope) + ramp_fits.read_var.push_back(ramp_fit.read_var) + ramp_fits.poisson_var.push_back(ramp_fit.poisson_var) # Reverse the slope data - return reverse_fits(fits) + return reverse_fits(ramp_fits) cdef inline Pixel make_pixel(Fixed fixed, float read_noise, float [:] resultants): From 03f9139121776614f4201eaee43b522efaae23d5 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 25 Aug 2023 11:15:08 -0400 Subject: [PATCH 33/90] Use C++ list templates instead of vectors --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 8 ++--- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 35 ++++--------------- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 20 ++++------- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 18 +++------- 4 files changed, 21 insertions(+), 60 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 8377193e..003e0804 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -2,6 +2,7 @@ import numpy as np cimport numpy as np from libcpp.vector cimport vector from libcpp.stack cimport stack +from libcpp.list cimport list as cpp_list from libcpp cimport bool from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed @@ -18,9 +19,9 @@ cdef struct RampFit: cdef struct RampFits: - vector[float] slope - vector[float] read_var - vector[float] poisson_var + cpp_list[float] slope + cpp_list[float] read_var + cpp_list[float] poisson_var cdef struct DerivedData: @@ -37,6 +38,5 @@ cdef class Thresh: cdef Thresh make_threshold(float intercept, float constant) cdef float get_power(float s) -cdef RampFits reverse_fits(RampFits ramp_fits) cdef vector[stack[RampIndex]] init_ramps(int[:, :] dq) cdef DerivedData read_data(list[list[int]] ma_table, float read_time) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index 61922ade..e5520e1e 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -11,11 +11,11 @@ Structs: float read_var: read noise variance of a single ramp float poisson_var: poisson noise variance of single ramp RampFits - vector[float] slope: slopes of the ramps for a single pixel - vector[float] read_var: read noise variances of the ramps for a single - pixel - vector[float] poisson_var: poisson noise variances of the ramps for a - single pixel + cpp_list[float] slope: slopes of the ramps for a single pixel + cpp_list[float] read_var: read noise variances of the ramps for a single + pixel + 
cpp_list[float] poisson_var: poisson noise variances of the ramps for a + single pixel DerivedData vector[float] t_bar: mean time of each resultant vector[float] tau: variance time of each resultant @@ -32,8 +32,6 @@ Functions: Return the power from Casertano+22, Table 2 threshold Compute jump threshold - reverse_fits - Reverse a Fits struct init_ramps Find initial ramps for each pixel read_ma_table @@ -43,7 +41,7 @@ from libc.math cimport log10 import numpy as np cimport numpy as np -from stcal.ramp_fitting.ols_cas22._core cimport Thresh, RampFits, DerivedData +from stcal.ramp_fitting.ols_cas22._core cimport Thresh, DerivedData # Casertano+2022, Table 2 @@ -73,27 +71,6 @@ cdef inline float get_power(float s): return PTABLE[1][i] -cdef inline RampFits reverse_fits(RampFits ramp_fits): - """ - Reverse a RampFits struct - The jump detection step computes the ramps in reverse time order for each pixel. - This reverses the results of the fit to match the original time order, which is - much faster than prepending to a C++ vector. - - Parameters - ---------- - ramp_fits : RampFits - fits struct to reverse - - Returns - ------- - reversed fits struct - """ - return RampFits(ramp_fits.slope[::-1], - ramp_fits.read_var[::-1], - ramp_fits.poisson_var[::-1]) - - cdef class Thresh: cdef inline float run(Thresh self, float slope): """ diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index a209b679..a14c9154 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -2,6 +2,7 @@ import numpy as np cimport numpy as np from libcpp.vector cimport vector from libcpp.stack cimport stack +from libcpp.list cimport list as cpp_list cimport cython from stcal.ramp_fitting.ols_cas22._core cimport ( @@ -69,13 +70,7 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, # Compute all the initial sets of ramps cdef vector[stack[RampIndex]] pixel_ramps = init_ramps(dq) - # Set up the output lists - # Thes are python lists because cython does not support templating - # types baised on Python types like what numpy arrays are. - # This is an annoying limitation. 
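Aside on the container change in this patch: reverse_fits (removed above) is no longer needed because each fit is now pushed onto the front of a C++ list as it is produced, so the results come out already in time order. The same idea in plain Python, using collections.deque as a rough stand-in for std::list; the function name is illustrative only.

    from collections import deque

    def collect_in_time_order(fits_newest_first):
        # Fits arrive in reverse time order; an O(1) prepend keeps them in
        # time order without an append-then-reverse pass.
        ordered = deque()
        for fit in fits_newest_first:
            ordered.appendleft(fit)
        return list(ordered)

    assert collect_in_time_order([3, 2, 1]) == [1, 2, 3]
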
- slopes = [] - read_vars = [] - poisson_vars = [] + cdef cpp_list[vector[float]] slopes, read_vars, poisson_vars # Perform all of the fits cdef RampFits ramp_fits @@ -85,13 +80,10 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, ramp_fits = make_pixel(fixed, read_noise, resultants[:, index]).fit_ramps(pixel_ramps[index]) - # Cast into numpy arrays for output - slopes.append(np.array( - ramp_fits.slope.data())) - read_vars.append(np.array( - ramp_fits.read_var.data())) - poisson_vars.append(np.array( - ramp_fits.poisson_var.data())) + # Build the output arrays + slopes.push_back(ramp_fits.slopes) + read_vars.push_back(ramp_fits.read_vars) + poisson_vars.push_back(ramp_fits.poisson_vars) return dict(slope=slopes, slopereadvar=read_vars, slopepoissonvar=poisson_vars) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 3c6d0469..ec1ee336 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -21,8 +21,7 @@ cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport ( - get_power, reverse_fits, RampFit, RampFits, RampIndex) +from stcal.ramp_fitting.ols_cas22._core cimport get_power, RampFit, RampFits, RampIndex from stcal.ramp_fitting.ols_cas22._pixel cimport Pixel @@ -233,12 +232,6 @@ cdef class Pixel: Compute all the ramps for a single pixel using the Casertano+22 algorithm with jump detection. - Note: This algorithm computes the ramps for the pixel in reverse time order - so that the last uncomputed ramp in time is always on top of the stack. - This means we compute the slopes in reverse time order, so we have to - reverse the order of the output data to be consistent with user - expectations. - Parameters ---------- ramps : stack[RampIndex] @@ -280,12 +273,11 @@ cdef class Pixel: continue # Add fit to fits if no jump detection or stats are less than threshold - ramp_fits.slope.push_back(ramp_fit.slope) - ramp_fits.read_var.push_back(ramp_fit.read_var) - ramp_fits.poisson_var.push_back(ramp_fit.poisson_var) + ramp_fits.slope.push_front(ramp_fit.slope) + ramp_fits.read_var.push_front(ramp_fit.read_var) + ramp_fits.poisson_var.push_front(ramp_fit.poisson_var) - # Reverse the slope data - return reverse_fits(ramp_fits) + return ramp_fits cdef inline Pixel make_pixel(Fixed fixed, float read_noise, float [:] resultants): From 5721c9d143ffac55441252b8d26d6da666b3afdb Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 25 Aug 2023 11:42:03 -0400 Subject: [PATCH 34/90] Use deque --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 3 ++- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 15 +++++++++------ src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx | 5 +++-- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 8 +++++++- 4 files changed, 21 insertions(+), 10 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 003e0804..a7dbeef8 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -3,6 +3,7 @@ cimport numpy as np from libcpp.vector cimport vector from libcpp.stack cimport stack from libcpp.list cimport list as cpp_list +from libcpp.deque cimport deque from libcpp cimport bool from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed @@ -38,5 +39,5 @@ cdef class Thresh: cdef Thresh make_threshold(float intercept, float constant) cdef float get_power(float s) -cdef vector[stack[RampIndex]] init_ramps(int[:, :] dq) +cdef deque[stack[RampIndex]] 
init_ramps(int[:, :] dq) cdef DerivedData read_data(list[list[int]] ma_table, float read_time) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index e5520e1e..ae5dded9 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -37,6 +37,8 @@ Functions: read_ma_table Read the MA table and Derive the necessary data from it """ +from libcpp.stack cimport stack +from libcpp.deque cimport deque from libc.math cimport log10 import numpy as np cimport numpy as np @@ -111,7 +113,7 @@ cdef Thresh make_threshold(float intercept, float constant): return thresh -cdef inline vector[stack[RampIndex]] init_ramps(int[:, :] dq): +cdef inline deque[stack[RampIndex]] init_ramps(int[:, :] dq): """ Create the initial ramp stack for each pixel if dq[index_resultant, index_pixel] == 0, then the resultant is in a ramp @@ -124,15 +126,16 @@ cdef inline vector[stack[RampIndex]] init_ramps(int[:, :] dq): Returns ------- - Vector of stacks of RampIndex objects - - Vector with entry for each pixel - - Stack with entry for each ramp found (top of stack is last ramp found) + deque of stacks of RampIndex objects + - deque with entry for each pixel + Chosen to be deque because need element access to loop + - stack with entry for each ramp found (top of stack is last ramp found) - RampIndex with start and end indices of the ramp in the resultants """ cdef int n_pixel, n_resultants n_resultants, n_pixel = np.array(dq).shape - cdef vector[stack[RampIndex]] pixel_ramps = vector[stack[RampIndex]](n_pixel) + cdef deque[stack[RampIndex]] pixel_ramps cdef int index_resultant, index_pixel cdef stack[RampIndex] ramps @@ -174,7 +177,7 @@ cdef inline vector[stack[RampIndex]] init_ramps(int[:, :] dq): ramp.end = n_resultants - 1 ramps.push(ramp) - # Add ramp stack for pixel to vector + # Add ramp stack for pixel to list pixel_ramps.push_back(ramps) return pixel_ramps diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index a14c9154..cfeee25f 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -3,6 +3,7 @@ cimport numpy as np from libcpp.vector cimport vector from libcpp.stack cimport stack from libcpp.list cimport list as cpp_list +from libcpp.deque cimport deque cimport cython from stcal.ramp_fitting.ols_cas22._core cimport ( @@ -68,9 +69,9 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, use_jumps) # Compute all the initial sets of ramps - cdef vector[stack[RampIndex]] pixel_ramps = init_ramps(dq) + cdef deque[stack[RampIndex]] pixel_ramps = init_ramps(dq) - cdef cpp_list[vector[float]] slopes, read_vars, poisson_vars + cdef cpp_list[cpp_list[float]] slopes, read_vars, poisson_vars # Perform all of the fits cdef RampFits ramp_fits diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index ec1ee336..0a0a21fb 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -272,7 +272,13 @@ cdef class Pixel: # Return to top of loop to fit new ramps (without adding to fits) continue - # Add fit to fits if no jump detection or stats are less than threshold + # Add ramp_fit to ramp_fits if no jump detection or stats are less + # than threshold + # Note push_front and use of cpp_list are because ramps are computed + # backward in time meaning we need to add to the front of the list + # cpp_list over vector because 
need to append to the front which + # is slow for vector. Additionally, we don't need random access + # and cpp_list is closer to python lists then deque. ramp_fits.slope.push_front(ramp_fit.slope) ramp_fits.read_var.push_front(ramp_fit.read_var) ramp_fits.poisson_var.push_front(ramp_fit.poisson_var) From 20b2d9ba2c41cb6647e046b6be7395bd34ab6b34 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 25 Aug 2023 13:02:24 -0400 Subject: [PATCH 35/90] Collect ramp resultant indices --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 2 ++ src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx | 13 ++++++++----- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 2 ++ 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index a7dbeef8..3e6edf60 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -23,6 +23,8 @@ cdef struct RampFits: cpp_list[float] slope cpp_list[float] read_var cpp_list[float] poisson_var + cpp_list[int] start + cpp_list[int] end cdef struct DerivedData: diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index cfeee25f..49ffacd2 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -72,6 +72,7 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, cdef deque[stack[RampIndex]] pixel_ramps = init_ramps(dq) cdef cpp_list[cpp_list[float]] slopes, read_vars, poisson_vars + cdef cpp_list[cpp_list[int]] starts, ends # Perform all of the fits cdef RampFits ramp_fits @@ -82,9 +83,11 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, resultants[:, index]).fit_ramps(pixel_ramps[index]) # Build the output arrays - slopes.push_back(ramp_fits.slopes) - read_vars.push_back(ramp_fits.read_vars) - poisson_vars.push_back(ramp_fits.poisson_vars) + slopes.push_back(ramp_fits.slope) + read_vars.push_back(ramp_fits.read_var) + poisson_vars.push_back(ramp_fits.poisson_var) + starts.push_back(ramp_fits.start) + ends.push_back(ramp_fits.end) - return dict(slope=slopes, slopereadvar=read_vars, - slopepoissonvar=poisson_vars) + return dict(slope=slopes, read_var=read_vars, + poisson_var=poisson_vars, start=starts, end=ends) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 0a0a21fb..e4c06823 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -282,6 +282,8 @@ cdef class Pixel: ramp_fits.slope.push_front(ramp_fit.slope) ramp_fits.read_var.push_front(ramp_fit.read_var) ramp_fits.poisson_var.push_front(ramp_fit.poisson_var) + ramp_fits.start.push_front(ramp.start) + ramp_fits.end.push_front(ramp.end) return ramp_fits From 9d5be8832c7d67acb50a2e7a56a6f3fda241a784 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 25 Aug 2023 14:08:18 -0400 Subject: [PATCH 36/90] Clean up pre-computation code --- .gitignore | 1 + src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 39 ++++++++++----------- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 37 +++++++++---------- 3 files changed, 35 insertions(+), 42 deletions(-) diff --git a/.gitignore b/.gitignore index 5f3f3884..2afbcecc 100644 --- a/.gitignore +++ b/.gitignore @@ -141,6 +141,7 @@ dmypy.json cython_debug/ src/stcal/ramp_fitting/ols_cas22/*.c src/stcal/ramp_fitting/ols_cas22/*.cpp +src/stcal/ramp_fitting/ols_cas22/*.html # setuptools-scm generated module src/stcal/_version.py diff 
--git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index 002799d8..a8064357 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -85,10 +85,9 @@ cdef class Fixed: ------- t_bar[i+offset] - t_bar[i] """ - cdef int n_diff = len(self.t_bar) - offset - cdef float[:] diff = (np.roll(self.t_bar, -offset) - self.t_bar)[:n_diff] + cdef float[:] t_bar = self.t_bar - return diff + return np.subtract(t_bar[offset:], t_bar[:-offset]) cdef inline float[:] t_bar_diff_sq(Fixed self, int offset): """ @@ -119,11 +118,10 @@ cdef class Fixed: ------- (1/n_reads[i+offset] + 1/n_reads[i]) """ - cdef int n_diff = len(self.t_bar) - offset - cdef float[:] recip = ((1 / np.roll(self.n_reads, -offset)).astype(np.float32) + - (1 / np.array(self.n_reads)).astype(np.float32))[:n_diff] + cdef int[:] n_reads = self.n_reads - return recip + return (np.divide(1.0, n_reads[offset:], dtype=np.float32) + + np.divide(1.0, n_reads[:-offset], dtype=np.float32)) cdef inline float correction(Fixed self, int i, int j): """Compute the correction factor @@ -139,12 +137,15 @@ cdef class Fixed: ------- the correction factor f_corr for a single term """ - cdef float denom = self.t_bar[self.n_reads[i] - 1] - self.t_bar[0] + cdef float[:] t_bar = self.t_bar + cdef int[:] n_reads = self.n_reads + + cdef float denom = t_bar[n_reads[i] - 1] - t_bar[0] if i - j == 1: - return (1 - (self.t_bar[i + 1] - self.t_bar[i]) / denom) ** 2 + return (1.0 - (t_bar[i + 1] - t_bar[i]) / denom) ** 2 else: - return (1 - 0.75 * (self.t_bar[i + 2] - self.t_bar[i]) / denom) ** 2 + return (1.0 - 0.75 * (t_bar[i + 2] - t_bar[i]) / denom) ** 2 cdef inline float[:] slope_var_val(Fixed self, int offset): """ @@ -160,7 +161,10 @@ cdef class Fixed: (tau[i] + tau[i+offset] - min(t_bar[i], t_bar[i+offset])) * correction(i, i+offset) """ - cdef int n_diff = len(self.t_bar) - offset + cdef float[:] t_bar = self.t_bar + cdef float[:] tau = self.tau + + cdef int n_diff = t_bar.size - offset # Comput correction factor vector cdef int i @@ -168,16 +172,8 @@ cdef class Fixed: for i in range(n_diff): f_corr[i] = self.correction(i, i + offset) - # Compute rolls to the correct shapes - cdef t_bar_1 = np.array(self.t_bar, dtype=np.float32)[:n_diff] - cdef t_bar_2 = np.array(np.roll(self.t_bar, -offset), dtype=np.float32)[:n_diff] - cdef tau_1 = np.array(self.tau, dtype=np.float32)[:n_diff] - cdef tau_2 = np.array(np.roll(self.tau, -offset), dtype=np.float32)[:n_diff] - - cdef float[:] slope_var_val = (tau_1 + tau_2 - np.minimum(t_bar_1, t_bar_2) - ) * f_corr - - return slope_var_val + return (np.add(tau[offset:], tau[:-offset]) - + np.minimum(t_bar[offset:], t_bar[:-offset])) * f_corr cdef inline Fixed make_fixed(DerivedData data, Thresh threshold, bool use_jump): @@ -205,6 +201,7 @@ cdef inline Fixed make_fixed(DerivedData data, Thresh threshold, bool use_jump): fixed.use_jump = use_jump fixed.threshold = threshold + # Cast vector to a c array fixed.t_bar = data.t_bar.data() fixed.tau = data.tau.data() fixed.n_reads = data.n_reads.data() diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index e4c06823..7a1b0968 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -85,10 +85,9 @@ cdef class Pixel: ------- (resultants[i+offset] - resultants[i]) """ - cdef int n_diff = len(self.resultants) - offset - cdef float[:] diff = (np.roll(self.resultants, -offset) - 
self.t_bar)[:n_diff] + cdef float[:] resultants = self.resultants - return diff + return np.subtract(resultants[offset:], resultants[:-offset]) @cython.boundscheck(False) @cython.wraparound(False) @@ -119,6 +118,7 @@ cdef class Pixel: cdef float[:] t_bar = self.fixed.t_bar[ramp.start:ramp.end + 1] cdef float[:] tau = self.fixed.tau[ramp.start:ramp.end + 1] cdef int[:] n_reads = self.fixed.n_reads[ramp.start:ramp.end + 1] + cdef float read_noise = self.fixed.read_noise # initalize fit cdef int i = 0, j = 0 @@ -132,7 +132,7 @@ cdef class Pixel: # a CR in the first resultant has boosted the whole ramp high but there # is no actual signal. cdef float s = max(resultants[-1] - resultants[0], 0) - s = s / sqrt(self.fixed.read_noise**2 + s) + s = s / sqrt(read_noise**2 + s) cdef float power = get_power(s) # It's easy to use up a lot of dynamic range on something like @@ -167,8 +167,7 @@ cdef class Pixel: ramp_fit.slope += coeffs[i] * resultants[i] # Casertano+22 Eq. 39 - ramp_fit.read_var += (coeffs[i] ** 2 * self.fixed.read_noise ** 2 / - n_reads[i]) + ramp_fit.read_var += (coeffs[i] ** 2 * read_noise ** 2 / n_reads[i]) # Casertano+22 Eq 40 ramp_fit.poisson_var += coeffs[i] ** 2 * tau[i] @@ -205,24 +204,20 @@ cdef class Pixel: cdef int start = ramp.start cdef int end = ramp.end - 1 - cdef np.ndarray[float] delta_1 = np.array(self.delta_1[start:end]) - slope - cdef np.ndarray[float] delta_2 = np.array(self.delta_2[start:end]) - slope + cdef float[:] delta_1 = np.subtract(self.delta_1[start:end], slope) + cdef float[:] delta_2 = np.subtract(self.delta_2[start:end], slope) - cdef np.ndarray[float] var_1 = ((np.array(self.sigma_1[start:end]) + slope * - np.array(self.slope_var_1[start:end])) / - self.fixed.t_bar_1_sq[start:end] - ).astype(np.float32) - cdef np.ndarray[float] var_2 = ((np.array(self.sigma_2[start:end]) + slope * - np.array(self.slope_var_2[start:end])) / - self.fixed.t_bar_2_sq[start:end] - ).astype(np.float32) + cdef float[:] var_1 = np.divide(np.add(self.sigma_1[start:end], + np.multiply(slope, self.slope_var_1[start:end])), + self.fixed.t_bar_1_sq[start:end]) + cdef float[:] var_2 = np.divide(np.add(self.sigma_2[start:end], + np.multiply(slope, self.slope_var_2[start:end])), + self.fixed.t_bar_2_sq[start:end]) - cdef np.ndarray[float] stats_1 = (delta_1 / np.sqrt(var_1, dtype=np.float32) - ).astype(np.float32) - cdef np.ndarray[float] stats_2 = (delta_2 / np.sqrt(var_2, dtype=np.float32) - ).astype(np.float32) + cdef float[:] stats_1 = np.divide(delta_1, np.sqrt(var_1)) + cdef float[:] stats_2 = np.divide(delta_2, np.sqrt(var_2)) - return np.maximum(stats_1, stats_2) + return np.maximum(stats_1, stats_2, dtype=np.float32) @cython.boundscheck(False) @cython.wraparound(False) From d7877bd5a58ac522139ad47baaea835792119fb2 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 30 Aug 2023 13:59:37 -0400 Subject: [PATCH 37/90] Add some testing --- setup.py | 6 ++ src/stcal/ramp_fitting/ols_cas22/_core.pxd | 6 +- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 30 ++++---- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 10 +-- .../ramp_fitting/ols_cas22/_wrappers.pyx | 58 ++++++++++++++ tests/test_jump_cas22.py | 77 +++++++++++++++++++ 6 files changed, 162 insertions(+), 25 deletions(-) create mode 100644 src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx create mode 100644 tests/test_jump_cas22.py diff --git a/setup.py b/setup.py index b34e7dfa..03eedc6f 100644 --- a/setup.py +++ b/setup.py @@ -31,6 +31,12 @@ include_dirs=[np.get_include()], language='c++' ), + Extension( + 
'stcal.ramp_fitting.ols_cas22._wrappers', + ['src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx'], + include_dirs=[np.get_include()], + language='c++' + ), ] setup(ext_modules=cythonize(extensions)) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 3e6edf60..2230ed2f 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -1,12 +1,8 @@ -import numpy as np -cimport numpy as np from libcpp.vector cimport vector from libcpp.stack cimport stack from libcpp.list cimport list as cpp_list from libcpp.deque cimport deque -from libcpp cimport bool -from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed cdef struct RampIndex: int start @@ -42,4 +38,4 @@ cdef class Thresh: cdef Thresh make_threshold(float intercept, float constant) cdef float get_power(float s) cdef deque[stack[RampIndex]] init_ramps(int[:, :] dq) -cdef DerivedData read_data(list[list[int]] ma_table, float read_time) +cdef DerivedData read_data(list[list[int]] read_pattern, float read_time) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index ae5dded9..00dfa5bc 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -183,17 +183,17 @@ cdef inline deque[stack[RampIndex]] init_ramps(int[:, :] dq): return pixel_ramps -cdef inline DerivedData read_data(list[list[int]] ma_table, float read_time): +cdef DerivedData read_data(list[list[int]] read_pattern, float read_time): """ - Derive the input data from the MA table + Derive the input data from the the read pattern - Note the MA table is a list of pairs of ints for each resultant: - (first read index, number of reads in resultant) + read pattern is a list of resultant lists, where each resultant list is + a list of the reads in that resultant. Parameters ---------- - ma_table : list[list[int]] - MA table + read pattern: list[list[int]] + read pattern for the image read_time : float Time to perform a readout. 
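The next hunk replaces the MA-table arithmetic with a direct computation over each resultant's read indices. The same two formulas written as plain numpy for a single resultant are sketched below (the function name is illustrative); the example values match those asserted in tests/test_jump_cas22.py later in this patch.

    import numpy as np

    def resultant_times(resultant, read_time):
        # resultant: list of read indices contributing to this resultant
        n_reads = len(resultant)
        reads = np.asarray(resultant, dtype=float)
        t_bar = read_time * reads.mean()
        tau = (np.sum((2 * (n_reads - np.arange(n_reads)) - 1) * reads)
               * read_time / n_reads**2)
        return t_bar, tau

    # With read_time = 3.0: [1, 2] -> (4.5, 3.75) and [4, 5, 6] -> (15.0, 13.666...)
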
@@ -204,18 +204,18 @@ cdef inline DerivedData read_data(list[list[int]] ma_table, float read_time): vector[float] tau: variance time of each resultant vector[int] n_reads: number of reads in each resultant """ - cdef int n_resultants = len(ma_table) + cdef int n_resultants = len(read_pattern) cdef DerivedData data = DerivedData(vector[float](n_resultants), vector[float](n_resultants), vector[int](n_resultants)) - cdef int index - cdef list[int] entry - for index, entry in enumerate(ma_table): - data.n_reads[index] = entry[1] - data.t_bar[index] = read_time *(entry[0] + (entry[1] - 1) / 2.0) - data.tau[index] = data.t_bar[index] - (entry[1] - 1) * ((entry[1] + 1) * - read_time / - (6 * entry[1])) + cdef int index, n_reads + cdef list[int] resultant + for index, resultant in enumerate(read_pattern): + n_reads = len(resultant) + + data.n_reads[index] = n_reads + data.t_bar[index] = read_time * np.mean(resultant) + data.tau[index] = np.sum((2 * (n_reads - np.arange(n_reads)) - 1) * resultant) * read_time / n_reads**2 return data diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 49ffacd2..f40a077f 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -17,7 +17,7 @@ from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel def fit_ramps(np.ndarray[float, ndim=2] resultants, np.ndarray[int, ndim=2] dq, np.ndarray[float, ndim=1] read_noise, read_time, - ma_table, + list[list[int]] read_pattern, int use_jumps=False): """Fit ramps using the Casertano+22 algorithm. @@ -37,8 +37,8 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, the read noise in electrons read_time : float Time to perform a readout. For Roman data, this is FRAME_TIME. - ma_table : list[list[int]] - the ma table prescription + read_pattern : list[list[int]] + the read pattern for the image Returns ------- @@ -58,13 +58,13 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, resend : np.ndarray[nramp] The last resultant in this ramp. 
""" - cdef int n_resultants = len(ma_table) + cdef int n_resultants = len(read_pattern) if n_resultants != resultants.shape[0]: raise RuntimeError(f'MA table length {n_resultants} does not ' f'match number of resultants {resultants.shape[0]}') # Pre-compute data for all pixels - cdef Fixed fixed = make_fixed(read_data(ma_table, read_time), + cdef Fixed fixed = make_fixed(read_data(read_pattern, read_time), make_threshold(5.5, 1/3.0), use_jumps) diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx new file mode 100644 index 00000000..174febe3 --- /dev/null +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -0,0 +1,58 @@ +import numpy as np +cimport numpy as np + +from libcpp cimport bool +from libcpp.stack cimport stack +from libcpp.deque cimport deque + +from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, DerivedData, Thresh +from stcal.ramp_fitting.ols_cas22._core cimport read_data as c_read_data +from stcal.ramp_fitting.ols_cas22._core cimport init_ramps as c_init_ramps +from stcal.ramp_fitting.ols_cas22._core cimport make_threshold as c_make_threshold + +from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed as c_make_fixed + +def read_data(list[list[int]] read_pattern, float read_time): + return c_read_data(read_pattern, read_time) + + +def init_ramps(np.ndarray[int, ndim=2] dq): + cdef deque[stack[RampIndex]] raw = c_init_ramps(dq) + + # Have to turn deque and stack into python compatible objects + cdef RampIndex index + cdef stack[RampIndex] ramp + cdef list out = [] + cdef list stack_out + for ramp in raw: + stack_out = [] + while not ramp.empty(): + index = ramp.top() + ramp.pop() + # So top of stack is first item of list + stack_out = [index] + stack_out + + out.append(stack_out) + + return out + + +def make_threshold(float intercept, float constant): + return c_make_threshold(intercept, constant) + + +def run_threshold(Thresh threshold, float slope): + return threshold.run(slope) + + +def make_fixed(np.ndarray[float, ndim=1] t_bar, + np.ndarray[float, ndim=1] tau, + np.ndarray[int, ndim=1] n_reads, + float intercept, + float constant, + bool use_jump): + + cdef DerivedData data = DerivedData(t_bar, tau, n_reads) + cdef Thresh threshold = c_make_threshold(intercept, constant) + + return c_make_fixed(data, threshold, use_jump) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py new file mode 100644 index 00000000..d324f803 --- /dev/null +++ b/tests/test_jump_cas22.py @@ -0,0 +1,77 @@ +import numpy as np +from numpy.testing import assert_allclose + +from stcal.ramp_fitting.ols_cas22._wrappers import read_data +from stcal.ramp_fitting.ols_cas22._wrappers import init_ramps +from stcal.ramp_fitting.ols_cas22._wrappers import make_threshold, run_threshold + +def test_read_data(): + """Test turning read_pattern into the time data""" + pattern = [[1, 2], [4, 5, 6], [7], [8, 9, 10, 11]] + data = read_data(pattern, 3.0) + + # Basic sanity checks (structs become dicts) + assert isinstance(data, dict) + assert 't_bar' in data + assert 'tau' in data + assert 'n_reads' in data + assert len(data) == 3 + + # Check that the data is correct + assert_allclose(data['t_bar'], [4.5, 15, 21, 28.5]) + assert_allclose(data['tau'], [3.75, 13.666667, 21, 26.625]) + assert data['n_reads'] == [2, 3, 1, 4] + + +def test_init_ramps(): + """Test turning dq flags into initial ramp splits""" + dq = np.array([[0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1], + [0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1], + [0, 0, 0, 1, 0, 0, 
1, 0, 1, 0, 1, 1, 0, 1, 1, 1], + [0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1]], dtype=np.int32) + + ramps = init_ramps(dq) + assert len(ramps) == dq.shape[1] == 16 + + # Check that the ramps are correct + + # No DQ + assert ramps[0] == [{'start': 0, 'end': 3}] + + # 1 DQ + assert ramps[1] == [{'start': 1, 'end': 3}] + assert ramps[2] == [{'start': 0, 'end': 0}, {'start': 2, 'end': 3}] + assert ramps[3] == [{'start': 0, 'end': 1}, {'start': 3, 'end': 3}] + assert ramps[4] == [{'start': 0, 'end': 2}] + + # 2 DQ + assert ramps[5] == [{'start': 2, 'end': 3}] + assert ramps[6] == [{'start': 1, 'end': 1}, {'start': 3, 'end': 3}] + assert ramps[7] == [{'start': 1, 'end': 2}] + assert ramps[8] == [{'start': 0, 'end': 0}, {'start': 3, 'end': 3}] + assert ramps[9] == [{'start': 0, 'end': 0}, {'start': 2, 'end': 2}] + assert ramps[10] == [{'start': 0, 'end': 1}] + + # 3 DQ + assert ramps[11] == [{'start': 3, 'end': 3}] + assert ramps[12] == [{'start': 2, 'end': 2}] + assert ramps[13] == [{'start': 1, 'end': 1}] + assert ramps[14] == [{'start': 0, 'end': 0}] + + # 4 DQ + assert ramps[15] == [] + + +def test_threshold(): + intercept = 5.5 + constant = 1/3 + + thresh = make_threshold(intercept, constant) + + # Parameters are not directly accessible + assert intercept == run_threshold(thresh, 1.0) # check intercept + assert_allclose(intercept - constant, run_threshold(thresh, 10.0)) # check constant + + +def test_make_fixed(): + pass From e0981a4db8b803193277b4a5f43eba91910383b2 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 31 Aug 2023 10:39:08 -0400 Subject: [PATCH 38/90] Some partial fixes for the correction factor --- src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 2 - src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 37 ++----------------- .../ramp_fitting/ols_cas22/_wrappers.pyx | 35 +++++++++++++++++- tests/test_jump_cas22.py | 24 +++++++++--- 4 files changed, 56 insertions(+), 42 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd index eb194ead..abe91fa1 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -19,7 +19,5 @@ cdef class Fixed: cdef float[:] recip_val(Fixed self, int offset) cdef float[:] slope_var_val(Fixed self, int offset) - cdef float correction(Fixed self, int i, int j) - cdef Fixed make_fixed(DerivedData data, Thresh threshold, bool use_jump) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index a8064357..cd1622a1 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -56,10 +56,10 @@ cdef class Fixed: ((1/n_reads[i+2]) + (1/n_reads[i])) slope_var_1 : vector[float] single of slope variance term: - ([tau[i] + tau[i+1] - min(t_bar[i], t_bar[i+1])) * correction(i, i+1) + ([tau[i] + tau[i+1] - min(t_bar[i], t_bar[i+1])) slope_var_2 : vector[float] double of slope variance term: - ([tau[i] + tau[i+2] - min(t_bar[i], t_bar[i+2])) * correction(i, i+2) + ([tau[i] + tau[i+2] - min(t_bar[i], t_bar[i+2])) Notes ----- @@ -123,29 +123,6 @@ cdef class Fixed: return (np.divide(1.0, n_reads[offset:], dtype=np.float32) + np.divide(1.0, n_reads[:-offset], dtype=np.float32)) - cdef inline float correction(Fixed self, int i, int j): - """Compute the correction factor - - Parameters - ---------- - i : int - The index of the first read in the segment - j : int - The index of the last read in the segment - - Returns - ------- - the correction factor f_corr for a 
single term - """ - cdef float[:] t_bar = self.t_bar - cdef int[:] n_reads = self.n_reads - - cdef float denom = t_bar[n_reads[i] - 1] - t_bar[0] - - if i - j == 1: - return (1.0 - (t_bar[i + 1] - t_bar[i]) / denom) ** 2 - else: - return (1.0 - 0.75 * (t_bar[i + 2] - t_bar[i]) / denom) ** 2 cdef inline float[:] slope_var_val(Fixed self, int offset): """ @@ -164,16 +141,8 @@ cdef class Fixed: cdef float[:] t_bar = self.t_bar cdef float[:] tau = self.tau - cdef int n_diff = t_bar.size - offset - - # Comput correction factor vector - cdef int i - cdef np.ndarray[float] f_corr = np.zeros(n_diff, dtype=np.float32) - for i in range(n_diff): - f_corr[i] = self.correction(i, i + offset) - return (np.add(tau[offset:], tau[:-offset]) - - np.minimum(t_bar[offset:], t_bar[:-offset])) * f_corr + np.minimum(t_bar[offset:], t_bar[:-offset])) cdef inline Fixed make_fixed(DerivedData data, Thresh threshold, bool use_jump): diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index 174febe3..30ad7263 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -10,6 +10,7 @@ from stcal.ramp_fitting.ols_cas22._core cimport read_data as c_read_data from stcal.ramp_fitting.ols_cas22._core cimport init_ramps as c_init_ramps from stcal.ramp_fitting.ols_cas22._core cimport make_threshold as c_make_threshold +from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed as c_make_fixed def read_data(list[list[int]] read_pattern, float read_time): @@ -55,4 +56,36 @@ def make_fixed(np.ndarray[float, ndim=1] t_bar, cdef DerivedData data = DerivedData(t_bar, tau, n_reads) cdef Thresh threshold = c_make_threshold(intercept, constant) - return c_make_fixed(data, threshold, use_jump) + cdef Fixed fixed = c_make_fixed(data, threshold, use_jump) + + cdef np.ndarray[float, ndim=1] t_bar_ = np.array(fixed.t_bar, dtype=np.float32) + cdef np.ndarray[float, ndim=1] tau_ = np.array(fixed.tau, dtype=np.float32) + cdef np.ndarray[int, ndim=1] n_reads_ = np.array(fixed.n_reads, dtype=np.int32) + + cdef float intercept_ = fixed.threshold.intercept + cdef float constant_ = fixed.threshold.constant + + cdef np.ndarray[float, ndim=1] t_bar_1 = np.array(fixed.t_bar_1, dtype=np.float32) + cdef np.ndarray[float, ndim=1] t_bar_2 = np.array(fixed.t_bar_2, dtype=np.float32) + cdef np.ndarray[float, ndim=1] t_bar_1_sq = np.array(fixed.t_bar_1_sq, dtype=np.float32) + cdef np.ndarray[float, ndim=1] t_bar_2_sq = np.array(fixed.t_bar_2_sq, dtype=np.float32) + + cdef np.ndarray[float, ndim=1] recip_1 = np.array(fixed.recip_1, dtype=np.float32) + cdef np.ndarray[float, ndim=1] recip_2 = np.array(fixed.recip_2, dtype=np.float32) + + cdef np.ndarray[float, ndim=1] slope_var_1 = np.array(fixed.slope_var_1, dtype=np.float32) + cdef np.ndarray[float, ndim=1] slope_var_2 = np.array(fixed.slope_var_2, dtype=np.float32) + + return dict(t_bar=t_bar_, + tau=tau_, + n_reads=n_reads_, + intercept=intercept_, + constant=constant_, + t_bar_1=t_bar_1, + t_bar_2=t_bar_2, + t_bar_1_sq=t_bar_1_sq, + t_bar_2_sq=t_bar_2_sq, + recip_1=recip_1, + recip_2=recip_2, + slope_var_1=slope_var_1, + slope_var_2=slope_var_2) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index d324f803..60a5b125 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -1,9 +1,10 @@ +from calendar import c import numpy as np from numpy.testing import assert_allclose from 
stcal.ramp_fitting.ols_cas22._wrappers import read_data from stcal.ramp_fitting.ols_cas22._wrappers import init_ramps -from stcal.ramp_fitting.ols_cas22._wrappers import make_threshold, run_threshold +from stcal.ramp_fitting.ols_cas22._wrappers import make_threshold, run_threshold, make_fixed def test_read_data(): """Test turning read_pattern into the time data""" @@ -63,15 +64,28 @@ def test_init_ramps(): def test_threshold(): - intercept = 5.5 - constant = 1/3 + intercept = np.float32(5.5) + constant = np.float32(1/3) thresh = make_threshold(intercept, constant) # Parameters are not directly accessible assert intercept == run_threshold(thresh, 1.0) # check intercept - assert_allclose(intercept - constant, run_threshold(thresh, 10.0)) # check constant + assert np.float32(intercept - constant) == run_threshold(thresh, 10.0) # check constant def test_make_fixed(): - pass + pattern = [[1, 2], [4, 5, 6], [7], [8, 9, 10, 11]] + data = read_data(pattern, 3.0) + + t_bar = np.array(data['t_bar'], dtype=np.float32) + tau = np.array(data['tau'], dtype=np.float32) + n_reads = np.array(data['n_reads'], dtype=np.int32) + intercept = np.float32(5.5) + constant = np.float32(1/3) + + fixed = make_fixed(t_bar, tau, n_reads, intercept, constant, True) + + assert (fixed['t_bar'] == t_bar).all() + assert fixed["intercept"] == intercept + assert fixed["constant"] == constant From 98033dc3d28f72c94d0f864c29e44df791c88c07 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 31 Aug 2023 10:56:01 -0400 Subject: [PATCH 39/90] Better handle vector vs memory view for fixed data Casting vectors to memory views without a copy is dangerous! This is because if the memory view outlives the vector, then it contains a dangling pointer. One can be "safe" if one only does casting to local variables for computations. --- src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 3 +- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 30 ++++++++++++++----- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 17 +++++++++-- .../ramp_fitting/ols_cas22/_wrappers.pyx | 8 +---- tests/test_jump_cas22.py | 2 +- 5 files changed, 40 insertions(+), 20 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd index abe91fa1..311463fc 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -5,8 +5,7 @@ from stcal.ramp_fitting.ols_cas22._core cimport Thresh, DerivedData cdef class Fixed: cdef bool use_jump - cdef float[:] t_bar, tau - cdef int[:] n_reads + cdef DerivedData data cdef Thresh threshold cdef float[:] t_bar_1, t_bar_2 diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index cd1622a1..1807598a 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -85,7 +85,13 @@ cdef class Fixed: ------- t_bar[i+offset] - t_bar[i] """ - cdef float[:] t_bar = self.t_bar + # Cast vector to memory view + # This way of doing it is potentially memory unsafe because the memory + # can outlive the vector. However, this is much faster (no copies) and + # much simpler than creating an intermediate wrapper which can pretend + # to be a memory view. 
In this case, I make sure that the memory view + # stays local to the function (numpy operations create brand new objects) + cdef float[:] t_bar = self.data.t_bar.data() return np.subtract(t_bar[offset:], t_bar[:-offset]) @@ -118,7 +124,13 @@ cdef class Fixed: ------- (1/n_reads[i+offset] + 1/n_reads[i]) """ - cdef int[:] n_reads = self.n_reads + # Cast vector to memory view + # This way of doing it is potentially memory unsafe because the memory + # can outlive the vector. However, this is much faster (no copies) and + # much simpler than creating an intermediate wrapper which can pretend + # to be a memory view. In this case, I make sure that the memory view + # stays local to the function (numpy operations create brand new objects) + cdef int[:] n_reads = self.data.n_reads.data() return (np.divide(1.0, n_reads[offset:], dtype=np.float32) + np.divide(1.0, n_reads[:-offset], dtype=np.float32)) @@ -138,8 +150,14 @@ cdef class Fixed: (tau[i] + tau[i+offset] - min(t_bar[i], t_bar[i+offset])) * correction(i, i+offset) """ - cdef float[:] t_bar = self.t_bar - cdef float[:] tau = self.tau + # Cast vectors to memory views + # This way of doing it is potentially memory unsafe because the memory + # can outlive the vector. However, this is much faster (no copies) and + # much simpler than creating an intermediate wrapper which can pretend + # to be a memory view. In this case, I make sure that the memory view + # stays local to the function (numpy operations create brand new objects) + cdef float[:] t_bar = self.data.t_bar.data() + cdef float[:] tau = self.data.tau.data() return (np.add(tau[offset:], tau[:-offset]) - np.minimum(t_bar[offset:], t_bar[:-offset])) @@ -171,9 +189,7 @@ cdef inline Fixed make_fixed(DerivedData data, Thresh threshold, bool use_jump): fixed.threshold = threshold # Cast vector to a c array - fixed.t_bar = data.t_bar.data() - fixed.tau = data.tau.data() - fixed.n_reads = data.n_reads.data() + fixed.data = data # Pre-compute jump detection computations shared by all pixels if use_jump: diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 7a1b0968..18a6613c 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -113,11 +113,22 @@ cdef class Pixel: return ramp_fit # Else, do the fitting. + # Cast vectors to memory views for faster access + # This way of doing it is potentially memory unsafe because the memory + # can outlive the vector. However, this is much faster (no copies) and + # much simpler than creating an intermediate wrapper which can pretend + # to be a memory view. 
In this case, I make sure that the memory view + # stays local to the function t_bar, tau, n_reads are used only for + # computations whose results are stored in new objects, so they are local + cdef float[:] t_bar_ = self.fixed.data.t_bar.data() + cdef float[:] tau_ = self.fixed.data.tau.data() + cdef int[:] n_reads_ = self.fixed.data.n_reads.data() + # Setup data for fitting (work over subset of data) cdef float[:] resultants = self.fixed.resultants[ramp.start:ramp.end + 1] - cdef float[:] t_bar = self.fixed.t_bar[ramp.start:ramp.end + 1] - cdef float[:] tau = self.fixed.tau[ramp.start:ramp.end + 1] - cdef int[:] n_reads = self.fixed.n_reads[ramp.start:ramp.end + 1] + cdef float[:] t_bar = t_bar_[ramp.start:ramp.end + 1] + cdef float[:] tau = tau_[ramp.start:ramp.end + 1] + cdef int[:] n_reads = n_reads_[ramp.start:ramp.end + 1] cdef float read_noise = self.fixed.read_noise # initalize fit diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index 30ad7263..47124414 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -58,10 +58,6 @@ def make_fixed(np.ndarray[float, ndim=1] t_bar, cdef Fixed fixed = c_make_fixed(data, threshold, use_jump) - cdef np.ndarray[float, ndim=1] t_bar_ = np.array(fixed.t_bar, dtype=np.float32) - cdef np.ndarray[float, ndim=1] tau_ = np.array(fixed.tau, dtype=np.float32) - cdef np.ndarray[int, ndim=1] n_reads_ = np.array(fixed.n_reads, dtype=np.int32) - cdef float intercept_ = fixed.threshold.intercept cdef float constant_ = fixed.threshold.constant @@ -76,9 +72,7 @@ def make_fixed(np.ndarray[float, ndim=1] t_bar, cdef np.ndarray[float, ndim=1] slope_var_1 = np.array(fixed.slope_var_1, dtype=np.float32) cdef np.ndarray[float, ndim=1] slope_var_2 = np.array(fixed.slope_var_2, dtype=np.float32) - return dict(t_bar=t_bar_, - tau=tau_, - n_reads=n_reads_, + return dict(data=data, intercept=intercept_, constant=constant_, t_bar_1=t_bar_1, diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 60a5b125..4b601b45 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -86,6 +86,6 @@ def test_make_fixed(): fixed = make_fixed(t_bar, tau, n_reads, intercept, constant, True) - assert (fixed['t_bar'] == t_bar).all() + assert (fixed['data']['t_bar'] == t_bar).all() assert fixed["intercept"] == intercept assert fixed["constant"] == constant From a5d13ababcb27bc209c36c972e9de165dd58dc6c Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 31 Aug 2023 11:25:30 -0400 Subject: [PATCH 40/90] Finish testing fixed object --- .../ramp_fitting/ols_cas22/_wrappers.pyx | 81 ++++++++++++++++--- tests/test_jump_cas22.py | 36 ++++++++- 2 files changed, 105 insertions(+), 12 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index 47124414..b1ac5493 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -61,16 +61,79 @@ def make_fixed(np.ndarray[float, ndim=1] t_bar, cdef float intercept_ = fixed.threshold.intercept cdef float constant_ = fixed.threshold.constant - cdef np.ndarray[float, ndim=1] t_bar_1 = np.array(fixed.t_bar_1, dtype=np.float32) - cdef np.ndarray[float, ndim=1] t_bar_2 = np.array(fixed.t_bar_2, dtype=np.float32) - cdef np.ndarray[float, ndim=1] t_bar_1_sq = np.array(fixed.t_bar_1_sq, dtype=np.float32) - cdef np.ndarray[float, ndim=1] t_bar_2_sq = np.array(fixed.t_bar_2_sq, dtype=np.float32) 
+ cdef np.ndarray[float, ndim=1] t_bar_1, t_bar_2 + cdef np.ndarray[float, ndim=1] t_bar_1_sq, t_bar_2_sq + cdef np.ndarray[float, ndim=1] recip_1, recip_2 + cdef np.ndarray[float, ndim=1] slope_var_1, slope_var_2 + + if use_jump: + t_bar_1 = np.array(fixed.t_bar_1, dtype=np.float32) + t_bar_2 = np.array(fixed.t_bar_2, dtype=np.float32) + t_bar_1_sq = np.array(fixed.t_bar_1_sq, dtype=np.float32) + t_bar_2_sq = np.array(fixed.t_bar_2_sq, dtype=np.float32) + + recip_1 = np.array(fixed.recip_1, dtype=np.float32) + recip_2 = np.array(fixed.recip_2, dtype=np.float32) + + slope_var_1 = np.array(fixed.slope_var_1, dtype=np.float32) + slope_var_2 = np.array(fixed.slope_var_2, dtype=np.float32) + else: + try: + fixed.t_bar_1 + except AttributeError: + t_bar_1 = np.zeros(1, np.float32) + else: + raise AttributeError("t_bar_1 should not exist") + + try: + fixed.t_bar_2 + except AttributeError: + t_bar_2 = np.zeros(1, np.float32) + else: + raise AttributeError("t_bar_2 should not exist") + + try: + fixed.t_bar_1_sq + except AttributeError: + t_bar_1_sq = np.zeros(1, np.float32) + else: + raise AttributeError("t_bar_1_sq should not exist") + + try: + fixed.t_bar_2_sq + except AttributeError: + t_bar_2_sq = np.zeros(1, np.float32) + else: + raise AttributeError("t_bar_2_sq should not exist") + + try: + fixed.recip_1 + except AttributeError: + recip_1 = np.zeros(1, np.float32) + else: + raise AttributeError("recip_1 should not exist") + + try: + fixed.recip_2 + except AttributeError: + recip_2 = np.zeros(1, np.float32) + else: + raise AttributeError("recip_2 should not exist") + + try: + fixed.slope_var_1 + except AttributeError: + slope_var_1 = np.zeros(1, np.float32) + else: + raise AttributeError("slope_var_1 should not exist") + + try: + fixed.slope_var_2 + except AttributeError: + slope_var_2 = np.zeros(1, np.float32) + else: + raise AttributeError("slope_var_2 should not exist") - cdef np.ndarray[float, ndim=1] recip_1 = np.array(fixed.recip_1, dtype=np.float32) - cdef np.ndarray[float, ndim=1] recip_2 = np.array(fixed.recip_2, dtype=np.float32) - - cdef np.ndarray[float, ndim=1] slope_var_1 = np.array(fixed.slope_var_1, dtype=np.float32) - cdef np.ndarray[float, ndim=1] slope_var_2 = np.array(fixed.slope_var_2, dtype=np.float32) return dict(data=data, intercept=intercept_, diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 4b601b45..35d69727 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -1,5 +1,5 @@ -from calendar import c import numpy as np +import pytest from numpy.testing import assert_allclose from stcal.ramp_fitting.ols_cas22._wrappers import read_data @@ -74,7 +74,8 @@ def test_threshold(): assert np.float32(intercept - constant) == run_threshold(thresh, 10.0) # check constant -def test_make_fixed(): +@pytest.mark.parametrize("use_jump", [True, False]) +def test_make_fixed(use_jump): pattern = [[1, 2], [4, 5, 6], [7], [8, 9, 10, 11]] data = read_data(pattern, 3.0) @@ -84,8 +85,37 @@ def test_make_fixed(): intercept = np.float32(5.5) constant = np.float32(1/3) - fixed = make_fixed(t_bar, tau, n_reads, intercept, constant, True) + fixed = make_fixed(t_bar, tau, n_reads, intercept, constant, use_jump) + # Basic sanity checks that data passed in survives assert (fixed['data']['t_bar'] == t_bar).all() + assert (fixed['data']['tau'] == tau).all() + assert (fixed['data']['n_reads'] == n_reads).all() assert fixed["intercept"] == intercept assert fixed["constant"] == constant + + # Check the computed data + if use_jump: + single_gen = 
zip(fixed['t_bar_1'], fixed['t_bar_1_sq'], fixed['recip_1'], fixed['slope_var_1']) + double_gen = zip(fixed['t_bar_2'], fixed['t_bar_2_sq'], fixed['recip_2'], fixed['slope_var_2']) + + for index, (t_bar_1, t_bar_1_sq, recip_1, slope_var_1) in enumerate(single_gen): + assert t_bar_1 == t_bar[index + 1] - t_bar[index] + assert t_bar_1_sq == (t_bar[index + 1] - t_bar[index])**2 + assert recip_1 == np.float32(1 / n_reads[index + 1]) + np.float32(1 / n_reads[index]) + assert slope_var_1 == (tau[index + 1] + tau[index] - min(t_bar[index], t_bar[index + 1])) + + for index, (t_bar_2, t_bar_2_sq, recip_2, slope_var_2) in enumerate(double_gen): + assert t_bar_2 == t_bar[index + 2] - t_bar[index] + assert t_bar_2_sq == (t_bar[index + 2] - t_bar[index])**2 + assert recip_2 == np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) + assert slope_var_2 == (tau[index + 2] + tau[index] - min(t_bar[index], t_bar[index + 2])) + else: + assert fixed['t_bar_1'] == np.zeros(1, np.float32) + assert fixed['t_bar_2'] == np.zeros(1, np.float32) + assert fixed['t_bar_1_sq'] == np.zeros(1, np.float32) + assert fixed['t_bar_2_sq'] == np.zeros(1, np.float32) + assert fixed['recip_1'] == np.zeros(1, np.float32) + assert fixed['recip_2'] == np.zeros(1, np.float32) + assert fixed['slope_var_1'] == np.zeros(1, np.float32) + assert fixed['slope_var_2'] == np.zeros(1, np.float32) From 4aa62249f3f84fa233ef5014e16a489b19b8cec7 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 31 Aug 2023 12:51:01 -0400 Subject: [PATCH 41/90] Add tests for building pixels --- src/stcal/ramp_fitting/ols_cas22/_pixel.pxd | 1 + src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 30 ++++++-- .../ramp_fitting/ols_cas22/_wrappers.pyx | 70 ++++++++++++++++++- tests/test_jump_cas22.py | 42 ++++++++++- 4 files changed, 135 insertions(+), 8 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd index 1189f8c1..f0df4743 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd @@ -14,6 +14,7 @@ cdef class Pixel: cdef float[:] resultants_diff(Pixel self, int offset) cdef RampFit fit_ramp(Pixel self, RampIndex ramp) + cdef float correction(Pixel self, int i, int offset, RampIndex ramp) cdef float[:] stats(Pixel self, float slope, RampIndex ramp) cdef RampFits fit_ramps(Pixel self, stack[RampIndex] ramps) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 18a6613c..b3763c9d 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -187,6 +187,17 @@ cdef class Pixel: return ramp_fit + cdef inline float correction(Pixel self, int i, int offset, RampIndex ramp): + cdef float comp = ((self.fixed.data.t_bar[i + offset] - self.fixed.data.t_bar[i]) / + (self.fixed.data.t_bar[ramp.end] - self.fixed.data.t_bar[ramp.start])) + + if offset == 1: + return (1 - comp)**2 + elif offset == 2: + return (1 - 0.75 * comp)**2 + else: + raise ValueError("offset must be 1 or 2") + @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) @@ -194,9 +205,11 @@ cdef class Pixel: """ Compute fit statistics for jump detection on a single ramp Computed using: + corr_1[i] = correction(i, 1, ramp) + corr_2[i] = correction(i, 2, ramp) - var_1[i] = ((sigma_1[i] + slope * slope_var_1[i]) / t_bar_1_sq[i]) - var_2[i] = ((sigma_2[i] + slope * slope_var_2[i]) / t_bar_2_sq[i]) + var_1[i] = ((sigma_1[i] + slope * slope_var_1[i] * corr_1[i]) / 
t_bar_1_sq[i]) + var_2[i] = ((sigma_2[i] + slope * slope_var_2[i] * corr_2[i]) / t_bar_2_sq[i]) s_1[i] = (delta_1[i] - slope) / sqrt(var_1[i]) s_2[i] = (delta_2[i] - slope) / sqrt(var_2[i]) @@ -215,14 +228,19 @@ cdef class Pixel: cdef int start = ramp.start cdef int end = ramp.end - 1 + cdef float[:] slope_var_1 = np.zeros(end - start + 1, dtype=np.float32) + cdef float[:] slope_var_2 = np.zeros(end - start + 1, dtype=np.float32) + cdef int i + for i in range(end - start + 1): + slope_var_1[i] = slope * self.fixed.slope_var_1[start + i] * self.correction(start + i, 1, ramp) + slope_var_2[i] = slope * self.fixed.slope_var_2[start + i] * self.correction(start + i, 2, ramp) + cdef float[:] delta_1 = np.subtract(self.delta_1[start:end], slope) cdef float[:] delta_2 = np.subtract(self.delta_2[start:end], slope) - cdef float[:] var_1 = np.divide(np.add(self.sigma_1[start:end], - np.multiply(slope, self.slope_var_1[start:end])), + cdef float[:] var_1 = np.divide(np.add(self.sigma_1[start:end], slope_var_1), self.fixed.t_bar_1_sq[start:end]) - cdef float[:] var_2 = np.divide(np.add(self.sigma_2[start:end], - np.multiply(slope, self.slope_var_2[start:end])), + cdef float[:] var_2 = np.divide(np.add(self.sigma_2[start:end], slope_var_2), self.fixed.t_bar_2_sq[start:end]) cdef float[:] stats_1 = np.divide(delta_1, np.sqrt(var_1)) diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index b1ac5493..4c9a9c96 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -13,6 +13,10 @@ from stcal.ramp_fitting.ols_cas22._core cimport make_threshold as c_make_thresho from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed as c_make_fixed +from stcal.ramp_fitting.ols_cas22._pixel cimport Pixel +from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel as c_make_pixel + + def read_data(list[list[int]] read_pattern, float read_time): return c_read_data(read_pattern, read_time) @@ -135,7 +139,7 @@ def make_fixed(np.ndarray[float, ndim=1] t_bar, raise AttributeError("slope_var_2 should not exist") - return dict(data=data, + return dict(data=fixed.data, intercept=intercept_, constant=constant_, t_bar_1=t_bar_1, @@ -146,3 +150,67 @@ def make_fixed(np.ndarray[float, ndim=1] t_bar, recip_2=recip_2, slope_var_1=slope_var_1, slope_var_2=slope_var_2) + + +def make_pixel(np.ndarray[float, ndim=1] resultants, + np.ndarray[float, ndim=1] t_bar, + np.ndarray[float, ndim=1] tau, + np.ndarray[int, ndim=1] n_reads, + float read_noise, + float intercept, + float constant, + bool use_jump): + + cdef DerivedData data = DerivedData(t_bar, tau, n_reads) + cdef Thresh threshold = c_make_threshold(intercept, constant) + + cdef Fixed fixed = c_make_fixed(data, threshold, use_jump) + + cdef Pixel pixel = c_make_pixel(fixed, read_noise, resultants) + + cdef np.ndarray[float, ndim=1] resultants_ = np.array(pixel.resultants, dtype=np.float32) + + cdef np.ndarray[float, ndim=1] delta_1, delta_2 + cdef np.ndarray[float, ndim=1] sigma_1, sigma_2 + + if use_jump: + delta_1 = np.array(pixel.delta_1, dtype=np.float32) + delta_2 = np.array(pixel.delta_2, dtype=np.float32) + sigma_1 = np.array(pixel.sigma_1, dtype=np.float32) + sigma_2 = np.array(pixel.sigma_2, dtype=np.float32) + else: + try: + pixel.delta_1 + except AttributeError: + delta_1 = np.zeros(1, np.float32) + else: + raise AttributeError("delta_1 should not exist") + + try: + pixel.delta_2 + except AttributeError: 
+ delta_2 = np.zeros(1, np.float32) + else: + raise AttributeError("delta_2 should not exist") + + try: + pixel.sigma_1 + except AttributeError: + sigma_1 = np.zeros(1, np.float32) + else: + raise AttributeError("sigma_1 should not exist") + + try: + pixel.sigma_2 + except AttributeError: + sigma_2 = np.zeros(1, np.float32) + else: + raise AttributeError("sigma_2 should not exist") + + # only return computed values (assume fixed is correct) + return dict(resultants=resultants_, + read_noise=pixel.read_noise, + delta_1=delta_1, + delta_2=delta_2, + sigma_1=sigma_1, + sigma_2=sigma_2) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 35d69727..ee750957 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -4,7 +4,7 @@ from stcal.ramp_fitting.ols_cas22._wrappers import read_data from stcal.ramp_fitting.ols_cas22._wrappers import init_ramps -from stcal.ramp_fitting.ols_cas22._wrappers import make_threshold, run_threshold, make_fixed +from stcal.ramp_fitting.ols_cas22._wrappers import make_threshold, run_threshold, make_fixed, make_pixel def test_read_data(): """Test turning read_pattern into the time data""" @@ -119,3 +119,43 @@ def test_make_fixed(use_jump): assert fixed['recip_2'] == np.zeros(1, np.float32) assert fixed['slope_var_1'] == np.zeros(1, np.float32) assert fixed['slope_var_2'] == np.zeros(1, np.float32) + + +@pytest.mark.parametrize("use_jump", [True, False]) +def test_make_pixel(use_jump): + pattern = [[1, 2], [4, 5, 6], [7], [8, 9, 10, 11]] + data = read_data(pattern, 3.0) + + resultants = np.random.random(4).astype(np.float32) + read_noise = np.float32(1.4) + t_bar = np.array(data['t_bar'], dtype=np.float32) + tau = np.array(data['tau'], dtype=np.float32) + n_reads = np.array(data['n_reads'], dtype=np.int32) + intercept = np.float32(5.5) + constant = np.float32(1/3) + + pixel = make_pixel(resultants, t_bar, tau, n_reads, read_noise, intercept, constant, use_jump) + + assert (pixel['resultants'] == resultants).all() + assert read_noise == pixel['read_noise'] + + if use_jump: + single_gen = zip(pixel['delta_1'], pixel['sigma_1']) + double_gen = zip(pixel['delta_2'], pixel['sigma_2']) + + for index, (delta_1, sigma_1) in enumerate(single_gen): + assert delta_1 == (resultants[index + 1] - resultants[index]) / (t_bar[index + 1] - t_bar[index]) + assert sigma_1 == read_noise * ( + np.float32(1 / n_reads[index + 1]) + np.float32(1 / n_reads[index]) + ) + + for index, (delta_2, sigma_2) in enumerate(double_gen): + assert delta_2 == (resultants[index + 2] - resultants[index]) / (t_bar[index + 2] - t_bar[index]) + assert sigma_2 == read_noise * ( + np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) + ) + else: + assert pixel['delta_1'] == np.zeros(1, np.float32) + assert pixel['delta_2'] == np.zeros(1, np.float32) + assert pixel['sigma_1'] == np.zeros(1, np.float32) + assert pixel['sigma_2'] == np.zeros(1, np.float32) From fac7c5bc9836a0366817c8409d1e6060d18420db Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Tue, 5 Sep 2023 15:32:54 -0400 Subject: [PATCH 42/90] Add more testing for ramp fitting --- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 16 +- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 6 +- .../ramp_fitting/ols_cas22/_wrappers.pyx | 22 ++- tests/test_jump_cas22.py | 163 +++++++++++++++--- 4 files changed, 173 insertions(+), 34 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index f40a077f..4822f30d 100644 --- 
a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -1,3 +1,4 @@ +import re import numpy as np cimport numpy as np from libcpp.vector cimport vector @@ -58,10 +59,13 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, resend : np.ndarray[nramp] The last resultant in this ramp. """ - cdef int n_resultants = len(read_pattern) - if n_resultants != resultants.shape[0]: - raise RuntimeError(f'MA table length {n_resultants} does not ' - f'match number of resultants {resultants.shape[0]}') + cdef int n_pixels, n_resultants + n_resultants = resultants.shape[0] + n_pixels = resultants.shape[1] + + if n_resultants != len(read_pattern): + raise RuntimeError(f'The read pattern length {len(read_pattern)} does not ' + f'match number of resultants {n_resultants}') # Pre-compute data for all pixels cdef Fixed fixed = make_fixed(read_data(read_pattern, read_time), @@ -77,9 +81,9 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, # Perform all of the fits cdef RampFits ramp_fits cdef int index - for index in range(n_resultants): + for index in range(n_pixels): # Fit all the ramps for the given pixel - ramp_fits = make_pixel(fixed, read_noise, + ramp_fits = make_pixel(fixed, read_noise[index], resultants[:, index]).fit_ramps(pixel_ramps[index]) # Build the output arrays diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index b3763c9d..a331d8ed 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -125,11 +125,11 @@ cdef class Pixel: cdef int[:] n_reads_ = self.fixed.data.n_reads.data() # Setup data for fitting (work over subset of data) - cdef float[:] resultants = self.fixed.resultants[ramp.start:ramp.end + 1] + cdef float[:] resultants = self.resultants[ramp.start:ramp.end + 1] cdef float[:] t_bar = t_bar_[ramp.start:ramp.end + 1] cdef float[:] tau = tau_[ramp.start:ramp.end + 1] cdef int[:] n_reads = n_reads_[ramp.start:ramp.end + 1] - cdef float read_noise = self.fixed.read_noise + cdef float read_noise = self.read_noise # initalize fit cdef int i = 0, j = 0 @@ -280,7 +280,7 @@ cdef class Pixel: ramps.pop() # Compute fit - ramp_fit = self.ramp_fit(ramp) + ramp_fit = self.fit_ramp(ramp) if self.fixed.use_jump: stats = self.stats(ramp_fit.slope, ramp) diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index 4c9a9c96..60b5bf35 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -5,7 +5,7 @@ from libcpp cimport bool from libcpp.stack cimport stack from libcpp.deque cimport deque -from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, DerivedData, Thresh +from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, DerivedData, Thresh, RampFit from stcal.ramp_fitting.ols_cas22._core cimport read_data as c_read_data from stcal.ramp_fitting.ols_cas22._core cimport init_ramps as c_init_ramps from stcal.ramp_fitting.ols_cas22._core cimport make_threshold as c_make_threshold @@ -214,3 +214,23 @@ def make_pixel(np.ndarray[float, ndim=1] resultants, delta_2=delta_2, sigma_1=sigma_1, sigma_2=sigma_2) + + +def fit_ramp(np.ndarray[float, ndim=1] resultants, + np.ndarray[float, ndim=1] t_bar, + np.ndarray[float, ndim=1] tau, + np.ndarray[int, ndim=1] n_reads, + float read_noise, + int start, + int end): + + cdef DerivedData data = DerivedData(t_bar, tau, n_reads) + cdef Thresh threshold = c_make_threshold(0, 1) + cdef Fixed fixed 
= c_make_fixed(data, threshold, False) + + cdef Pixel pixel = c_make_pixel(fixed, read_noise, resultants) + cdef RampIndex ramp_index = RampIndex(start, end) + + cdef RampFit ramp_fit = pixel.fit_ramp(ramp_index) + + return ramp_fit diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index ee750957..48d8b753 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -4,12 +4,33 @@ from stcal.ramp_fitting.ols_cas22._wrappers import read_data from stcal.ramp_fitting.ols_cas22._wrappers import init_ramps -from stcal.ramp_fitting.ols_cas22._wrappers import make_threshold, run_threshold, make_fixed, make_pixel +from stcal.ramp_fitting.ols_cas22._wrappers import make_threshold, run_threshold, make_fixed, make_pixel, fit_ramp -def test_read_data(): +from stcal.ramp_fitting.ols_cas22 import fit_ramps + + +RNG = np.random.default_rng(619) +ROMAN_READ_TIME = 3.04 + + +@pytest.fixture(scope="module") +def base_ramp_data(): + """Basic data for simulating ramps for testing (not unpacked)""" + read_pattern = [ + [1, 2, 3, 4], + [5], + [6, 7, 8], + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18], + [19, 20, 21], + [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36] + ] + + yield read_pattern, read_data(read_pattern, ROMAN_READ_TIME) + + +def test_read_data(base_ramp_data): """Test turning read_pattern into the time data""" - pattern = [[1, 2], [4, 5, 6], [7], [8, 9, 10, 11]] - data = read_data(pattern, 3.0) + _, data = base_ramp_data # Basic sanity checks (structs become dicts) assert isinstance(data, dict) @@ -19,9 +40,9 @@ def test_read_data(): assert len(data) == 3 # Check that the data is correct - assert_allclose(data['t_bar'], [4.5, 15, 21, 28.5]) - assert_allclose(data['tau'], [3.75, 13.666667, 21, 26.625]) - assert data['n_reads'] == [2, 3, 1, 4] + assert_allclose(data['t_bar'], [7.6, 15.2, 21.279999, 41.040001, 60.799999, 88.159996]) + assert_allclose(data['tau'], [5.7, 15.2, 19.928888, 36.023998, 59.448887, 80.593781]) + assert data['n_reads'] == [4, 1, 3, 10, 3, 15] def test_init_ramps(): @@ -64,6 +85,7 @@ def test_init_ramps(): def test_threshold(): + """Test the threshold object/fucnction)""" intercept = np.float32(5.5) constant = np.float32(1/3) @@ -74,14 +96,21 @@ def test_threshold(): assert np.float32(intercept - constant) == run_threshold(thresh, 10.0) # check constant +@pytest.fixture(scope="module") +def ramp_data(base_ramp_data): + """Upacked data for simulating ramps for testing""" + t_bar = np.array(base_ramp_data[1]['t_bar'], dtype=np.float32) + tau = np.array(base_ramp_data[1]['tau'], dtype=np.float32) + n_reads = np.array(base_ramp_data[1]['n_reads'], dtype=np.int32) + + yield base_ramp_data[0], t_bar, tau, n_reads + + @pytest.mark.parametrize("use_jump", [True, False]) -def test_make_fixed(use_jump): - pattern = [[1, 2], [4, 5, 6], [7], [8, 9, 10, 11]] - data = read_data(pattern, 3.0) +def test_make_fixed(ramp_data, use_jump): + """Test computing the fixed data for all pixels""" + _, t_bar, tau, n_reads = ramp_data - t_bar = np.array(data['t_bar'], dtype=np.float32) - tau = np.array(data['tau'], dtype=np.float32) - n_reads = np.array(data['n_reads'], dtype=np.int32) intercept = np.float32(5.5) constant = np.float32(1/3) @@ -101,13 +130,13 @@ def test_make_fixed(use_jump): for index, (t_bar_1, t_bar_1_sq, recip_1, slope_var_1) in enumerate(single_gen): assert t_bar_1 == t_bar[index + 1] - t_bar[index] - assert t_bar_1_sq == (t_bar[index + 1] - t_bar[index])**2 + assert t_bar_1_sq == np.float32((t_bar[index + 1] - t_bar[index])**2) assert recip_1 == 
np.float32(1 / n_reads[index + 1]) + np.float32(1 / n_reads[index]) assert slope_var_1 == (tau[index + 1] + tau[index] - min(t_bar[index], t_bar[index + 1])) for index, (t_bar_2, t_bar_2_sq, recip_2, slope_var_2) in enumerate(double_gen): assert t_bar_2 == t_bar[index + 2] - t_bar[index] - assert t_bar_2_sq == (t_bar[index + 2] - t_bar[index])**2 + assert t_bar_2_sq == np.float32((t_bar[index + 2] - t_bar[index])**2) assert recip_2 == np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) assert slope_var_2 == (tau[index + 2] + tau[index] - min(t_bar[index], t_bar[index + 2])) else: @@ -121,16 +150,44 @@ def test_make_fixed(use_jump): assert fixed['slope_var_2'] == np.zeros(1, np.float32) +def _generate_resultants(read_pattern, flux, read_noise): + """Generate a set of resultants for a pixel""" + resultants = np.zeros(len(read_pattern), dtype=np.float32) + + # Use Poisson process to simulate the accumulation of the ramp + ramp_value = 0 # Last value of ramp + for index, reads in enumerate(read_pattern): + resultant_total = 0 # Total of all reads in this resultant + for _ in reads: + # Compute the next value of the ramp + ramp_value += RNG.poisson(flux * ROMAN_READ_TIME) # generate value to accumulate + ramp_value += RNG.standard_normal() * read_noise # include read noise + + # Add to running total for the resultant + resultant_total += ramp_value + + # Record the average value for resultant (i.e., the average of the reads) + resultants[index] = np.float32(resultant_total / len(reads)) + + return resultants + + +@pytest.fixture(scope="module") +def pixel_data(ramp_data): + read_noise = np.float32(5) + flux = 100 + + read_pattern, t_bar, tau, n_reads = ramp_data + resultants = _generate_resultants(read_pattern, flux, read_noise) + + yield resultants, t_bar, tau, n_reads, read_noise, flux + + @pytest.mark.parametrize("use_jump", [True, False]) -def test_make_pixel(use_jump): - pattern = [[1, 2], [4, 5, 6], [7], [8, 9, 10, 11]] - data = read_data(pattern, 3.0) - - resultants = np.random.random(4).astype(np.float32) - read_noise = np.float32(1.4) - t_bar = np.array(data['t_bar'], dtype=np.float32) - tau = np.array(data['tau'], dtype=np.float32) - n_reads = np.array(data['n_reads'], dtype=np.int32) +def test_make_pixel(pixel_data, use_jump): + """Test computing the pixel data""" + resultants, t_bar, tau, n_reads, read_noise, _ = pixel_data + intercept = np.float32(5.5) constant = np.float32(1/3) @@ -159,3 +216,61 @@ def test_make_pixel(use_jump): assert pixel['delta_2'] == np.zeros(1, np.float32) assert pixel['sigma_1'] == np.zeros(1, np.float32) assert pixel['sigma_2'] == np.zeros(1, np.float32) + + +def test_fit_ramp_slope(pixel_data): + """ + Test fitting the slope of a ramp + + Note that this only tests the slope, not the variances. 
Those require us do a more + statistical test, which can be done by fitting multiple ramps + """ + resultants, t_bar, tau, n_reads, read_noise, flux = pixel_data + + fit = fit_ramp(resultants, t_bar, tau, n_reads, read_noise, 0, len(resultants) - 1) + assert_allclose(fit['slope'], flux, atol=1, rtol=1e-3) + + +@pytest.fixture(scope="module") +def detector_data(ramp_data): + read_pattern, *_ = ramp_data + + n_pixels = 100_000 + read_noise = RNG.lognormal(5, size=n_pixels).astype(np.float32) + flux = 100 + + resultants = np.zeros((len(read_pattern), n_pixels), dtype=np.float32) + for index in range(n_pixels): + resultants[:, index] = _generate_resultants(read_pattern, flux, read_noise[index]) + + return resultants, read_noise, read_pattern + + +def _compute_averages(slope, read_var, poisson_var): + weights = (read_var != 0) / (read_var + (read_var == 0)) # Avoid divide by zero and map those to 0 + total_weight = np.sum(weights) + + average_slope = np.sum(weights * slope) / (total_weight + (total_weight == 0)) + average_read_var = np.sum(weights**2 * read_var) / (total_weight**2 + (total_weight == 0)) + average_poisson_var = np.sum(weights**2 * poisson_var) / (total_weight**2 + (total_weight == 0)) * average_slope + + return average_slope, average_read_var, average_poisson_var + + +def test_fit_ramps(detector_data): + """ + Test fitting ramps without jump detection + """ + resultants, read_noise, read_pattern = detector_data + dq = np.zeros(resultants.shape, dtype=np.int32) + + fit = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, False) + + slope = np.array(fit['slope'], dtype=np.float32) + read_var = np.array(fit['read_var'], dtype=np.float32) + poisson_var = np.array(fit['poisson_var'], dtype=np.float32) + + # Only one slope per pixel + assert slope.shape == (resultants.shape[1], 1) + assert read_var.shape == (resultants.shape[1], 1) + assert poisson_var.shape == (resultants.shape[1], 1) From bdf1351ad7979a081fd33889d6cfff28076bf925 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 6 Sep 2023 09:02:24 -0400 Subject: [PATCH 43/90] Fixed offset issue --- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 11 ++++--- tests/test_jump_cas22.py | 34 ++++++++++++--------- 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index a331d8ed..af881b52 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -86,8 +86,9 @@ cdef class Pixel: (resultants[i+offset] - resultants[i]) """ cdef float[:] resultants = self.resultants + cdef int end = len(resultants) - return np.subtract(resultants[offset:], resultants[:-offset]) + return np.subtract(resultants[offset:], resultants[:end - offset]) @cython.boundscheck(False) @cython.wraparound(False) @@ -131,24 +132,26 @@ cdef class Pixel: cdef int[:] n_reads = n_reads_[ramp.start:ramp.end + 1] cdef float read_noise = self.read_noise + cdef int end = ramp.end + 1 - ramp.start + # initalize fit cdef int i = 0, j = 0 cdef vector[float] weights = vector[float](n_resultants) cdef vector[float] coeffs = vector[float](n_resultants) - cdef float t_bar_mid = (t_bar[0] + t_bar[- 1]) / 2 + cdef float t_bar_mid = (t_bar[0] + t_bar[end]) / 2 # Casertano+2022 Eq. 44 # Note we've departed from Casertano+22 slightly; # there s is just resultants[ramp.end]. But that doesn't seem good if, e.g., # a CR in the first resultant has boosted the whole ramp high but there # is no actual signal. 
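# The hunk below swaps negative indexing for an explicit `end` index (these
# typed memoryviews are likely used with wraparound disabled, so resultants[-1]
# is not safe). The quantity being built is the Casertano+22 Eq. 44 signal
# estimate; a plain NumPy sketch with made-up numbers:
import numpy as np

resultants = np.array([12.0, 30.0, 55.0, 80.0], dtype=np.float32)  # example values
read_noise = 5.0
end = len(resultants) - 1
s = max(resultants[end] - resultants[0], 0.0)   # a falling ramp is clipped to zero signal
s = s / np.sqrt(read_noise**2 + s)              # roughly a signal-to-noise ratio
# this normalized value is what get_power(s) uses to pick the weighting exponent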
- cdef float s = max(resultants[-1] - resultants[0], 0) + cdef float s = max(resultants[end] - resultants[0], 0) s = s / sqrt(read_noise**2 + s) cdef float power = get_power(s) # It's easy to use up a lot of dynamic range on something like # (tbar - tbarmid) ** 10. Rescale these. - cdef float t_scale = (t_bar[-1] - t_bar[0]) / 2 + cdef float t_scale = (t_bar[end] - t_bar[0]) / 2 t_scale = 1 if t_scale == 0 else t_scale cdef float f0 = 0, f1 = 0, f2 = 0 diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 48d8b753..da7b7320 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -150,24 +150,29 @@ def test_make_fixed(ramp_data, use_jump): assert fixed['slope_var_2'] == np.zeros(1, np.float32) -def _generate_resultants(read_pattern, flux, read_noise): +def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1): """Generate a set of resultants for a pixel""" - resultants = np.zeros(len(read_pattern), dtype=np.float32) + resultants = np.zeros((len(read_pattern), n_pixels), dtype=np.float32) # Use Poisson process to simulate the accumulation of the ramp - ramp_value = 0 # Last value of ramp + ramp_value = np.zeros(n_pixels, dtype=np.float32) # Last value of ramp for index, reads in enumerate(read_pattern): - resultant_total = 0 # Total of all reads in this resultant + resultant_total = np.zeros(n_pixels, dtype=np.float32) # Total of all reads in this resultant for _ in reads: # Compute the next value of the ramp - ramp_value += RNG.poisson(flux * ROMAN_READ_TIME) # generate value to accumulate - ramp_value += RNG.standard_normal() * read_noise # include read noise + # - Poisson process for the flux + # - Gaussian process for the read noise + ramp_value += RNG.poisson(flux * ROMAN_READ_TIME, size=n_pixels).astype(np.float32) + ramp_value += RNG.standard_normal(size=n_pixels, dtype=np.float32) * read_noise # Add to running total for the resultant resultant_total += ramp_value # Record the average value for resultant (i.e., the average of the reads) - resultants[index] = np.float32(resultant_total / len(reads)) + resultants[index] = (resultant_total / len(reads)).astype(np.float32) + + if n_pixels == 1: + resultants = resultants[:, 0] return resultants @@ -221,14 +226,17 @@ def test_make_pixel(pixel_data, use_jump): def test_fit_ramp_slope(pixel_data): """ Test fitting the slope of a ramp - - Note that this only tests the slope, not the variances. 
Those require us do a more - statistical test, which can be done by fitting multiple ramps """ resultants, t_bar, tau, n_reads, read_noise, flux = pixel_data fit = fit_ramp(resultants, t_bar, tau, n_reads, read_noise, 0, len(resultants) - 1) - assert_allclose(fit['slope'], flux, atol=1, rtol=1e-3) + assert_allclose(fit['slope'], flux, atol=1, rtol=1e-7) + + # total_var = fit['read_var'] + fit['poisson_var'] * fit['slope'] + # assert False, total_var + # # chi2 = (fit['slope'] - flux)**2 / total_var**2 + + # assert np.abs(chi2 - 1) < 0.03 @pytest.fixture(scope="module") @@ -239,9 +247,7 @@ def detector_data(ramp_data): read_noise = RNG.lognormal(5, size=n_pixels).astype(np.float32) flux = 100 - resultants = np.zeros((len(read_pattern), n_pixels), dtype=np.float32) - for index in range(n_pixels): - resultants[:, index] = _generate_resultants(read_pattern, flux, read_noise[index]) + resultants = _generate_resultants(read_pattern, flux, read_noise, n_pixels=n_pixels) return resultants, read_noise, read_pattern From 6eca68a9f7ca065247afa3ccd708a7fa088e2dc3 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 6 Sep 2023 10:19:44 -0400 Subject: [PATCH 44/90] Bugfix endpoint --- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 2 +- tests/test_jump_cas22.py | 39 +++++++++++---------- 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index af881b52..2cbc4006 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -132,7 +132,7 @@ cdef class Pixel: cdef int[:] n_reads = n_reads_[ramp.start:ramp.end + 1] cdef float read_noise = self.read_noise - cdef int end = ramp.end + 1 - ramp.start + cdef int end = len(resultants) - 1 # initalize fit cdef int i = 0, j = 0 diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index da7b7320..01026f7d 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -230,13 +230,14 @@ def test_fit_ramp_slope(pixel_data): resultants, t_bar, tau, n_reads, read_noise, flux = pixel_data fit = fit_ramp(resultants, t_bar, tau, n_reads, read_noise, 0, len(resultants) - 1) - assert_allclose(fit['slope'], flux, atol=1, rtol=1e-7) - # total_var = fit['read_var'] + fit['poisson_var'] * fit['slope'] - # assert False, total_var - # # chi2 = (fit['slope'] - flux)**2 / total_var**2 + # check that the fit is correct is enough + assert_allclose(fit['slope'], flux, atol=1, rtol=1e-2) - # assert np.abs(chi2 - 1) < 0.03 + # check that the variances and slope are correct relative to each other + total_var = fit['read_var'] + fit['poisson_var'] * fit['slope'] + chi2 = (fit['slope'] - flux)**2 / total_var**2 + assert np.abs(chi2 - 1) < 0.03 @pytest.fixture(scope="module") @@ -252,15 +253,15 @@ def detector_data(ramp_data): return resultants, read_noise, read_pattern -def _compute_averages(slope, read_var, poisson_var): - weights = (read_var != 0) / (read_var + (read_var == 0)) # Avoid divide by zero and map those to 0 - total_weight = np.sum(weights) +# def _compute_averages(slope, read_var, poisson_var): +# weights = (read_var != 0) / (read_var + (read_var == 0)) # Avoid divide by zero and map those to 0 +# total_weight = np.sum(weights) - average_slope = np.sum(weights * slope) / (total_weight + (total_weight == 0)) - average_read_var = np.sum(weights**2 * read_var) / (total_weight**2 + (total_weight == 0)) - average_poisson_var = np.sum(weights**2 * poisson_var) / (total_weight**2 + (total_weight == 0)) * 
average_slope +# average_slope = np.sum(weights * slope) / (total_weight + (total_weight == 0)) +# average_read_var = np.sum(weights**2 * read_var) / (total_weight**2 + (total_weight == 0)) +# average_poisson_var = np.sum(weights**2 * poisson_var) / (total_weight**2 + (total_weight == 0)) * average_slope - return average_slope, average_read_var, average_poisson_var +# return average_slope, average_read_var, average_poisson_var def test_fit_ramps(detector_data): @@ -272,11 +273,11 @@ def test_fit_ramps(detector_data): fit = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, False) - slope = np.array(fit['slope'], dtype=np.float32) - read_var = np.array(fit['read_var'], dtype=np.float32) - poisson_var = np.array(fit['poisson_var'], dtype=np.float32) +# slope = np.array(fit['slope'], dtype=np.float32) +# read_var = np.array(fit['read_var'], dtype=np.float32) +# poisson_var = np.array(fit['poisson_var'], dtype=np.float32) - # Only one slope per pixel - assert slope.shape == (resultants.shape[1], 1) - assert read_var.shape == (resultants.shape[1], 1) - assert poisson_var.shape == (resultants.shape[1], 1) +# # Only one slope per pixel +# assert slope.shape == (resultants.shape[1], 1) +# assert read_var.shape == (resultants.shape[1], 1) +# assert poisson_var.shape == (resultants.shape[1], 1) From 8fffb21f957c7423d7d4c138c57aa6db90ec6290 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 6 Sep 2023 13:04:41 -0400 Subject: [PATCH 45/90] Clean up final ramp fitter --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 15 ++++---- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 21 ++++------- src/stcal/ramp_fitting/ols_cas22/_pixel.pxd | 2 +- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 35 +++++++++++++------ tests/test_jump_cas22.py | 35 +++++++------------ 5 files changed, 54 insertions(+), 54 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 2230ed2f..e922929e 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -1,6 +1,5 @@ from libcpp.vector cimport vector from libcpp.stack cimport stack -from libcpp.list cimport list as cpp_list from libcpp.deque cimport deque @@ -15,12 +14,16 @@ cdef struct RampFit: float poisson_var +cdef struct AverageRampFit: + float slope + float read_var + float poisson_var + + cdef struct RampFits: - cpp_list[float] slope - cpp_list[float] read_var - cpp_list[float] poisson_var - cpp_list[int] start - cpp_list[int] end + vector[RampFit] fits + vector[RampIndex] index + AverageRampFit average cdef struct DerivedData: diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 4822f30d..b77ea2c8 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -75,23 +75,16 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, # Compute all the initial sets of ramps cdef deque[stack[RampIndex]] pixel_ramps = init_ramps(dq) - cdef cpp_list[cpp_list[float]] slopes, read_vars, poisson_vars - cdef cpp_list[cpp_list[int]] starts, ends + # Use list because this might grow very large which would require constant + # reallocation. We don't need random access, and this gets cast to a python + # list in the end. 
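# When the cpp_list[RampFits] declared below is returned from this def
# function, Cython coerces the C++ structs into ordinary Python objects, so
# callers see roughly a list of nested dicts. A sketch of that shape with
# illustrative numbers (field names follow the RampFits/RampFit/AverageRampFit
# structs above; the exact converted form of RampIndex is assumed here):
fits = [
    {
        "fits": [{"slope": 99.7, "read_var": 0.04, "poisson_var": 0.3}],
        "index": [{"start": 0, "end": 5}],
        "average": {"slope": 99.7, "read_var": 0.04, "poisson_var": 0.3},
    },
]
total_var = fits[0]["average"]["read_var"] + fits[0]["average"]["poisson_var"]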
+ cdef cpp_list[RampFits] ramp_fits # Perform all of the fits - cdef RampFits ramp_fits cdef int index for index in range(n_pixels): # Fit all the ramps for the given pixel - ramp_fits = make_pixel(fixed, read_noise[index], - resultants[:, index]).fit_ramps(pixel_ramps[index]) + ramp_fits.push_back(make_pixel(fixed, read_noise[index], + resultants[:, index]).fit_ramps(pixel_ramps[index])) - # Build the output arrays - slopes.push_back(ramp_fits.slope) - read_vars.push_back(ramp_fits.read_var) - poisson_vars.push_back(ramp_fits.poisson_var) - starts.push_back(ramp_fits.start) - ends.push_back(ramp_fits.end) - - return dict(slope=slopes, read_var=read_vars, - poisson_var=poisson_vars, start=starts, end=ends) + return ramp_fits diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd index f0df4743..29e2a267 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd @@ -1,6 +1,6 @@ from libcpp.stack cimport stack -from stcal.ramp_fitting.ols_cas22._core cimport RampFit, RampFits, RampIndex, Thresh +from stcal.ramp_fitting.ols_cas22._core cimport RampFit, RampFits, RampIndex from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed cdef class Pixel: diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 2cbc4006..6389f0ad 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -275,6 +275,7 @@ cdef class Pixel: cdef RampFit ramp_fit cdef float [:] stats cdef int split + cdef float weight, total_weight = 0 # Run while the stack is non-empty while not ramps.empty(): @@ -301,16 +302,30 @@ cdef class Pixel: # Add ramp_fit to ramp_fits if no jump detection or stats are less # than threshold - # Note push_front and use of cpp_list are because ramps are computed - # backward in time meaning we need to add to the front of the list - # cpp_list over vector because need to append to the front which - # is slow for vector. Additionally, we don't need random access - # and cpp_list is closer to python lists then deque. 
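# The replacement below folds each segment fit into a running, read-variance
# weighted average. A plain NumPy sketch of the same combination (illustrative
# values; a zero read variance maps to zero weight):
import numpy as np

slopes = np.array([101.2, 98.7], dtype=np.float32)        # per-segment slopes
read_vars = np.array([0.8, 0.5], dtype=np.float32)
poisson_vars = np.array([0.3, 0.2], dtype=np.float32)

weights = np.zeros_like(read_vars)
nonzero = read_vars != 0
weights[nonzero] = 1.0 / read_vars[nonzero]

total = weights.sum()
avg_slope = (weights * slopes).sum() / (total if total != 0 else 1)
avg_read_var = (weights**2 * read_vars).sum() / (total**2 if total != 0 else 1)
avg_poisson_var = (weights**2 * poisson_vars).sum() / (total**2 if total != 0 else 1)
avg_poisson_var *= max(avg_slope, 0)   # Poisson term is scaled by the (non-negative) flux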
- ramp_fits.slope.push_front(ramp_fit.slope) - ramp_fits.read_var.push_front(ramp_fit.read_var) - ramp_fits.poisson_var.push_front(ramp_fit.poisson_var) - ramp_fits.start.push_front(ramp.start) - ramp_fits.end.push_front(ramp.end) + # Note that ramps are computed backward in time meaning we need to + # reverse the order of the fits at the end + ramp_fits.fits.push_back(ramp_fit) + ramp_fits.index.push_back(ramp) + + # Start computing the averages + weight = 0 if ramp_fit.read_var == 0 else 1 / ramp_fit.read_var + total_weight += weight + + ramp_fits.average.slope += weight * ramp_fit.slope + ramp_fits.average.read_var += weight**2 * ramp_fit.read_var + ramp_fits.average.poisson_var += weight**2 * ramp_fit.poisson_var + + # Reverse to order in time + ramp_fits.fits = ramp_fits.fits[::-1] + ramp_fits.index = ramp_fits.index[::-1] + + # Finish computing averages + ramp_fits.average.slope /= total_weight if total_weight != 0 else 1 + ramp_fits.average.read_var /= total_weight**2 if total_weight != 0 else 1 + ramp_fits.average.poisson_var /= total_weight**2 if total_weight != 0 else 1 + + # Multiply poisson term by flux, (no negative fluxes) + ramp_fits.average.poisson_var *= max(ramp_fits.average.slope, 0) return ramp_fits diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 01026f7d..8f2e5a22 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -236,7 +236,7 @@ def test_fit_ramp_slope(pixel_data): # check that the variances and slope are correct relative to each other total_var = fit['read_var'] + fit['poisson_var'] * fit['slope'] - chi2 = (fit['slope'] - flux)**2 / total_var**2 + chi2 = (fit["slope"] - flux)**2 / total_var**2 assert np.abs(chi2 - 1) < 0.03 @@ -245,39 +245,28 @@ def detector_data(ramp_data): read_pattern, *_ = ramp_data n_pixels = 100_000 - read_noise = RNG.lognormal(5, size=n_pixels).astype(np.float32) + read_noise = np.ones(n_pixels, dtype=np.float32) * 5 flux = 100 resultants = _generate_resultants(read_pattern, flux, read_noise, n_pixels=n_pixels) - return resultants, read_noise, read_pattern - - -# def _compute_averages(slope, read_var, poisson_var): -# weights = (read_var != 0) / (read_var + (read_var == 0)) # Avoid divide by zero and map those to 0 -# total_weight = np.sum(weights) - -# average_slope = np.sum(weights * slope) / (total_weight + (total_weight == 0)) -# average_read_var = np.sum(weights**2 * read_var) / (total_weight**2 + (total_weight == 0)) -# average_poisson_var = np.sum(weights**2 * poisson_var) / (total_weight**2 + (total_weight == 0)) * average_slope - -# return average_slope, average_read_var, average_poisson_var + return resultants, read_noise, read_pattern, n_pixels, flux def test_fit_ramps(detector_data): """ Test fitting ramps without jump detection """ - resultants, read_noise, read_pattern = detector_data + resultants, read_noise, read_pattern, n_pixels, flux = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) - fit = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, False) + fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, False) + + chi2 = 0 + for fit in fits: + total_var = fit['average']['read_var'] + fit['average']['poisson_var'] + chi2 += (fit['average']['slope'] - flux)**2 / total_var**2 -# slope = np.array(fit['slope'], dtype=np.float32) -# read_var = np.array(fit['read_var'], dtype=np.float32) -# poisson_var = np.array(fit['poisson_var'], dtype=np.float32) + chi2 /= n_pixels -# # Only one slope per pixel -# assert slope.shape == 
(resultants.shape[1], 1) -# assert read_var.shape == (resultants.shape[1], 1) -# assert poisson_var.shape == (resultants.shape[1], 1) + assert np.abs(chi2 - 1) < 0.03 From b5de3e661f25db0ee318f26c6b221402865fb6de Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 6 Sep 2023 14:01:01 -0400 Subject: [PATCH 46/90] Change Thresh from class to struct --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 10 ++--- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 38 ++++--------------- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 4 +- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 4 +- .../ramp_fitting/ols_cas22/_wrappers.pyx | 18 ++++----- tests/test_jump_cas22.py | 8 ++-- 6 files changed, 26 insertions(+), 56 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index e922929e..92bb3a22 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -32,13 +32,11 @@ cdef struct DerivedData: vector[int] n_reads -cdef class Thresh: - cdef float intercept - cdef float constant +cdef struct Thresh: + float intercept + float constant - cdef float run(Thresh self, float slope) - -cdef Thresh make_threshold(float intercept, float constant) +cdef float threshold(Thresh thresh, float slope) cdef float get_power(float s) cdef deque[stack[RampIndex]] init_ramps(int[:, :] dq) cdef DerivedData read_data(list[list[int]] read_pattern, float read_time) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index 00dfa5bc..652da0e4 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -73,44 +73,22 @@ cdef inline float get_power(float s): return PTABLE[1][i] -cdef class Thresh: - cdef inline float run(Thresh self, float slope): - """ - Compute jump threshold - - Parameters - ---------- - slope : float - slope of the ramp in question - - Returns - ------- - intercept - constant * log10(slope) - """ - return self.intercept - self.constant * log10(slope) - - -cdef Thresh make_threshold(float intercept, float constant): +cdef inline float threshold(Thresh thresh, float slope): """ - Create a Thresh object + Compute jump threshold Parameters ---------- - intercept : float - intercept of the threshold - constant : float - constant of the threshold + thresh : Thresh + threshold parameters struct + slope : float + slope of the ramp in question Returns ------- - Thresh object + intercept - constant * log10(slope) """ - - thresh = Thresh() - thresh.intercept = intercept - thresh.constant = constant - - return thresh + return thresh.intercept - thresh.constant * log10(slope) cdef inline deque[stack[RampIndex]] init_ramps(int[:, :] dq): diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index b77ea2c8..a181a43d 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -8,7 +8,7 @@ from libcpp.deque cimport deque cimport cython from stcal.ramp_fitting.ols_cas22._core cimport ( - RampFits, RampIndex, make_threshold, read_data, init_ramps) + RampFits, RampIndex, Thresh, read_data, init_ramps) from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed, Fixed from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel @@ -69,7 +69,7 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, # Pre-compute data for all pixels cdef Fixed fixed = make_fixed(read_data(read_pattern, read_time), - make_threshold(5.5, 
1/3.0), + Thresh(5.5, 1/3.0), use_jumps) # Compute all the initial sets of ramps diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 6389f0ad..134006fc 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -21,7 +21,7 @@ cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport get_power, RampFit, RampFits, RampIndex +from stcal.ramp_fitting.ols_cas22._core cimport get_power, threshold, RampFit, RampFits, RampIndex from stcal.ramp_fitting.ols_cas22._pixel cimport Pixel @@ -289,7 +289,7 @@ cdef class Pixel: if self.fixed.use_jump: stats = self.stats(ramp_fit.slope, ramp) - if max(stats) > self.threshold.run(ramp_fit.slope): + if max(stats) > threshold(self.threshold, ramp_fit.slope): # Compute split point to create two new ramps split = np.argmax(stats) diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index 60b5bf35..1882b01c 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -5,10 +5,9 @@ from libcpp cimport bool from libcpp.stack cimport stack from libcpp.deque cimport deque -from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, DerivedData, Thresh, RampFit +from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, DerivedData, Thresh, RampFit, threshold from stcal.ramp_fitting.ols_cas22._core cimport read_data as c_read_data from stcal.ramp_fitting.ols_cas22._core cimport init_ramps as c_init_ramps -from stcal.ramp_fitting.ols_cas22._core cimport make_threshold as c_make_threshold from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed as c_make_fixed @@ -42,12 +41,9 @@ def init_ramps(np.ndarray[int, ndim=2] dq): return out -def make_threshold(float intercept, float constant): - return c_make_threshold(intercept, constant) - - -def run_threshold(Thresh threshold, float slope): - return threshold.run(slope) +def run_threshold(float intercept, float constant, float slope): + cdef Thresh thresh = Thresh(intercept, constant) + return threshold(thresh, slope) def make_fixed(np.ndarray[float, ndim=1] t_bar, @@ -58,7 +54,7 @@ def make_fixed(np.ndarray[float, ndim=1] t_bar, bool use_jump): cdef DerivedData data = DerivedData(t_bar, tau, n_reads) - cdef Thresh threshold = c_make_threshold(intercept, constant) + cdef Thresh threshold = Thresh(intercept, constant) cdef Fixed fixed = c_make_fixed(data, threshold, use_jump) @@ -162,7 +158,7 @@ def make_pixel(np.ndarray[float, ndim=1] resultants, bool use_jump): cdef DerivedData data = DerivedData(t_bar, tau, n_reads) - cdef Thresh threshold = c_make_threshold(intercept, constant) + cdef Thresh threshold = Thresh(intercept, constant) cdef Fixed fixed = c_make_fixed(data, threshold, use_jump) @@ -225,7 +221,7 @@ def fit_ramp(np.ndarray[float, ndim=1] resultants, int end): cdef DerivedData data = DerivedData(t_bar, tau, n_reads) - cdef Thresh threshold = c_make_threshold(0, 1) + cdef Thresh threshold = Thresh(0, 1) cdef Fixed fixed = c_make_fixed(data, threshold, False) cdef Pixel pixel = c_make_pixel(fixed, read_noise, resultants) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 8f2e5a22..b385f5a5 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -4,7 +4,7 @@ from stcal.ramp_fitting.ols_cas22._wrappers import read_data from stcal.ramp_fitting.ols_cas22._wrappers import init_ramps -from 
stcal.ramp_fitting.ols_cas22._wrappers import make_threshold, run_threshold, make_fixed, make_pixel, fit_ramp +from stcal.ramp_fitting.ols_cas22._wrappers import run_threshold, make_fixed, make_pixel, fit_ramp from stcal.ramp_fitting.ols_cas22 import fit_ramps @@ -89,11 +89,9 @@ def test_threshold(): intercept = np.float32(5.5) constant = np.float32(1/3) - thresh = make_threshold(intercept, constant) - # Parameters are not directly accessible - assert intercept == run_threshold(thresh, 1.0) # check intercept - assert np.float32(intercept - constant) == run_threshold(thresh, 10.0) # check constant + assert intercept == run_threshold(intercept, constant, 1.0) # check intercept + assert np.float32(intercept - constant) == run_threshold(intercept, constant, 10.0) # check constant @pytest.fixture(scope="module") From 308c2ee0587113f68e7a20978218701ec819eaef Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 20 Sep 2023 10:18:07 -0400 Subject: [PATCH 47/90] Add tests for fitting ramps with dq flags --- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 5 ++ tests/test_jump_cas22.py | 59 +++++++++++++-------- 2 files changed, 43 insertions(+), 21 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 134006fc..6c07ad61 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -273,6 +273,11 @@ cdef class Pixel: cdef RampFits ramp_fits cdef RampIndex ramp cdef RampFit ramp_fit + + ramp_fits.average.slope = 0 + ramp_fits.average.read_var = 0 + ramp_fits.average.poisson_var = 0 + cdef float [:] stats cdef int split cdef float weight, total_weight = 0 diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index b385f5a5..285282ec 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -161,7 +161,7 @@ def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1): # - Poisson process for the flux # - Gaussian process for the read noise ramp_value += RNG.poisson(flux * ROMAN_READ_TIME, size=n_pixels).astype(np.float32) - ramp_value += RNG.standard_normal(size=n_pixels, dtype=np.float32) * read_noise + ramp_value += RNG.standard_normal(size=n_pixels, dtype=np.float32) * read_noise / np.sqrt(len(reads)) # Add to running total for the resultant resultant_total += ramp_value @@ -221,23 +221,6 @@ def test_make_pixel(pixel_data, use_jump): assert pixel['sigma_2'] == np.zeros(1, np.float32) -def test_fit_ramp_slope(pixel_data): - """ - Test fitting the slope of a ramp - """ - resultants, t_bar, tau, n_reads, read_noise, flux = pixel_data - - fit = fit_ramp(resultants, t_bar, tau, n_reads, read_noise, 0, len(resultants) - 1) - - # check that the fit is correct is enough - assert_allclose(fit['slope'], flux, atol=1, rtol=1e-2) - - # check that the variances and slope are correct relative to each other - total_var = fit['read_var'] + fit['poisson_var'] * fit['slope'] - chi2 = (fit["slope"] - flux)**2 / total_var**2 - assert np.abs(chi2 - 1) < 0.03 - - @pytest.fixture(scope="module") def detector_data(ramp_data): read_pattern, *_ = ramp_data @@ -251,20 +234,54 @@ def detector_data(ramp_data): return resultants, read_noise, read_pattern, n_pixels, flux -def test_fit_ramps(detector_data): +def test_fit_ramps_no_dq(detector_data): """ - Test fitting ramps without jump detection + Test fitting ramps without jump detection and no dq flags set """ resultants, read_noise, read_pattern, n_pixels, flux = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) fits 
= fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, False) + assert len(fits) == n_pixels chi2 = 0 for fit in fits: + assert len(fit['fits']) == 1 # only one fit per pixel since no dq/jump + total_var = fit['average']['read_var'] + fit['average']['poisson_var'] - chi2 += (fit['average']['slope'] - flux)**2 / total_var**2 + chi2 += (fit['average']['slope'] - flux)**2 / total_var chi2 /= n_pixels assert np.abs(chi2 - 1) < 0.03 + + +def test_fit_ramps_dq(detector_data): + """ + Test fitting ramps without jump detection, but with dq flags set + """ + resultants, read_noise, read_pattern, n_pixels, flux = detector_data + dq = np.zeros(resultants.shape, dtype=np.int32) + (RNG.uniform(size=resultants.shape) > 1).astype(np.int32) + + # only use okay ramps + # ramps passing the below criterion have at least two adjacent valid reads + # i.e., we can make a measurement from them. + okay = np.sum((dq[1:, :] == 0) & (dq[:-1, :] == 0), axis=0) != 0 + + fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, False) + + + chi2 = 0 + for fit, use in zip(fits, okay): + if use: + # Add okay ramps to chi2 + total_var = fit['average']['read_var'] + fit['average']['poisson_var'] + chi2 += (fit['average']['slope'] - flux)**2 / total_var + else: + # Check no slope fit for bad ramps + assert fit['average']['slope'] == 0 + assert fit['average']['read_var'] == 0 + assert fit['average']['poisson_var'] == 0 + + chi2 /= np.sum(okay) + assert np.abs(chi2 - 1) < 0.03 From 1fa80789c616eeb560f6e2aa56e36d30dcfd6ec6 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 20 Sep 2023 15:11:14 -0400 Subject: [PATCH 48/90] Add testing of jump detection (with no jumps added) to existing tests --- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 13 ++++++++----- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 8 ++++---- tests/test_jump_cas22.py | 19 ++++++++++++------- 3 files changed, 24 insertions(+), 16 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index a181a43d..5a474c3f 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -1,7 +1,6 @@ -import re import numpy as np cimport numpy as np -from libcpp.vector cimport vector +from libcpp cimport bool from libcpp.stack cimport stack from libcpp.list cimport list as cpp_list from libcpp.deque cimport deque @@ -17,9 +16,10 @@ from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel @cython.wraparound(False) def fit_ramps(np.ndarray[float, ndim=2] resultants, np.ndarray[int, ndim=2] dq, - np.ndarray[float, ndim=1] read_noise, read_time, + np.ndarray[float, ndim=1] read_noise, + float read_time, list[list[int]] read_pattern, - int use_jumps=False): + bool use_jump=False): """Fit ramps using the Casertano+22 algorithm. This implementation fits all ramp segments between bad pixels @@ -40,6 +40,9 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, Time to perform a readout. For Roman data, this is FRAME_TIME. read_pattern : list[list[int]] the read pattern for the image + use_jump : bool + If True, use the jump detection algorithm to identify CRs. + If False, use the DQ array to identify CRs. 
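    A minimal call sketch (mirroring the tests later in this series; the
    resultants array is n_resultants x n_pixels and the values here are
    illustrative only):

        import numpy as np
        from stcal.ramp_fitting.ols_cas22 import fit_ramps

        read_pattern = [[1, 2, 3, 4], [5], [6, 7, 8], [9, 10], [11, 12], [13]]
        resultants = np.zeros((len(read_pattern), 100), dtype=np.float32)
        dq = np.zeros(resultants.shape, dtype=np.int32)
        read_noise = np.full(100, 5.0, dtype=np.float32)

        fits = fit_ramps(resultants, dq, read_noise, 3.04, read_pattern, use_jump=False)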
Returns ------- @@ -70,7 +73,7 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, # Pre-compute data for all pixels cdef Fixed fixed = make_fixed(read_data(read_pattern, read_time), Thresh(5.5, 1/3.0), - use_jumps) + use_jump) # Compute all the initial sets of ramps cdef deque[stack[RampIndex]] pixel_ramps = init_ramps(dq) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 6c07ad61..0c832c77 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -231,10 +231,10 @@ cdef class Pixel: cdef int start = ramp.start cdef int end = ramp.end - 1 - cdef float[:] slope_var_1 = np.zeros(end - start + 1, dtype=np.float32) - cdef float[:] slope_var_2 = np.zeros(end - start + 1, dtype=np.float32) + cdef float[:] slope_var_1 = np.zeros(end - start, dtype=np.float32) + cdef float[:] slope_var_2 = np.zeros(end - start, dtype=np.float32) cdef int i - for i in range(end - start + 1): + for i in range(end - start): slope_var_1[i] = slope * self.fixed.slope_var_1[start + i] * self.correction(start + i, 1, ramp) slope_var_2[i] = slope * self.fixed.slope_var_2[start + i] * self.correction(start + i, 2, ramp) @@ -294,7 +294,7 @@ cdef class Pixel: if self.fixed.use_jump: stats = self.stats(ramp_fit.slope, ramp) - if max(stats) > threshold(self.threshold, ramp_fit.slope): + if max(stats) > threshold(self.fixed.threshold, ramp_fit.slope): # Compute split point to create two new ramps split = np.argmax(stats) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 285282ec..0a69eb8f 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -234,14 +234,17 @@ def detector_data(ramp_data): return resultants, read_noise, read_pattern, n_pixels, flux -def test_fit_ramps_no_dq(detector_data): +@pytest.mark.parametrize("use_jump", [True, False]) +def test_fit_ramps_no_dq(detector_data, use_jump): """ - Test fitting ramps without jump detection and no dq flags set + Test fitting ramps with no dq flags set on data which has no jumps + Since no jumps are simulated in the data, jump detection shouldn't pick + up any jumps. """ resultants, read_noise, read_pattern, n_pixels, flux = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) - fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, False) + fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) assert len(fits) == n_pixels chi2 = 0 @@ -256,9 +259,12 @@ def test_fit_ramps_no_dq(detector_data): assert np.abs(chi2 - 1) < 0.03 -def test_fit_ramps_dq(detector_data): +@pytest.mark.parametrize("use_jump", [True, False]) +def test_fit_ramps_dq(detector_data, use_jump): """ - Test fitting ramps without jump detection, but with dq flags set + Test fitting ramps with dq flags set + Since no jumps are simulated in the data, jump detection shouldn't pick + up any jumps. """ resultants, read_noise, read_pattern, n_pixels, flux = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) + (RNG.uniform(size=resultants.shape) > 1).astype(np.int32) @@ -268,8 +274,7 @@ def test_fit_ramps_dq(detector_data): # i.e., we can make a measurement from them. 
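# A worked example of this criterion (a sketch, separate from the test data):
# two single-pixel dq columns, five resultants each.
import numpy as np
dq_demo = np.array([[0, 0],
                    [1, 1],
                    [0, 0],
                    [0, 1],
                    [1, 0]], dtype=np.int32)
okay_demo = np.sum((dq_demo[1:, :] == 0) & (dq_demo[:-1, :] == 0), axis=0) != 0
# column 0 has the adjacent unflagged pair at resultants (2, 3) -> kept;
# column 1 never has two adjacent unflagged resultants -> dropped.
assert okay_demo.tolist() == [True, False]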
okay = np.sum((dq[1:, :] == 0) & (dq[:-1, :] == 0), axis=0) != 0 - fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, False) - + fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) chi2 = 0 for fit, use in zip(fits, okay): From ea95dcdf7da257df7093fae537862b1dde9983d4 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 20 Sep 2023 15:27:49 -0400 Subject: [PATCH 49/90] add test with jumps in data --- tests/test_jump_cas22.py | 52 +++++++++++++++++++++++++++++++--------- 1 file changed, 41 insertions(+), 11 deletions(-) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 0a69eb8f..9b0cb959 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -11,6 +11,9 @@ RNG = np.random.default_rng(619) ROMAN_READ_TIME = 3.04 +N_PIXELS = 100_000 +FLUX = 100 +JUMP_VALUE = 10_000 @pytest.fixture(scope="module") @@ -148,20 +151,30 @@ def test_make_fixed(ramp_data, use_jump): assert fixed['slope_var_2'] == np.zeros(1, np.float32) -def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1): +def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1, add_jumps=False): """Generate a set of resultants for a pixel""" resultants = np.zeros((len(read_pattern), n_pixels), dtype=np.float32) + jumps = [] # Use Poisson process to simulate the accumulation of the ramp ramp_value = np.zeros(n_pixels, dtype=np.float32) # Last value of ramp for index, reads in enumerate(read_pattern): resultant_total = np.zeros(n_pixels, dtype=np.float32) # Total of all reads in this resultant + read_jumps = [] for _ in reads: # Compute the next value of the ramp # - Poisson process for the flux # - Gaussian process for the read noise ramp_value += RNG.poisson(flux * ROMAN_READ_TIME, size=n_pixels).astype(np.float32) ramp_value += RNG.standard_normal(size=n_pixels, dtype=np.float32) * read_noise / np.sqrt(len(reads)) + if add_jumps: + # Add jumps only to ~1% of the pixels for any given read + jump_points = RNG.standard_normal(size=n_pixels, dtype=np.float32) > 0.99 + read_jumps.append(jump_points) + + # Add a large value to the ramp + ramp_value += (JUMP_VALUE * jump_points).astype(np.float32) + # Add to running total for the resultant resultant_total += ramp_value @@ -169,21 +182,23 @@ def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1): # Record the average value for resultant (i.e., the average of the reads) resultants[index] = (resultant_total / len(reads)).astype(np.float32) + # Record all the jumps for this resultant + jumps.append(read_jumps) + if n_pixels == 1: resultants = resultants[:, 0] - return resultants + return resultants, jumps @pytest.fixture(scope="module") def pixel_data(ramp_data): read_noise = np.float32(5) - flux = 100 read_pattern, t_bar, tau, n_reads = ramp_data - resultants = _generate_resultants(read_pattern, flux, read_noise) + resultants, _ = _generate_resultants(read_pattern, FLUX, read_noise) - yield resultants, t_bar, tau, n_reads, read_noise, flux + yield resultants, t_bar, tau, n_reads, read_noise, FLUX @pytest.mark.parametrize("use_jump", [True, False]) @@ -224,14 +239,11 @@ def test_make_pixel(pixel_data, use_jump): @pytest.fixture(scope="module") def detector_data(ramp_data): read_pattern, *_ = ramp_data + read_noise = np.ones(N_PIXELS, dtype=np.float32) * 5 - n_pixels = 100_000 - read_noise = np.ones(n_pixels, dtype=np.float32) * 5 - flux = 100 - - resultants = _generate_resultants(read_pattern, flux, read_noise, n_pixels=n_pixels) + resultants, _ = 
_generate_resultants(read_pattern, FLUX, read_noise, n_pixels=N_PIXELS) - return resultants, read_noise, read_pattern, n_pixels, flux + return resultants, read_noise, read_pattern, N_PIXELS, FLUX @pytest.mark.parametrize("use_jump", [True, False]) @@ -290,3 +302,21 @@ def test_fit_ramps_dq(detector_data, use_jump): chi2 /= np.sum(okay) assert np.abs(chi2 - 1) < 0.03 + + +@pytest.fixture(scope="module") +def jump_data(ramp_data): + read_pattern, *_ = ramp_data + read_noise = np.ones(N_PIXELS, dtype=np.float32) * 5 + + resultants, _ = _generate_resultants(read_pattern, FLUX, read_noise, n_pixels=N_PIXELS, add_jumps=True) + + return resultants, read_noise, read_pattern, N_PIXELS, FLUX + + +def test_fit_ramps_with_jumps_no_dq(jump_data): + resultants, read_noise, read_pattern, n_pixels, flux = jump_data + dq = np.zeros(resultants.shape, dtype=np.int32) + + fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) + assert len(fits) == n_pixels From 8cbc2cd9a6eb9a72b13b1ae7b67e06ae22779f18 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Tue, 26 Sep 2023 16:36:12 -0400 Subject: [PATCH 50/90] Clean up some indexing and variables --- src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 14 +-- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 96 +++++++------- src/stcal/ramp_fitting/ols_cas22/_pixel.pxd | 9 +- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 92 ++++++++------ .../ramp_fitting/ols_cas22/_wrappers.pyx | 117 ++++-------------- tests/test_jump_cas22.py | 56 +++++---- 6 files changed, 161 insertions(+), 223 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd index 311463fc..51d4cf22 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -8,15 +8,13 @@ cdef class Fixed: cdef DerivedData data cdef Thresh threshold - cdef float[:] t_bar_1, t_bar_2 - cdef float[:] t_bar_1_sq, t_bar_2_sq - cdef float[:] recip_1, recip_2 - cdef float[:] slope_var_1, slope_var_2 + cdef float[:, :] t_bar_diff + cdef float[:, :] recip + cdef float[:, :] slope_var - cdef float[:] t_bar_diff(Fixed self, int offset) - cdef float[:] t_bar_diff_sq(Fixed self, int offset) - cdef float[:] recip_val(Fixed self, int offset) - cdef float[:] slope_var_val(Fixed self, int offset) + cdef float[:, :] t_bar_diff_val(Fixed self) + cdef float[:, :] recip_val(Fixed self) + cdef float[:, :] slope_var_val(Fixed self) cdef Fixed make_fixed(DerivedData data, Thresh threshold, bool use_jump) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index 1807598a..215ba592 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -72,18 +72,15 @@ cdef class Fixed: from pre-computing the values and reusing them. 
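When use_jump is enabled, make_fixed now caches the single- and double-difference quantities as two-row arrays (row 0 for the i,i+1 differences, row 1 for the i,i+2 differences). A plain-NumPy sketch of that pre-computation, with illustrative function and argument names rather than the module's own:

    import numpy as np

    def precompute_fixed(t_bar, tau, n_reads):
        """Sketch of the Fixed pre-computation used for jump detection.

        Rows start as NaN so the undefined last double-difference entry
        stays NaN; everything else mirrors the vectorized Cython code.
        """
        t_bar = np.asarray(t_bar, dtype=np.float32)
        tau = np.asarray(tau, dtype=np.float32)
        n_reads = np.asarray(n_reads, dtype=np.float32)
        n = len(t_bar)

        t_bar_diff = np.full((2, n - 1), np.nan, dtype=np.float32)
        recip = np.full((2, n - 1), np.nan, dtype=np.float32)
        slope_var = np.full((2, n - 1), np.nan, dtype=np.float32)

        t_bar_diff[0, :] = t_bar[1:] - t_bar[:-1]
        t_bar_diff[1, :-1] = t_bar[2:] - t_bar[:-2]

        recip[0, :] = 1.0 / n_reads[1:] + 1.0 / n_reads[:-1]
        recip[1, :-1] = 1.0 / n_reads[2:] + 1.0 / n_reads[:-2]

        slope_var[0, :] = tau[1:] + tau[:-1] - np.minimum(t_bar[1:], t_bar[:-1])
        slope_var[1, :-1] = tau[2:] + tau[:-2] - np.minimum(t_bar[2:], t_bar[:-2])

        return t_bar_diff, recip, slope_var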
""" - cdef inline float[:] t_bar_diff(Fixed self, int offset): + cdef inline float[:, :] t_bar_diff_val(Fixed self): """ Compute the difference offset of t_bar - Parameters - ---------- - offset : int - index offset to compute difference - Returns ------- - t_bar[i+offset] - t_bar[i] + [ + , + , """ # Cast vector to memory view # This way of doing it is potentially memory unsafe because the memory @@ -93,36 +90,28 @@ cdef class Fixed: # stays local to the function (numpy operations create brand new objects) cdef float[:] t_bar = self.data.t_bar.data() - return np.subtract(t_bar[offset:], t_bar[:-offset]) - - cdef inline float[:] t_bar_diff_sq(Fixed self, int offset): - """ - Compute the square difference offset of t_bar + cdef np.ndarray[float, ndim=2] t_bar_diff = np.zeros((2, self.data.t_bar.size() - 1), dtype=np.float32) - Parameters - ---------- - offset : int - index offset + t_bar_diff[0, :] = np.subtract(t_bar[1:], t_bar[:-1]) + t_bar_diff[1, :-1] = np.subtract(t_bar[2:], t_bar[:-2]) + t_bar_diff[1, -1] = np.nan # last double difference is undefined - Returns - ------- - (t_bar[i+offset] - t_bar[i])**2 - """ - return np.array(self.t_bar_diff(offset)) ** 2 + return t_bar_diff - cdef inline float[:] recip_val(Fixed self, int offset): + cdef inline float[:, :] recip_val(Fixed self): """ Compute the recip values - (1/n_reads[i+offset] + 1/n_reads[i]) - - Parameters - ---------- - offset : int - index offset + (1/n_reads[i+1] + 1/n_reads[i]) + and + (1/n_reads[i+2] + 1/n_reads[i]) Returns ------- - (1/n_reads[i+offset] + 1/n_reads[i]) + [ + <(1/n_reads[i+1] + 1/n_reads[i])>, + <(1/n_reads[i+2] + 1/n_reads[i])> + ] + """ # Cast vector to memory view # This way of doing it is potentially memory unsafe because the memory @@ -132,23 +121,27 @@ cdef class Fixed: # stays local to the function (numpy operations create brand new objects) cdef int[:] n_reads = self.data.n_reads.data() - return (np.divide(1.0, n_reads[offset:], dtype=np.float32) + - np.divide(1.0, n_reads[:-offset], dtype=np.float32)) + cdef np.ndarray[float, ndim=2] recip = np.zeros((2, self.data.n_reads.size() - 1), dtype=np.float32) + recip[0, :] = (np.divide(1.0, n_reads[1:], dtype=np.float32) + + np.divide(1.0, n_reads[:-1], dtype=np.float32)) + recip[1, :-1] = (np.divide(1.0, n_reads[2:], dtype=np.float32) + + np.divide(1.0, n_reads[:-2], dtype=np.float32)) + recip[1, -1] = np.nan # last double difference is undefined + + return recip - cdef inline float[:] slope_var_val(Fixed self, int offset): - """ - Compute the sigma values - Parameters - ---------- - offset : int - index offset + cdef inline float[:, :] slope_var_val(Fixed self): + """ + Compute slope part of the variance Returns ------- - (tau[i] + tau[i+offset] - min(t_bar[i], t_bar[i+offset])) * - correction(i, i+offset) + [ + <(tau[i] + tau[i+1] - min(t_bar[i], t_bar[i+1])) * correction(i, i+1)>, + <(tau[i] + tau[i+2] - min(t_bar[i], t_bar[i+2])) * correction(i, i+2)>, + ] """ # Cast vectors to memory views # This way of doing it is potentially memory unsafe because the memory @@ -159,8 +152,13 @@ cdef class Fixed: cdef float[:] t_bar = self.data.t_bar.data() cdef float[:] tau = self.data.tau.data() - return (np.add(tau[offset:], tau[:-offset]) - - np.minimum(t_bar[offset:], t_bar[:-offset])) + cdef np.ndarray[float, ndim=2] slope_var = np.zeros((2, self.data.t_bar.size() - 1), dtype=np.float32) + + slope_var[0, :] = (np.add(tau[1:], tau[:-1]) - np.minimum(t_bar[1:], t_bar[:-1])) + slope_var[1, :-1] = (np.add(tau[2:], tau[:-2]) - np.minimum(t_bar[2:], t_bar[:-2])) + 
slope_var[1, -1] = np.nan # last double difference is undefined + + return slope_var cdef inline Fixed make_fixed(DerivedData data, Thresh threshold, bool use_jump): @@ -193,16 +191,8 @@ cdef inline Fixed make_fixed(DerivedData data, Thresh threshold, bool use_jump): # Pre-compute jump detection computations shared by all pixels if use_jump: - fixed.t_bar_1 = fixed.t_bar_diff(1) - fixed.t_bar_2 = fixed.t_bar_diff(2) - - fixed.t_bar_1_sq = fixed.t_bar_diff_sq(1) - fixed.t_bar_2_sq = fixed.t_bar_diff_sq(2) - - fixed.recip_1 = fixed.recip_val(1) - fixed.recip_2 = fixed.recip_val(2) - - fixed.slope_var_1 = fixed.slope_var_val(1) - fixed.slope_var_2 = fixed.slope_var_val(2) + fixed.t_bar_diff = fixed.t_bar_diff_val() + fixed.recip = fixed.recip_val() + fixed.slope_var = fixed.slope_var_val() return fixed diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd index 29e2a267..95e4d4fd 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd @@ -8,13 +8,14 @@ cdef class Pixel: cdef float read_noise cdef float [:] resultants - cdef float[:] delta_1, delta_2 - cdef float[:] sigma_1, sigma_2 + cdef float[:, :] delta + cdef float[:, :] sigma - cdef float[:] resultants_diff(Pixel self, int offset) + cdef float[:, :] delta_val(Pixel self) cdef RampFit fit_ramp(Pixel self, RampIndex ramp) - cdef float correction(Pixel self, int i, int offset, RampIndex ramp) + cdef float correction(Pixel self, RampIndex ramp, int index, int diff) + cdef float stat(Pixel self, float slope, RampIndex ramp, int index, int diff) cdef float[:] stats(Pixel self, float slope, RampIndex ramp) cdef RampFits fit_ramps(Pixel self, stack[RampIndex] ramps) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 0c832c77..510260eb 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -73,14 +73,10 @@ cdef class Pixel: @cython.boundscheck(False) @cython.wraparound(False) - cdef inline float[:] resultants_diff(Pixel self, int offset): + cdef inline float[:, :] delta_val(Pixel self): """ Compute the difference offset of resultants - Parameters - ---------- - offset : int - index offset to compute difference Returns ------- (resultants[i+offset] - resultants[i]) @@ -88,7 +84,14 @@ cdef class Pixel: cdef float[:] resultants = self.resultants cdef int end = len(resultants) - return np.subtract(resultants[offset:], resultants[:end - offset]) + cdef np.ndarray[float, ndim=2] t_bar_diff = np.array(self.fixed.t_bar_diff, dtype=np.float32) + cdef np.ndarray[float, ndim=2] delta = np.zeros((2, end - 1), dtype=np.float32) + + delta[0, :] = (np.subtract(resultants[1:], resultants[:end - 1]) / t_bar_diff[0, :]).astype(np.float32) + delta[1, :end-2] = (np.subtract(resultants[2:], resultants[:end - 2]) / t_bar_diff[1, :end-2]).astype(np.float32) + delta[1, end-2] = np.nan # last double difference is undefined + + return delta @cython.boundscheck(False) @cython.wraparound(False) @@ -190,17 +193,31 @@ cdef class Pixel: return ramp_fit - cdef inline float correction(Pixel self, int i, int offset, RampIndex ramp): - cdef float comp = ((self.fixed.data.t_bar[i + offset] - self.fixed.data.t_bar[i]) / + cdef inline float correction(Pixel self, RampIndex ramp, int index, int diff): + cdef float comp = (self.fixed.t_bar_diff[diff, index] / (self.fixed.data.t_bar[ramp.end] - self.fixed.data.t_bar[ramp.start])) - if offset == 1: + if diff == 0: return (1 - 
comp)**2 - elif offset == 2: + elif diff == 1: return (1 - 0.75 * comp)**2 else: raise ValueError("offset must be 1 or 2") + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef inline float stat(Pixel self, float slope, RampIndex ramp, int index, int diff): + + cdef float delta = ((self.delta[diff, index] - slope) * + fabs(self.fixed.t_bar_diff[diff, index])) + cdef float var = (self.sigma[diff, index] + + slope * self.fixed.slope_var[diff, index] * + self.correction(ramp, index, diff)) + + return delta / sqrt(var) + + @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) @@ -228,28 +245,30 @@ cdef class Pixel: list of statistics for each resultant except for the last 2 due to single/double difference due to indexing """ - cdef int start = ramp.start - cdef int end = ramp.end - 1 - - cdef float[:] slope_var_1 = np.zeros(end - start, dtype=np.float32) - cdef float[:] slope_var_2 = np.zeros(end - start, dtype=np.float32) - cdef int i - for i in range(end - start): - slope_var_1[i] = slope * self.fixed.slope_var_1[start + i] * self.correction(start + i, 1, ramp) - slope_var_2[i] = slope * self.fixed.slope_var_2[start + i] * self.correction(start + i, 2, ramp) - - cdef float[:] delta_1 = np.subtract(self.delta_1[start:end], slope) - cdef float[:] delta_2 = np.subtract(self.delta_2[start:end], slope) - - cdef float[:] var_1 = np.divide(np.add(self.sigma_1[start:end], slope_var_1), - self.fixed.t_bar_1_sq[start:end]) - cdef float[:] var_2 = np.divide(np.add(self.sigma_2[start:end], slope_var_2), - self.fixed.t_bar_2_sq[start:end]) - - cdef float[:] stats_1 = np.divide(delta_1, np.sqrt(var_1)) - cdef float[:] stats_2 = np.divide(delta_2, np.sqrt(var_2)) - - return np.maximum(stats_1, stats_2, dtype=np.float32) + cdef int start = ramp.start # index of first resultant for ramp + cdef int end = ramp.end # index of last resultant for ramp + + # Observe that the length of the ramp's sub array of the resultant would + # be `end - start + 1`. However, we are computing single and double + # "differences" which means we need to reference at least two points in + # this subarray at a time. For the single case, the maximum index allowed + # would be `end - 1`. 
Observe that `range(start, end)` will iterate over + # `start, start+1, start+1, ..., end-2, end-1` + # as the second argument to the `range` is the first index outside of the + # range + + cdef np.ndarray[float, ndim=1] stats = np.zeros(end - start, dtype=np.float32) + + cdef int index, stat + for stat, index in enumerate(range(start, end)): + if index == end - 1: + stats[stat] = self.stat(slope, ramp, index, 0) + else: + stats[stat] = max(self.stat(slope, ramp, index, 0), + self.stat(slope, ramp, index, 1)) + + return stats + @cython.boundscheck(False) @cython.wraparound(False) @@ -364,12 +383,7 @@ cdef inline Pixel make_pixel(Fixed fixed, float read_noise, float [:] resultants # Pre-compute values for jump detection shared by all pixels for this pixel if fixed.use_jump: - pixel.delta_1 = (np.array(pixel.resultants_diff(1)) / - np.array(fixed.t_bar_1)).astype(np.float32) - pixel.delta_2 = (np.array(pixel.resultants_diff(2)) / - np.array(fixed.t_bar_2)).astype(np.float32) - - pixel.sigma_1 = read_noise * np.array(fixed.recip_1) - pixel.sigma_2 = read_noise * np.array(fixed.recip_2) + pixel.delta = pixel.delta_val() + pixel.sigma = read_noise * np.array(fixed.recip) return pixel diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index 1882b01c..7bdc6f93 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -61,91 +61,42 @@ def make_fixed(np.ndarray[float, ndim=1] t_bar, cdef float intercept_ = fixed.threshold.intercept cdef float constant_ = fixed.threshold.constant - cdef np.ndarray[float, ndim=1] t_bar_1, t_bar_2 - cdef np.ndarray[float, ndim=1] t_bar_1_sq, t_bar_2_sq - cdef np.ndarray[float, ndim=1] recip_1, recip_2 - cdef np.ndarray[float, ndim=1] slope_var_1, slope_var_2 + cdef np.ndarray[float, ndim=2] t_bar_diff + cdef np.ndarray[float, ndim=2] recip + cdef np.ndarray[float, ndim=2] slope_var if use_jump: - t_bar_1 = np.array(fixed.t_bar_1, dtype=np.float32) - t_bar_2 = np.array(fixed.t_bar_2, dtype=np.float32) - t_bar_1_sq = np.array(fixed.t_bar_1_sq, dtype=np.float32) - t_bar_2_sq = np.array(fixed.t_bar_2_sq, dtype=np.float32) - - recip_1 = np.array(fixed.recip_1, dtype=np.float32) - recip_2 = np.array(fixed.recip_2, dtype=np.float32) - - slope_var_1 = np.array(fixed.slope_var_1, dtype=np.float32) - slope_var_2 = np.array(fixed.slope_var_2, dtype=np.float32) + t_bar_diff = np.array(fixed.t_bar_diff, dtype=np.float32) + recip = np.array(fixed.recip, dtype=np.float32) + slope_var = np.array(fixed.slope_var, dtype=np.float32) else: try: - fixed.t_bar_1 + fixed.t_bar_diff except AttributeError: - t_bar_1 = np.zeros(1, np.float32) + t_bar_diff = np.array([[np.nan],[np.nan]], dtype=np.float32) else: raise AttributeError("t_bar_1 should not exist") try: - fixed.t_bar_2 - except AttributeError: - t_bar_2 = np.zeros(1, np.float32) - else: - raise AttributeError("t_bar_2 should not exist") - - try: - fixed.t_bar_1_sq - except AttributeError: - t_bar_1_sq = np.zeros(1, np.float32) - else: - raise AttributeError("t_bar_1_sq should not exist") - - try: - fixed.t_bar_2_sq - except AttributeError: - t_bar_2_sq = np.zeros(1, np.float32) - else: - raise AttributeError("t_bar_2_sq should not exist") - - try: - fixed.recip_1 + fixed.recip except AttributeError: - recip_1 = np.zeros(1, np.float32) + recip = np.array([[np.nan],[np.nan]], dtype=np.float32) else: raise AttributeError("recip_1 should not exist") try: - fixed.recip_2 + fixed.slope_var except AttributeError: - recip_2 = 
np.zeros(1, np.float32) - else: - raise AttributeError("recip_2 should not exist") - - try: - fixed.slope_var_1 - except AttributeError: - slope_var_1 = np.zeros(1, np.float32) + slope_var = np.array([[np.nan],[np.nan]], dtype=np.float32) else: raise AttributeError("slope_var_1 should not exist") - try: - fixed.slope_var_2 - except AttributeError: - slope_var_2 = np.zeros(1, np.float32) - else: - raise AttributeError("slope_var_2 should not exist") - - return dict(data=fixed.data, intercept=intercept_, constant=constant_, - t_bar_1=t_bar_1, - t_bar_2=t_bar_2, - t_bar_1_sq=t_bar_1_sq, - t_bar_2_sq=t_bar_2_sq, - recip_1=recip_1, - recip_2=recip_2, - slope_var_1=slope_var_1, - slope_var_2=slope_var_2) + t_bar_diff=t_bar_diff, + recip=recip, + slope_var=slope_var) def make_pixel(np.ndarray[float, ndim=1] resultants, @@ -166,50 +117,32 @@ def make_pixel(np.ndarray[float, ndim=1] resultants, cdef np.ndarray[float, ndim=1] resultants_ = np.array(pixel.resultants, dtype=np.float32) - cdef np.ndarray[float, ndim=1] delta_1, delta_2 - cdef np.ndarray[float, ndim=1] sigma_1, sigma_2 + cdef np.ndarray[float, ndim=2] delta + cdef np.ndarray[float, ndim=2] sigma if use_jump: - delta_1 = np.array(pixel.delta_1, dtype=np.float32) - delta_2 = np.array(pixel.delta_2, dtype=np.float32) - sigma_1 = np.array(pixel.sigma_1, dtype=np.float32) - sigma_2 = np.array(pixel.sigma_2, dtype=np.float32) + delta = np.array(pixel.delta, dtype=np.float32) + sigma = np.array(pixel.sigma, dtype=np.float32) else: try: - pixel.delta_1 + pixel.delta except AttributeError: - delta_1 = np.zeros(1, np.float32) + delta = np.array([[np.nan],[np.nan]], dtype=np.float32) else: raise AttributeError("delta_1 should not exist") try: - pixel.delta_2 + pixel.sigma except AttributeError: - delta_2 = np.zeros(1, np.float32) - else: - raise AttributeError("delta_2 should not exist") - - try: - pixel.sigma_1 - except AttributeError: - sigma_1 = np.zeros(1, np.float32) + sigma = np.array([[np.nan],[np.nan]], dtype=np.float32) else: raise AttributeError("sigma_1 should not exist") - try: - pixel.sigma_2 - except AttributeError: - sigma_2 = np.zeros(1, np.float32) - else: - raise AttributeError("sigma_2 should not exist") - # only return computed values (assume fixed is correct) return dict(resultants=resultants_, read_noise=pixel.read_noise, - delta_1=delta_1, - delta_2=delta_2, - sigma_1=sigma_1, - sigma_2=sigma_2) + delta=delta, + sigma=sigma) def fit_ramp(np.ndarray[float, ndim=1] resultants, diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 9b0cb959..f6518981 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -126,29 +126,28 @@ def test_make_fixed(ramp_data, use_jump): # Check the computed data if use_jump: - single_gen = zip(fixed['t_bar_1'], fixed['t_bar_1_sq'], fixed['recip_1'], fixed['slope_var_1']) - double_gen = zip(fixed['t_bar_2'], fixed['t_bar_2_sq'], fixed['recip_2'], fixed['slope_var_2']) + single_gen = zip(fixed['t_bar_diff'][0], fixed['recip'][0], fixed['slope_var'][0]) + double_gen = zip(fixed['t_bar_diff'][1], fixed['recip'][1], fixed['slope_var'][1]) - for index, (t_bar_1, t_bar_1_sq, recip_1, slope_var_1) in enumerate(single_gen): + for index, (t_bar_1, recip_1, slope_var_1) in enumerate(single_gen): assert t_bar_1 == t_bar[index + 1] - t_bar[index] - assert t_bar_1_sq == np.float32((t_bar[index + 1] - t_bar[index])**2) assert recip_1 == np.float32(1 / n_reads[index + 1]) + np.float32(1 / n_reads[index]) assert slope_var_1 == (tau[index + 1] + tau[index] - min(t_bar[index], 
t_bar[index + 1])) - for index, (t_bar_2, t_bar_2_sq, recip_2, slope_var_2) in enumerate(double_gen): - assert t_bar_2 == t_bar[index + 2] - t_bar[index] - assert t_bar_2_sq == np.float32((t_bar[index + 2] - t_bar[index])**2) - assert recip_2 == np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) - assert slope_var_2 == (tau[index + 2] + tau[index] - min(t_bar[index], t_bar[index + 2])) + for index, (t_bar_2, recip_2, slope_var_2) in enumerate(double_gen): + if index == len(fixed['t_bar_diff'][1]) - 1: + # Last value must be NaN + assert np.isnan(t_bar_2) + assert np.isnan(recip_2) + assert np.isnan(slope_var_2) + else: + assert t_bar_2 == t_bar[index + 2] - t_bar[index] + assert recip_2 == np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) + assert slope_var_2 == (tau[index + 2] + tau[index] - min(t_bar[index], t_bar[index + 2])) else: - assert fixed['t_bar_1'] == np.zeros(1, np.float32) - assert fixed['t_bar_2'] == np.zeros(1, np.float32) - assert fixed['t_bar_1_sq'] == np.zeros(1, np.float32) - assert fixed['t_bar_2_sq'] == np.zeros(1, np.float32) - assert fixed['recip_1'] == np.zeros(1, np.float32) - assert fixed['recip_2'] == np.zeros(1, np.float32) - assert fixed['slope_var_1'] == np.zeros(1, np.float32) - assert fixed['slope_var_2'] == np.zeros(1, np.float32) + assert np.isnan(fixed['t_bar_diff']).all() + assert np.isnan(fixed['recip']).all() + assert np.isnan(fixed['slope_var']).all() def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1, add_jumps=False): @@ -215,8 +214,8 @@ def test_make_pixel(pixel_data, use_jump): assert read_noise == pixel['read_noise'] if use_jump: - single_gen = zip(pixel['delta_1'], pixel['sigma_1']) - double_gen = zip(pixel['delta_2'], pixel['sigma_2']) + single_gen = zip(pixel['delta'][0], pixel['sigma'][0]) + double_gen = zip(pixel['delta'][1], pixel['sigma'][1]) for index, (delta_1, sigma_1) in enumerate(single_gen): assert delta_1 == (resultants[index + 1] - resultants[index]) / (t_bar[index + 1] - t_bar[index]) @@ -225,15 +224,18 @@ def test_make_pixel(pixel_data, use_jump): ) for index, (delta_2, sigma_2) in enumerate(double_gen): - assert delta_2 == (resultants[index + 2] - resultants[index]) / (t_bar[index + 2] - t_bar[index]) - assert sigma_2 == read_noise * ( - np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) - ) + if index == len(pixel['delta'][1]) - 1: + # Last value must be NaN + assert np.isnan(delta_2) + assert np.isnan(sigma_2) + else: + assert delta_2 == (resultants[index + 2] - resultants[index]) / (t_bar[index + 2] - t_bar[index]) + assert sigma_2 == read_noise * ( + np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) + ) else: - assert pixel['delta_1'] == np.zeros(1, np.float32) - assert pixel['delta_2'] == np.zeros(1, np.float32) - assert pixel['sigma_1'] == np.zeros(1, np.float32) - assert pixel['sigma_2'] == np.zeros(1, np.float32) + assert np.isnan(pixel['delta']).all() + assert np.isnan(pixel['sigma']).all() @pytest.fixture(scope="module") From 059c41e8659d89ef38bfd57aeb2fcd73e5e7a521 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Tue, 26 Sep 2023 17:53:50 -0400 Subject: [PATCH 51/90] Fix the jump detection indexing issues. 
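The indexing conventions behind this fix: RampIndex.start and RampIndex.end are both inclusive, the per-ramp statistics only exist for positions start through end - 1, and the resultant flagged as a jump is excluded from both sub-ramps. A pure-Python sketch of the splitting rule (illustrative only, not the Cython implementation):

    def split_ramp(start, end, argmax_stat):
        """Split an inclusive [start, end] ramp around a detected jump.

        argmax_stat indexes the statistics array, which covers positions
        start .. end - 1, so the jump lands on resultant start + argmax_stat.
        """
        jump = start + argmax_stat
        sub_ramps = []
        if jump > start:                   # non-empty ramp before the jump
            sub_ramps.append((start, jump - 1))
        sub_ramps.append((jump + 1, end))  # may degenerate to (end, end)
        return sub_ramps

    # A jump at the first resultant only leaves the tail of the ramp:
    assert split_ramp(2, 7, 0) == [(3, 7)]
    assert split_ramp(2, 7, 3) == [(2, 4), (6, 7)]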
--- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 51 +++++++++++++++++++-- 1 file changed, 47 insertions(+), 4 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 510260eb..b6133d06 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -129,6 +129,10 @@ cdef class Pixel: cdef int[:] n_reads_ = self.fixed.data.n_reads.data() # Setup data for fitting (work over subset of data) + # Recall that the RampIndex contains the index of the first and last + # index of the ramp. Therefore, the Python slice needed to get all the + # data within the ramp is: + # ramp.start:ramp.end + 1 cdef float[:] resultants = self.resultants[ramp.start:ramp.end + 1] cdef float[:] t_bar = t_bar_[ramp.start:ramp.end + 1] cdef float[:] tau = tau_[ramp.start:ramp.end + 1] @@ -310,16 +314,55 @@ cdef class Pixel: # Compute fit ramp_fit = self.fit_ramp(ramp) + # Run jump detection if enabled if self.fixed.use_jump: stats = self.stats(ramp_fit.slope, ramp) - if max(stats) > threshold(self.fixed.threshold, ramp_fit.slope): + # We have to protect against the case where the passed "ramp" is only + # a single point. In that case, stats will be empty. This will create + # an error in the max() call. + if len(stats) > 0 and max(stats) > threshold(self.fixed.threshold, ramp_fit.slope): # Compute split point to create two new ramps + # The split will map to the index of the resultant with the detected jump + # resultant_jump_index = ramp.start + split + # This resultant index needs to be removed, therefore the two possible new + # ramps are: + # RampIndex(ramp.start, ramp.start + split - 1) + # RampIndex(ramp.start + split + 1, ramp.end) + # This is because the RampIndex contains the index of the first and last + # resulants in the sub-ramp it describes. split = np.argmax(stats) - # add ramps so last ramp in time is on top of stack - ramps.push(RampIndex(ramp.start, ramp.start + split)) - ramps.push(RampIndex(ramp.start + split + 2, ramp.end)) + # The algorithm works via working over the sub-ramps backward + # in time. Therefore, since we are using a stack, we need to + # add the ramps in the time order they were observed in. This + # results in the last observation ramp being the top of the + # stack; meaning that, it will be the next ramp handeled. + + if split > 0: + # When split == 0, the jump has been detected in the resultant + # corresponding to the first resultant in the ramp, i.e + # ramp.start + # So the "split" is just excluding the first resultant in the + # ramp currently being considered. Therefore, there is no need + # to handle a ramp in this case. + ramps.push(RampIndex(ramp.start, ramp.start + split - 1)) + + # Note that because the stats can only be calculated for ramp + # length - 1 # positions due to the need to compute at least + # single differences. # Therefore the maximum value for + # argmax(stats) is ramp length - 2, as the index of the last + # element of stats is length of stats - 1. Thus + # max(argmax(stats)) = len(stats) - 1 + # = len(ramp) - 2 + # = ramp.end - ramp.start - 1 + # So we have that the maximium value for the lower index of + # this sub-ramp is + # ramp.start + split + 1 = ramp.start + ramp.end + # - ramp.start - 1 + 1 + # = ramp.end + # This is always a valid ramp. 
+ ramps.push(RampIndex(ramp.start + split + 1, ramp.end)) # Return to top of loop to fit new ramps (without adding to fits) continue From 8c5ef0d1c755da6d363815239a08fbe0482515a5 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 27 Sep 2023 09:34:50 -0400 Subject: [PATCH 52/90] Use NaN for signalling degenerancy instead of 0 --- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 38 +++++++++++++-------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index b6133d06..8b7e0ec4 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -110,12 +110,16 @@ cdef class Pixel: RampFit struct of slope, read_var, poisson_var """ cdef int n_resultants = ramp.end - ramp.start + 1 - cdef RampFit ramp_fit = RampFit(0, 0, 0) - # Special case where there is no or one resultant, there is no fit. + # Special case where there is no or one resultant, there is no fit and + # we bail out before any computations. + # Note that in this case, we cannot compute the slope or the variances + # because these computations require at least two resultants. Therefore, + # this case is degernate and we return NaNs for the values. if n_resultants <= 1: - return ramp_fit - # Else, do the fitting. + return RampFit(np.nan, np.nan, np.nan) + + # Start computing the fit # Cast vectors to memory views for faster access # This way of doing it is potentially memory unsafe because the memory @@ -137,14 +141,13 @@ cdef class Pixel: cdef float[:] t_bar = t_bar_[ramp.start:ramp.end + 1] cdef float[:] tau = tau_[ramp.start:ramp.end + 1] cdef int[:] n_reads = n_reads_[ramp.start:ramp.end + 1] + + # Reference read_noise as a local variable to avoid calling through Python + # every time it is accessed. cdef float read_noise = self.read_noise + # Compute mid point time cdef int end = len(resultants) - 1 - - # initalize fit - cdef int i = 0, j = 0 - cdef vector[float] weights = vector[float](n_resultants) - cdef vector[float] coeffs = vector[float](n_resultants) cdef float t_bar_mid = (t_bar[0] + t_bar[end]) / 2 # Casertano+2022 Eq. 
44 @@ -161,7 +164,13 @@ cdef class Pixel: cdef float t_scale = (t_bar[end] - t_bar[0]) / 2 t_scale = 1 if t_scale == 0 else t_scale + # Initalize the fit loop + cdef int i = 0, j = 0 + cdef vector[float] weights = vector[float](n_resultants) + cdef vector[float] coeffs = vector[float](n_resultants) + cdef RampFit ramp_fit = RampFit(0, 0, 0) cdef float f0 = 0, f1 = 0, f2 = 0 + # Issue when tbar[] == tbarmid causes exception otherwise with cython.cpow(True): for i in range(n_resultants): @@ -375,12 +384,13 @@ cdef class Pixel: ramp_fits.index.push_back(ramp) # Start computing the averages - weight = 0 if ramp_fit.read_var == 0 else 1 / ramp_fit.read_var - total_weight += weight + if not np.isnan(ramp_fit.slope): + weight = 0 if ramp_fit.read_var == 0 else 1 / ramp_fit.read_var + total_weight += weight - ramp_fits.average.slope += weight * ramp_fit.slope - ramp_fits.average.read_var += weight**2 * ramp_fit.read_var - ramp_fits.average.poisson_var += weight**2 * ramp_fit.poisson_var + ramp_fits.average.slope += weight * ramp_fit.slope + ramp_fits.average.read_var += weight**2 * ramp_fit.read_var + ramp_fits.average.poisson_var += weight**2 * ramp_fit.poisson_var # Reverse to order in time ramp_fits.fits = ramp_fits.fits[::-1] From 7ecd0d5ed088f50c03a8738bff5404f2f39fe9b9 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 27 Sep 2023 09:58:06 -0400 Subject: [PATCH 53/90] Update doc strings --- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 45 +++++-------- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 71 ++++++++++++++------- 2 files changed, 64 insertions(+), 52 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index 215ba592..686def56 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -36,37 +36,28 @@ cdef class Fixed: use_jump : bool flag to indicate whether to use jump detection (user input) - t_bar_1 : float[:] + t_bar_diff : float[:, :] single differences of t_bar: - (t_bar[i+1] - t_bar[i]) - t_bar_1_sq : float[:] - squared single differences of t_bar: - (t_bar[i+1] - t_bar[i])**2 - t_bar_2 : float[:] + t_bar_diff[0, :] = (t_bar[i+1] - t_bar[i]) double differences of t_bar: - (t_bar[i+2] - t_bar[i]) - t_bar_2_sq: float[:] - squared double differences of t_bar: - (t_bar[i+2] - t_bar[i])**2 - recip_1 : vector[float] + t_bar_diff[1, :] = (t_bar[i+2] - t_bar[i]) + recip : float[:, :] single sum of reciprocal n_reads: - ((1/n_reads[i+1]) + (1/n_reads[i])) - recip_2 : vector[float] + recip[0, :] = ((1/n_reads[i+1]) + (1/n_reads[i])) double sum of reciprocal n_reads: - ((1/n_reads[i+2]) + (1/n_reads[i])) - slope_var_1 : vector[float] + recip[1, :] = ((1/n_reads[i+2]) + (1/n_reads[i])) + slope_var : float[:, :] single of slope variance term: - ([tau[i] + tau[i+1] - min(t_bar[i], t_bar[i+1])) - slope_var_2 : vector[float] + slope_var[0, :] = ([tau[i] + tau[i+1] - min(t_bar[i], t_bar[i+1])) double of slope variance term: - ([tau[i] + tau[i+2] - min(t_bar[i], t_bar[i+2])) + slope_var[1, :] = ([tau[i] + tau[i+2] - min(t_bar[i], t_bar[i+2])) Notes ----- - - t_bar_*, t_bar_*_sq, recip_*, slope_var_* are only computed if use_jump is True. - These values represent reused computations for jump detection which are used by - every pixel for jump detection. They are computed once and stored in the Fixed - for reuse by all pixels. + - t_bar_diff, recip, slope_var are only computed if use_jump is True. 
These + values represent reused computations for jump detection which are used by + every pixel for jump detection. They are computed once and stored in the + Fixed for reuse by all pixels. - The computations are done using vectorized operations for some performance increases. However, this is marginal compaired with the performance increase from pre-computing the values and reusing them. @@ -78,9 +69,10 @@ cdef class Fixed: Returns ------- - [ + [ , , + ] """ # Cast vector to memory view # This way of doing it is potentially memory unsafe because the memory @@ -100,16 +92,13 @@ cdef class Fixed: cdef inline float[:, :] recip_val(Fixed self): """ - Compute the recip values - (1/n_reads[i+1] + 1/n_reads[i]) - and - (1/n_reads[i+2] + 1/n_reads[i]) + Compute the reciprical sum values Returns ------- [ <(1/n_reads[i+1] + 1/n_reads[i])>, - <(1/n_reads[i+2] + 1/n_reads[i])> + <(1/n_reads[i+2] + 1/n_reads[i])>, ] """ diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 8b7e0ec4..ce8a075c 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -41,25 +41,23 @@ cdef class Pixel: resultants : float [:] array of resultants for single pixel (data input) - delta_1 : float [:] + delta : float [:, :] single difference delta+slope: - (resultants[i+1] - resultants[i]) / (t_bar[i+1] - t_bar[i]) - delta_2 : float [:] + delta[0, :] = (resultants[i+1] - resultants[i]) / (t_bar[i+1] - t_bar[i]) double difference delta+slope: - (resultants[i+2] - resultants[i]) / (t_bar[i+2] - t_bar[i]) - sigma_1 : float [:] + delta[1, :] = (resultants[i+2] - resultants[i]) / (t_bar[i+2] - t_bar[i]) + sigma : float [:, :] single difference "sigma": - read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) - sigma_2 : float [:] + sigma[0, :] = read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) double difference "sigma": - read_noise * ((1/n_reads[i+2]) + (1/n_reads[i])) + sigma[1, :] = read_noise * ((1/n_reads[i+2]) + (1/n_reads[i])) Notes ----- - - delta_*, sigma_* are only computed if use_jump is True. These values - represent reused computations for jump detection which are used by every - ramp for the given pixel for jump detection. They are computed once and - stored for reuse by all ramp computations for the pixel. + - delta, sigma are only computed if use_jump is True. These values represent + reused computations for jump detection which are used by every ramp for + the given pixel for jump detection. They are computed once and stored for + reuse by all ramp computations for the pixel. - The computations are done using vectorized operations for some performance increases. However, this is marginal compaired with the performance increase from pre-computing the values and reusing them. 
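The per-pixel companions to the Fixed arrays are the delta and sigma attributes documented above. A matching NumPy sketch (illustrative names; t_bar_diff and recip are the two-row arrays from the earlier sketch):

    import numpy as np

    def precompute_pixel(resultants, t_bar_diff, recip, read_noise):
        """Sketch of the per-pixel delta/sigma pre-computation.

        delta[k, i] is the local slope from the single (k=0) or double (k=1)
        difference at position i; sigma is the read-noise part of its variance,
        read_noise * (1/n_reads[j] + 1/n_reads[i]) as described above.
        """
        resultants = np.asarray(resultants, dtype=np.float32)
        n = len(resultants)

        delta = np.full((2, n - 1), np.nan, dtype=np.float32)
        delta[0, :] = (resultants[1:] - resultants[:-1]) / t_bar_diff[0, :]
        delta[1, :-1] = (resultants[2:] - resultants[:-2]) / t_bar_diff[1, :-1]

        sigma = read_noise * np.asarray(recip)  # keeps NaN in the last double slot
        return delta, sigma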
@@ -79,7 +77,10 @@ cdef class Pixel: Returns ------- - (resultants[i+offset] - resultants[i]) + [ + <(resultants[i+1] - resultants[i])>, + <(resultants[i+2] - resultants[i])>, + ] """ cdef float[:] resultants = self.resultants cdef int end = len(resultants) @@ -221,7 +222,33 @@ cdef class Pixel: @cython.wraparound(False) @cython.cdivision(True) cdef inline float stat(Pixel self, float slope, RampIndex ramp, int index, int diff): + """ + Compute a single set of fit statistics + delta / sqrt(var) + where + delta = ((R[j] - R[i]) / (t_bar[j] - t_bar[i]) - slope) + * (t_bar[j] - t_bar[i]) + var = sigma * (1/N[j] + 1/N[i]) + + slope * (tau[j] + tau[i] - min(t_bar[j], t_bar[i])) + * correction(offset) + + Parameters + ---------- + slope : float + The computed slope for the ramp + ramp : RampIndex + Struct for start and end indices resultants for the ramp + index : int + The main index for the resultant to compute the statistic for + diff : int + The offset to use for the delta and sigma values + 0 : single difference + 1 : double difference + Returns + ------- + Create a single instance of the stastic for the given parameters + """ cdef float delta = ((self.delta[diff, index] - slope) * fabs(self.fixed.t_bar_diff[diff, index])) cdef float var = (self.sigma[diff, index] + @@ -237,26 +264,19 @@ cdef class Pixel: cdef inline float[:] stats(Pixel self, float slope, RampIndex ramp): """ Compute fit statistics for jump detection on a single ramp - Computed using: - corr_1[i] = correction(i, 1, ramp) - corr_2[i] = correction(i, 2, ramp) - - var_1[i] = ((sigma_1[i] + slope * slope_var_1[i] * corr_1[i]) / t_bar_1_sq[i]) - var_2[i] = ((sigma_2[i] + slope * slope_var_2[i] * corr_2[i]) / t_bar_2_sq[i]) - - s_1[i] = (delta_1[i] - slope) / sqrt(var_1[i]) - s_2[i] = (delta_2[i] - slope) / sqrt(var_2[i]) + stats[i] = max(stat(i, 0), stat(i, 1)) + Note for i == end - 1, no stat(i, 1) exists, so its just stat(i, 0) - stats[i] = max(s_1[i], s_2[i]) Parameters ---------- + slope : float + The computed slope for the ramp ramp : RampIndex Struct for start and end of ramp to fit Returns ------- list of statistics for each resultant - except for the last 2 due to single/double difference due to indexing """ cdef int start = ramp.start # index of first resultant for ramp cdef int end = ramp.end # index of last resultant for ramp @@ -275,6 +295,9 @@ cdef class Pixel: cdef int index, stat for stat, index in enumerate(range(start, end)): if index == end - 1: + # It is not possible to compute double differences for the second + # to last resultant in the ramp. Therefore, we just compute the + # single difference for this resultant. 
stats[stat] = self.stat(slope, ramp, index, 0) else: stats[stat] = max(self.stat(slope, ramp, index, 0), From a795cbd7e8d42cebd2101b45bc18417b0c5d44fc Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 27 Sep 2023 13:08:54 -0400 Subject: [PATCH 54/90] Use enum to enumerate single vs double diff dimensions --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 6 ++++++ src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 24 ++++++++++----------- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 15 +++++++------ 3 files changed, 26 insertions(+), 19 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 92bb3a22..49cb3c06 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -36,6 +36,12 @@ cdef struct Thresh: float intercept float constant + +cdef enum Diff: + single = 0 + double = 1 + + cdef float threshold(Thresh thresh, float slope) cdef float get_power(float s) cdef deque[stack[RampIndex]] init_ramps(int[:, :] dq) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index 686def56..3ea8cd47 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -16,7 +16,7 @@ make_fixed : function import numpy as np cimport numpy as np -from stcal.ramp_fitting.ols_cas22._core cimport Thresh, DerivedData +from stcal.ramp_fitting.ols_cas22._core cimport Thresh, DerivedData, Diff from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed cdef class Fixed: @@ -84,9 +84,9 @@ cdef class Fixed: cdef np.ndarray[float, ndim=2] t_bar_diff = np.zeros((2, self.data.t_bar.size() - 1), dtype=np.float32) - t_bar_diff[0, :] = np.subtract(t_bar[1:], t_bar[:-1]) - t_bar_diff[1, :-1] = np.subtract(t_bar[2:], t_bar[:-2]) - t_bar_diff[1, -1] = np.nan # last double difference is undefined + t_bar_diff[Diff.single, :] = np.subtract(t_bar[1:], t_bar[:-1]) + t_bar_diff[Diff.double, :-1] = np.subtract(t_bar[2:], t_bar[:-2]) + t_bar_diff[Diff.double, -1] = np.nan # last double difference is undefined return t_bar_diff @@ -112,11 +112,11 @@ cdef class Fixed: cdef np.ndarray[float, ndim=2] recip = np.zeros((2, self.data.n_reads.size() - 1), dtype=np.float32) - recip[0, :] = (np.divide(1.0, n_reads[1:], dtype=np.float32) + - np.divide(1.0, n_reads[:-1], dtype=np.float32)) - recip[1, :-1] = (np.divide(1.0, n_reads[2:], dtype=np.float32) + - np.divide(1.0, n_reads[:-2], dtype=np.float32)) - recip[1, -1] = np.nan # last double difference is undefined + recip[Diff.single, :] = (np.divide(1.0, n_reads[1:], dtype=np.float32) + + np.divide(1.0, n_reads[:-1], dtype=np.float32)) + recip[Diff.double, :-1] = (np.divide(1.0, n_reads[2:], dtype=np.float32) + + np.divide(1.0, n_reads[:-2], dtype=np.float32)) + recip[Diff.double, -1] = np.nan # last double difference is undefined return recip @@ -143,9 +143,9 @@ cdef class Fixed: cdef np.ndarray[float, ndim=2] slope_var = np.zeros((2, self.data.t_bar.size() - 1), dtype=np.float32) - slope_var[0, :] = (np.add(tau[1:], tau[:-1]) - np.minimum(t_bar[1:], t_bar[:-1])) - slope_var[1, :-1] = (np.add(tau[2:], tau[:-2]) - np.minimum(t_bar[2:], t_bar[:-2])) - slope_var[1, -1] = np.nan # last double difference is undefined + slope_var[Diff.single, :] = (np.add(tau[1:], tau[:-1]) - np.minimum(t_bar[1:], t_bar[:-1])) + slope_var[Diff.double, :-1] = (np.add(tau[2:], tau[:-2]) - np.minimum(t_bar[2:], t_bar[:-2])) + slope_var[Diff.double, -1] = np.nan # last double difference is undefined return slope_var 
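For pure-Python prototypes of the same two-row layout, the Cython enum introduced here maps naturally onto an IntEnum; this analogue is illustrative and not part of the package:

    from enum import IntEnum

    class Diff(IntEnum):
        single = 0  # differences between resultants i and i+1
        double = 1  # differences between resultants i and i+2

    # IntEnum members are ints, so they index the (2, n-1) arrays directly,
    # e.g. t_bar_diff[Diff.single, i] or recip[Diff.double, i].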
diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index ce8a075c..4837ae05 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -21,7 +21,7 @@ cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport get_power, threshold, RampFit, RampFits, RampIndex +from stcal.ramp_fitting.ols_cas22._core cimport get_power, threshold, RampFit, RampFits, RampIndex, Diff from stcal.ramp_fitting.ols_cas22._pixel cimport Pixel @@ -88,9 +88,9 @@ cdef class Pixel: cdef np.ndarray[float, ndim=2] t_bar_diff = np.array(self.fixed.t_bar_diff, dtype=np.float32) cdef np.ndarray[float, ndim=2] delta = np.zeros((2, end - 1), dtype=np.float32) - delta[0, :] = (np.subtract(resultants[1:], resultants[:end - 1]) / t_bar_diff[0, :]).astype(np.float32) - delta[1, :end-2] = (np.subtract(resultants[2:], resultants[:end - 2]) / t_bar_diff[1, :end-2]).astype(np.float32) - delta[1, end-2] = np.nan # last double difference is undefined + delta[Diff.single, :] = (np.subtract(resultants[1:], resultants[:end - 1]) / t_bar_diff[0, :]).astype(np.float32) + delta[Diff.double, :end-2] = (np.subtract(resultants[2:], resultants[:end - 2]) / t_bar_diff[1, :end-2]).astype(np.float32) + delta[Diff.double, end-2] = np.nan # last double difference is undefined return delta @@ -298,10 +298,10 @@ cdef class Pixel: # It is not possible to compute double differences for the second # to last resultant in the ramp. Therefore, we just compute the # single difference for this resultant. - stats[stat] = self.stat(slope, ramp, index, 0) + stats[stat] = self.stat(slope, ramp, index, Diff.single) else: - stats[stat] = max(self.stat(slope, ramp, index, 0), - self.stat(slope, ramp, index, 1)) + stats[stat] = max(self.stat(slope, ramp, index, Diff.double), + self.stat(slope, ramp, index, Diff.double)) return stats @@ -407,6 +407,7 @@ cdef class Pixel: ramp_fits.index.push_back(ramp) # Start computing the averages + # Note we do not do anything in the NaN case for degenerate ramps if not np.isnan(ramp_fit.slope): weight = 0 if ramp_fit.read_var == 0 else 1 / ramp_fit.read_var total_weight += weight From 68f99400c52fb7694f2af74a6048374e1e5075cd Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 27 Sep 2023 13:22:43 -0400 Subject: [PATCH 55/90] Clean up documentation further --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 8 +---- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 35 +++++++++++-------- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 16 +-------- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 24 ++++++++++--- 4 files changed, 41 insertions(+), 42 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 49cb3c06..5beb2a87 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -14,16 +14,10 @@ cdef struct RampFit: float poisson_var -cdef struct AverageRampFit: - float slope - float read_var - float poisson_var - - cdef struct RampFits: vector[RampFit] fits vector[RampIndex] index - AverageRampFit average + RampFit average cdef struct DerivedData: diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index 652da0e4..a56cc357 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -1,7 +1,7 @@ """ Define the basic types and functions for the CAS22 algorithm with jump detection -Structs: +Structs ------- 
RampIndex int start: starting index of the ramp in the resultants @@ -11,31 +11,36 @@ Structs: float read_var: read noise variance of a single ramp float poisson_var: poisson noise variance of single ramp RampFits - cpp_list[float] slope: slopes of the ramps for a single pixel - cpp_list[float] read_var: read noise variances of the ramps for a single - pixel - cpp_list[float] poisson_var: poisson noise variances of the ramps for a - single pixel + vector[RampFit] fits: ramp fits (in time order) for a single pixel + vector[RampIndex] index: ramp indices (in time order) for a single pixel + RampFit average: average ramp fit for a single pixel DerivedData vector[float] t_bar: mean time of each resultant vector[float] tau: variance time of each resultant vector[int] n_reads: number of reads in each resultant + Thresh + float intercept: intercept of the threshold + float constant: constant of the threshold -Objects -------- - Thresh : class - Hold the threshold parameters and compute the threshold +Enums +----- + Diff + This is the enum to track the index for single vs double difference related + computations. + + single: single difference + double: double difference -Functions: ----------- +Functions +--------- get_power Return the power from Casertano+22, Table 2 threshold Compute jump threshold init_ramps - Find initial ramps for each pixel - read_ma_table - Read the MA table and Derive the necessary data from it + Find initial ramps for each pixel, accounts for DQ flags + read_data + Read the read pattern and derive the baseline data parameters needed """ from libcpp.stack cimport stack from libcpp.deque cimport deque diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 5a474c3f..7d8dea47 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -46,21 +46,7 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, Returns ------- - dictionary containing the following keywords: - slope : np.ndarray[nramp] - slopes fit for each ramp - slopereadvar : np.ndarray[nramp] - variance in slope due to read noise - slopepoissonvar : np.ndarray[nramp] - variance in slope due to Poisson noise, divided by the slope - i.e., the slope poisson variance is coefficient * flux; this term - is the coefficient. - pix : np.ndarray[nramp] - the pixel each ramp is in - resstart : np.ndarray[nramp] - The first resultant in this ramp - resend : np.ndarray[nramp] - The last resultant in this ramp. + A list of RampFits objects, one for each pixel. """ cdef int n_pixels, n_resultants n_resultants = resultants.shape[0] diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 4837ae05..93878acb 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -64,9 +64,11 @@ cdef class Pixel: Methods ------- - fits (ramp_stack) : method + fit_ramp (ramp_index) : method + Compute the ramp fit for a single ramp defined by an inputed RampIndex + fit_ramps (ramp_stack) : method Compute all the ramps for a single pixel using the Casertano+22 algorithm - with jump detection. + with jump detection. 
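Putting the stat() docstring from this patch into runnable form, the single-difference statistic (j = i + 1) looks roughly like the sketch below, with sigma expanded to read_noise * (1/N[j] + 1/N[i]) as in the Pixel attribute documentation and the single-difference correction (1 - comp)**2 from the correction() method; this is an illustration, not the Cython implementation:

    import numpy as np

    def single_diff_stat(resultants, t_bar, tau, n_reads, read_noise,
                         slope, start, end, i):
        """Jump statistic for the single difference (i, i+1) inside ramp [start, end]."""
        j = i + 1
        t_bar_diff = t_bar[j] - t_bar[i]

        # local slope estimate minus the fitted slope, weighted by the time baseline
        delta = ((resultants[j] - resultants[i]) / t_bar_diff - slope) * abs(t_bar_diff)

        # variance: read-noise term plus slope-dependent term with its correction
        comp = t_bar_diff / (t_bar[end] - t_bar[start])
        correction = (1 - comp) ** 2
        var = (read_noise * (1 / n_reads[j] + 1 / n_reads[i])
               + slope * (tau[j] + tau[i] - min(t_bar[i], t_bar[j])) * correction)

        return delta / np.sqrt(var)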
""" @cython.boundscheck(False) @@ -208,6 +210,19 @@ cdef class Pixel: return ramp_fit cdef inline float correction(Pixel self, RampIndex ramp, int index, int diff): + """ + Compute the correction factor for the variance used by a statistic + + Parameters + ---------- + ramp : RampIndex + Struct for start and end indices resultants for the ramp + index : int + The main index for the resultant to compute the statistic for + diff : int + The offset to use for the delta and sigma values, this should be + a value from the Diff enum. + """ cdef float comp = (self.fixed.t_bar_diff[diff, index] / (self.fixed.data.t_bar[ramp.end] - self.fixed.data.t_bar[ramp.start])) @@ -241,9 +256,8 @@ cdef class Pixel: index : int The main index for the resultant to compute the statistic for diff : int - The offset to use for the delta and sigma values - 0 : single difference - 1 : double difference + The offset to use for the delta and sigma values, this should be + a value from the Diff enum. Returns ------- From ceef0e5d6e0f2c878feeb4585de244a0ae51ae16 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 27 Sep 2023 14:51:31 -0400 Subject: [PATCH 56/90] Record jumps --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 1 + src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 43 +++++++++-------- tests/test_jump_cas22.py | 51 +++++++++++++++------ 3 files changed, 62 insertions(+), 33 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 5beb2a87..731f7c58 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -17,6 +17,7 @@ cdef struct RampFit: cdef struct RampFits: vector[RampFit] fits vector[RampIndex] index + vector[int] jumps RampFit average diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 93878acb..cf5e8f24 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -364,34 +364,37 @@ cdef class Pixel: if self.fixed.use_jump: stats = self.stats(ramp_fit.slope, ramp) - # We have to protect against the case where the passed "ramp" is only - # a single point. In that case, stats will be empty. This will create - # an error in the max() call. + # We have to protect against the case where the passed "ramp" is + # only a single point. In that case, stats will be empty. This + # will create an error in the max() call. if len(stats) > 0 and max(stats) > threshold(self.fixed.threshold, ramp_fit.slope): # Compute split point to create two new ramps - # The split will map to the index of the resultant with the detected jump + # The split will map to the index of the resultant with the + # detected jump: # resultant_jump_index = ramp.start + split - # This resultant index needs to be removed, therefore the two possible new - # ramps are: - # RampIndex(ramp.start, ramp.start + split - 1) - # RampIndex(ramp.start + split + 1, ramp.end) - # This is because the RampIndex contains the index of the first and last - # resulants in the sub-ramp it describes. split = np.argmax(stats) - - # The algorithm works via working over the sub-ramps backward - # in time. Therefore, since we are using a stack, we need to - # add the ramps in the time order they were observed in. This - # results in the last observation ramp being the top of the - # stack; meaning that, it will be the next ramp handeled. 
+ ramp_fits.jumps.push_back(ramp.start + split) + + # This resultant index needs to be removed, therefore the two + # possible new ramps are: + # RampIndex(ramp.start, ramp.start + split - 1) + # RampIndex(ramp.start + split + 1, ramp.end) + # This is because the RampIndex contains the index of the + # first and last resulants in the sub-ramp it describes. + # Note: The algorithm works via working over the sub-ramps + # backward in time. Therefore, since we are using a stack, + # we need to add the ramps in the time order they were + # observed in. This results in the last observation ramp + # being the top of the stack; meaning that, + # it will be the next ramp handeled. if split > 0: # When split == 0, the jump has been detected in the resultant # corresponding to the first resultant in the ramp, i.e # ramp.start - # So the "split" is just excluding the first resultant in the - # ramp currently being considered. Therefore, there is no need - # to handle a ramp in this case. + # So the "split" is just excluding the first resultant in + # the ramp currently being considered. Therefore, there + # is no need to handle a ramp in this case. ramps.push(RampIndex(ramp.start, ramp.start + split - 1)) # Note that because the stats can only be calculated for ramp @@ -410,7 +413,7 @@ cdef class Pixel: # This is always a valid ramp. ramps.push(RampIndex(ramp.start + split + 1, ramp.end)) - # Return to top of loop to fit new ramps (without adding to fits) + # Return to top of loop to fit new ramps without recording continue # Add ramp_fit to ramp_fits if no jump detection or stats are less diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index f6518981..582e9cd4 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -14,6 +14,7 @@ N_PIXELS = 100_000 FLUX = 100 JUMP_VALUE = 10_000 +CHI2_TOL = 0.03 @pytest.fixture(scope="module") @@ -92,14 +93,13 @@ def test_threshold(): intercept = np.float32(5.5) constant = np.float32(1/3) - # Parameters are not directly accessible assert intercept == run_threshold(intercept, constant, 1.0) # check intercept assert np.float32(intercept - constant) == run_threshold(intercept, constant, 10.0) # check constant @pytest.fixture(scope="module") def ramp_data(base_ramp_data): - """Upacked data for simulating ramps for testing""" + """Unpacked data for simulating ramps for testing""" t_bar = np.array(base_ramp_data[1]['t_bar'], dtype=np.float32) tau = np.array(base_ramp_data[1]['tau'], dtype=np.float32) n_reads = np.array(base_ramp_data[1]['n_reads'], dtype=np.int32) @@ -125,6 +125,8 @@ def test_make_fixed(ramp_data, use_jump): assert fixed["constant"] == constant # Check the computed data + # These are computed via vectorized operations in the main code, here we + # check using item-by-item operations if use_jump: single_gen = zip(fixed['t_bar_diff'][0], fixed['recip'][0], fixed['slope_var'][0]) double_gen = zip(fixed['t_bar_diff'][1], fixed['recip'][1], fixed['slope_var'][1]) @@ -145,6 +147,8 @@ def test_make_fixed(ramp_data, use_jump): assert recip_2 == np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) assert slope_var_2 == (tau[index + 2] + tau[index] - min(t_bar[index], t_bar[index + 2])) else: + # If not using jumps, these values should not even exist. 
However, for wrapping + # purposes, they are checked to be non-existent and then set to NaN assert np.isnan(fixed['t_bar_diff']).all() assert np.isnan(fixed['recip']).all() assert np.isnan(fixed['slope_var']).all() @@ -174,7 +178,6 @@ def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1, add_jumps=F # Add a large value to the ramp ramp_value += (JUMP_VALUE * jump_points).astype(np.float32) - # Add to running total for the resultant resultant_total += ramp_value @@ -182,16 +185,17 @@ def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1, add_jumps=F resultants[index] = (resultant_total / len(reads)).astype(np.float32) # Record all the jumps for this resultant - jumps.append(read_jumps) + jumps.append(np.any(read_jumps, axis=0)) if n_pixels == 1: resultants = resultants[:, 0] - return resultants, jumps + return resultants, np.array(jumps) @pytest.fixture(scope="module") def pixel_data(ramp_data): + """Create data for a single pixel""" read_noise = np.float32(5) read_pattern, t_bar, tau, n_reads = ramp_data @@ -202,7 +206,7 @@ def pixel_data(ramp_data): @pytest.mark.parametrize("use_jump", [True, False]) def test_make_pixel(pixel_data, use_jump): - """Test computing the pixel data""" + """Test computing the initial pixel data""" resultants, t_bar, tau, n_reads, read_noise, _ = pixel_data intercept = np.float32(5.5) @@ -210,9 +214,13 @@ def test_make_pixel(pixel_data, use_jump): pixel = make_pixel(resultants, t_bar, tau, n_reads, read_noise, intercept, constant, use_jump) + # Basic sanity checks that data passed in survives assert (pixel['resultants'] == resultants).all() assert read_noise == pixel['read_noise'] + # Check the computed data + # These are computed via vectorized operations in the main code, here we + # check using item-by-item operations if use_jump: single_gen = zip(pixel['delta'][0], pixel['sigma'][0]) double_gen = zip(pixel['delta'][1], pixel['sigma'][1]) @@ -234,12 +242,18 @@ def test_make_pixel(pixel_data, use_jump): np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) ) else: + # If not using jumps, these values should not even exist. However, for wrapping + # purposes, they are checked to be non-existent and then set to NaN assert np.isnan(pixel['delta']).all() assert np.isnan(pixel['sigma']).all() @pytest.fixture(scope="module") def detector_data(ramp_data): + """ + Generate a set of with no jumps data as if for a single detector as it + would be passed in by the supporting code. 
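For reference, a single-pixel rewrite of the _generate_resultants helper these fixtures rely on, with the jump-injection branch omitted; the default read_time mirrors ROMAN_READ_TIME from the test module:

    import numpy as np

    def simulate_resultants(read_pattern, flux, read_noise, read_time=3.04, rng=None):
        """Simulate one pixel: Poisson flux accumulation plus read noise,
        averaged over the reads that make up each resultant."""
        rng = np.random.default_rng() if rng is None else rng
        resultants = np.zeros(len(read_pattern), dtype=np.float32)

        ramp_value = 0.0  # signal accumulated so far
        for index, reads in enumerate(read_pattern):
            resultant_total = 0.0
            for _ in reads:
                ramp_value += rng.poisson(flux * read_time)
                ramp_value += rng.standard_normal() * read_noise / np.sqrt(len(reads))
                resultant_total += ramp_value
            resultants[index] = resultant_total / len(reads)  # mean of the reads

        return resultants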
+ """ read_pattern, *_ = ramp_data read_noise = np.ones(N_PIXELS, dtype=np.float32) * 5 @@ -259,8 +273,9 @@ def test_fit_ramps_no_dq(detector_data, use_jump): dq = np.zeros(resultants.shape, dtype=np.int32) fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) - assert len(fits) == n_pixels + assert len(fits) == n_pixels # sanity check that a fit is output for each pixel + # Check that the chi2 for the resulting fit relative to the assumed flux is ~1 chi2 = 0 for fit in fits: assert len(fit['fits']) == 1 # only one fit per pixel since no dq/jump @@ -270,7 +285,7 @@ def test_fit_ramps_no_dq(detector_data, use_jump): chi2 /= n_pixels - assert np.abs(chi2 - 1) < 0.03 + assert np.abs(chi2 - 1) < CHI2_TOL @pytest.mark.parametrize("use_jump", [True, False]) @@ -289,6 +304,7 @@ def test_fit_ramps_dq(detector_data, use_jump): okay = np.sum((dq[1:, :] == 0) & (dq[:-1, :] == 0), axis=0) != 0 fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) + assert len(fits) == n_pixels # sanity check that a fit is output for each pixel chi2 = 0 for fit, use in zip(fits, okay): @@ -303,22 +319,31 @@ def test_fit_ramps_dq(detector_data, use_jump): assert fit['average']['poisson_var'] == 0 chi2 /= np.sum(okay) - assert np.abs(chi2 - 1) < 0.03 + assert np.abs(chi2 - 1) < CHI2_TOL @pytest.fixture(scope="module") def jump_data(ramp_data): + """ + Generate a set of with jumps data as if for a single detector as it + would be passed in by the supporting code. + """ read_pattern, *_ = ramp_data read_noise = np.ones(N_PIXELS, dtype=np.float32) * 5 - resultants, _ = _generate_resultants(read_pattern, FLUX, read_noise, n_pixels=N_PIXELS, add_jumps=True) + resultants, jumps = _generate_resultants(read_pattern, FLUX, read_noise, n_pixels=N_PIXELS, add_jumps=True) - return resultants, read_noise, read_pattern, N_PIXELS, FLUX + return resultants, read_noise, read_pattern, N_PIXELS, FLUX, jumps def test_fit_ramps_with_jumps_no_dq(jump_data): - resultants, read_noise, read_pattern, n_pixels, flux = jump_data + resultants, read_noise, read_pattern, n_pixels, flux, jumps = jump_data + assert resultants.shape == jumps.shape # sanity check that we have a jump result for each resultant dq = np.zeros(resultants.shape, dtype=np.int32) fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) - assert len(fits) == n_pixels + assert len(fits) == n_pixels # sanity check that a fit is output for each pixel + + for fit, jump in zip(fits, np.transpose(jumps)): + for jump_index in fit['jumps']: + assert jump[jump_index], f"{jump=} {fit['jumps']=}" # check the identified jump is recorded as a jump From c35c06eca795cf20732dbe30007b07579a99488e Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 27 Sep 2023 15:15:15 -0400 Subject: [PATCH 57/90] Use "jump" instead of "split" --- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 55 ++++++++++++--------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index cf5e8f24..2c3316ec 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -348,7 +348,7 @@ cdef class Pixel: ramp_fits.average.poisson_var = 0 cdef float [:] stats - cdef int split + cdef int jump cdef float weight, total_weight = 0 # Run while the stack is non-empty @@ -368,17 +368,20 @@ cdef class Pixel: # only a single point. In that case, stats will be empty. 
This # will create an error in the max() call. if len(stats) > 0 and max(stats) > threshold(self.fixed.threshold, ramp_fit.slope): - # Compute split point to create two new ramps - # The split will map to the index of the resultant with the - # detected jump: - # resultant_jump_index = ramp.start + split - split = np.argmax(stats) - ramp_fits.jumps.push_back(ramp.start + split) + # Compute jump point to create two new ramps + # This jump point corresponds to the index of the largest + # statistic: + # argmax(stats) + # These statistics are indexed relative to the + # ramp's range. Therefore, we need to add the start index + # of the ramp to the result. + jump = np.argmax(stats) + ramp.start + ramp_fits.jumps.push_back(jump) # This resultant index needs to be removed, therefore the two # possible new ramps are: - # RampIndex(ramp.start, ramp.start + split - 1) - # RampIndex(ramp.start + split + 1, ramp.end) + # RampIndex(ramp.start, jump - 1) + # RampIndex(jump + 1, ramp.end) # This is because the RampIndex contains the index of the # first and last resulants in the sub-ramp it describes. # Note: The algorithm works via working over the sub-ramps @@ -388,30 +391,36 @@ cdef class Pixel: # being the top of the stack; meaning that, # it will be the next ramp handeled. - if split > 0: - # When split == 0, the jump has been detected in the resultant - # corresponding to the first resultant in the ramp, i.e - # ramp.start - # So the "split" is just excluding the first resultant in + if jump > ramp.start: + # When jump == ramp.start, the jump has been detected in + # the resultant in the first resultant of the ramp. So + # the "split" is just excluding the first resultant in # the ramp currently being considered. Therefore, there # is no need to handle a ramp in this case. - ramps.push(RampIndex(ramp.start, ramp.start + split - 1)) + # Note that by construction jump >= ramp.start. So + # something has seriously gone wrong if jump < ramp.start + ramps.push(RampIndex(ramp.start, jump - 1)) # Note that because the stats can only be calculated for ramp - # length - 1 # positions due to the need to compute at least - # single differences. # Therefore the maximum value for + # length - 1 positions due to the need to compute at least + # single differences. Therefore the maximum value for # argmax(stats) is ramp length - 2, as the index of the last # element of stats is length of stats - 1. Thus # max(argmax(stats)) = len(stats) - 1 # = len(ramp) - 2 # = ramp.end - ramp.start - 1 + # Thus we have + # max(jump) = ramp.start + max(argmax(stats)) + # = ramp.start + ramp.end - ramp.start - 1 + # = ramp.end - 1 # So we have that the maximium value for the lower index of # this sub-ramp is - # ramp.start + split + 1 = ramp.start + ramp.end - # - ramp.start - 1 + 1 - # = ramp.end - # This is always a valid ramp. - ramps.push(RampIndex(ramp.start + split + 1, ramp.end)) + # max(jump) + 1 = ramp.end - 1 + 1 + # = ramp.end + # (ramp.end, ramp.end) is technically a valid ramp, which + # will immediately get thrown out in the next iteration of + # because stats will be empty. 
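For reference, the decision made in the check above can be written as a small standalone Python sketch. It uses the default Thresh(5.5, 1/3.0) that _fit_ramps.pyx passes to make_fixed; the helper names jump_threshold and has_jump are illustrative only, and a positive fitted slope is assumed so the logarithm is defined:

    import numpy as np

    def jump_threshold(slope, intercept=5.5, constant=1 / 3.0):
        # Casertano+22-style threshold: brighter (larger slope) ramps get a
        # lower threshold because their Poisson noise grows with the signal.
        # Assumes slope > 0 so that log10 is defined.
        return intercept - constant * np.log10(slope)

    def has_jump(stats, slope):
        # A ramp is split only when the largest statistic exceeds the
        # slope-dependent threshold; an empty stats sequence (a single
        # resultant ramp) can never trigger a split.
        return len(stats) > 0 and max(stats) > jump_threshold(slope)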
+ ramps.push(RampIndex(jump + 1, ramp.end)) # Return to top of loop to fit new ramps without recording continue @@ -426,6 +435,8 @@ cdef class Pixel: # Start computing the averages # Note we do not do anything in the NaN case for degenerate ramps if not np.isnan(ramp_fit.slope): + # protect weight against the extremely unlikely case of a zero + # variance weight = 0 if ramp_fit.read_var == 0 else 1 / ramp_fit.read_var total_weight += weight From c6fed094660269f561149242414dfd5c29bd52b7 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 27 Sep 2023 15:33:46 -0400 Subject: [PATCH 58/90] Start to adapt code to use the new ramp_fit --- src/stcal/ramp_fitting/ols_cas22_fit.py | 84 ++++++++++--------------- 1 file changed, 33 insertions(+), 51 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index daadea6f..2db44a81 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -36,7 +36,7 @@ from .ols_cas22_util import ma_table_to_tau, ma_table_to_tbar, readpattern_to_matable -def fit_ramps_casertano(resultants, dq, read_noise, read_time, ma_table=None, read_pattern=None): +def fit_ramps_casertano(resultants, dq, read_noise, read_time, ma_table=None, read_pattern=None, use_jump=False): """Fit ramps following Casertano+2022, including averaging partial ramps. Ramps are broken where dq != 0, and fits are performed on each sub-ramp. @@ -60,6 +60,9 @@ def fit_ramps_casertano(resultants, dq, read_noise, read_time, ma_table=None, re read_pattern : list[list[int]] or None The read pattern prescription. If None, use `ma_table`. One of `ma_table` or `read_pattern` must be defined. + use_jump : bool + If True, use the jump detection algorithm to identify CRs. + If False, use the DQ array to identify CRs. Returns ------- @@ -71,80 +74,59 @@ def fit_ramps_casertano(resultants, dq, read_noise, read_time, ma_table=None, re """ # Get the Multi-accum table, either as given or from the read pattern - if ma_table is None: - if read_pattern is not None: - ma_table = readpattern_to_matable(read_pattern) - if ma_table is None: + if read_pattern is None: + if ma_table is not None: + read_pattern = ma_table_to_readpattern(ma_table) + if read_pattern is None: raise RuntimeError('One of `ma_table` or `read_pattern` must be given.') resultants_unit = getattr(resultants, 'unit', None) if resultants_unit is not None: resultants = resultants.to(u.electron).value - resultants = np.array(resultants).astype('f4') + resultants = np.array(resultants).astype(np.float32) - dq = np.array(dq).astype('i4') + dq = np.array(dq).astype(np.float32) if np.ndim(read_noise) <= 1: read_noise = read_noise * np.ones(resultants.shape[1:]) - read_noise = np.array(read_noise).astype('f4') + read_noise = np.array(read_noise).astype(np.float32) - origshape = resultants.shape + orig_shape = resultants.shape if len(resultants.shape) == 1: # single ramp. resultants = resultants.reshape(origshape + (1,)) dq = dq.reshape(origshape + (1,)) read_noise = read_noise.reshape(origshape[1:] + (1,)) - rampfitdict = ols_cas22.fit_ramps( + ramp_fits = ols_cas22.fit_ramps( resultants.reshape(resultants.shape[0], -1), dq.reshape(resultants.shape[0], -1), read_noise.reshape(-1), read_time, - ma_table) - - par = np.zeros(resultants.shape[1:] + (2,), dtype='f4') - var = np.zeros(resultants.shape[1:] + (3,), dtype='f4') - - npix = resultants.reshape(resultants.shape[0], -1).shape[1] - # we need to do some averaging to merge the results in each ramp. 
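The removed NumPy code below (like the Cython accumulation above in _pixel.pyx) merges the sub-ramp fits using inverse-variance weights derived from the read-noise variance. A minimal standalone sketch of that combination; average_ramp_fits is an illustrative name, not part of the package:

    import numpy as np

    def average_ramp_fits(slopes, read_vars, poisson_vars):
        slopes = np.asarray(slopes, dtype=np.float32)
        read_vars = np.asarray(read_vars, dtype=np.float32)
        poisson_vars = np.asarray(poisson_vars, dtype=np.float32)

        # Inverse-variance weights; a zero read variance contributes zero
        # weight rather than dividing by zero.
        weights = np.zeros_like(read_vars)
        nonzero = read_vars != 0
        weights[nonzero] = 1.0 / read_vars[nonzero]

        total = weights.sum()
        if total == 0:
            return 0.0, 0.0, 0.0

        # Weighted-mean slope and the propagated variances of that mean:
        # var(sum(w_i * m_i) / sum(w_i)) = sum(w_i**2 * var_i) / sum(w_i)**2
        slope = (weights * slopes).sum() / total
        read_var = (weights ** 2 * read_vars).sum() / total ** 2
        poisson_var = (weights ** 2 * poisson_vars).sum() / total ** 2
        return slope, read_var, poisson_var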
- # inverse variance weights based on slopereadvar - weight = ((rampfitdict['slopereadvar'] != 0) / ( - rampfitdict['slopereadvar'] + (rampfitdict['slopereadvar'] == 0))) - totweight = np.bincount(rampfitdict['pix'], weights=weight, minlength=npix) - totval = np.bincount(rampfitdict['pix'], - weights=weight * rampfitdict['slope'], - minlength=npix) - # fill in the averaged slopes - par.reshape(npix, 2)[:, 1] = ( - totval / (totweight + (totweight == 0))) - - # read noise variances - totval = np.bincount( - rampfitdict['pix'], weights=weight ** 2 * rampfitdict['slopereadvar'], - minlength=npix) - var.reshape(npix, 3,)[:, 0] = ( - totval / (totweight ** 2 + (totweight == 0))) - - # poisson noise variances - totval = np.bincount( - rampfitdict['pix'], - weights=weight ** 2 * rampfitdict['slopepoissonvar'], minlength=npix) - var.reshape(npix, 3)[..., 1] = ( - totval / (totweight ** 2 + (totweight == 0))) - - # multiply Poisson term by flux. Clip at zero; no negative Poisson variances. - var[..., 1] *= np.clip(par[..., 1], 0, np.inf) - var[..., 2] = var[..., 0] + var[..., 1] - - if resultants.shape != origshape: - par = par[0] - var = var[0] + read_pattern, + use_jump) + + parameters = np.zeros((len(ramp_fits), 2), dtype=np.float32) + variances = np.zeros((len(ramp_fits), 3), dtype=np.float32) + + # Extract the data request from the ramp fits + for index, ramp_fit in enumerate(ramp_fits): + parameters[1, :] = ramp_fit['average']['slope'] + + variances[0, :] = ramp_fit['average']['read_var'] + variances[1, :] = ramp_fit['average']['poisson_var'] + + variances[2, :] = variances[0, :] + variances[1, :] + + if resultants.shape != orig_shape: + parameters = parameters[0] + variances = variances[0] if resultants_unit is not None: - par = par * resultants_unit + parameters = parameters * resultants_unit - return par, var + return parameters, variances def fit_ramps_casertano_no_dq(resultants, read_noise, ma_table): From dc071e437f66f36f54481c56b4cfc10659533fd1 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Tue, 3 Oct 2023 14:36:55 -0400 Subject: [PATCH 59/90] Bugfix for off by one indexing for jump and add proper jump test --- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 90 ++++++++------- tests/test_jump_cas22.py | 122 +++++++++++++++----- 2 files changed, 142 insertions(+), 70 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 2c3316ec..7e348871 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -314,7 +314,7 @@ cdef class Pixel: # single difference for this resultant. stats[stat] = self.stat(slope, ramp, index, Diff.single) else: - stats[stat] = max(self.stat(slope, ramp, index, Diff.double), + stats[stat] = max(self.stat(slope, ramp, index, Diff.single), self.stat(slope, ramp, index, Diff.double)) return stats @@ -348,7 +348,7 @@ cdef class Pixel: ramp_fits.average.poisson_var = 0 cdef float [:] stats - cdef int jump + cdef int jump0, jump1 cdef float weight, total_weight = 0 # Run while the stack is non-empty @@ -375,13 +375,30 @@ cdef class Pixel: # These statistics are indexed relative to the # ramp's range. Therefore, we need to add the start index # of the ramp to the result. 
- jump = np.argmax(stats) + ramp.start - ramp_fits.jumps.push_back(jump) - - # This resultant index needs to be removed, therefore the two + # + # Note that because the resultants are averages of reads, but + # jumps occur in individual reads, it is possible that the + # jump is averaged down by the resultant with the actual jump + # causing the computed jump to be off by one index. + # In the idealized case this is when the jump occurs near + # the start of the resultant with the jump. In this case, + # the statistic for the resultant will be maximized at + # index - 1 rather than index. This means that we have to + # remove argmax(stats) + 1 as it is also a possible jump. + # This case is difficult to distinguish from the case where + # argmax(stats) does correspond to the jump resultant. + # Therefore, we just remove both possible resultants from + # consideration. + jump0 = np.argmax(stats) + ramp.start + jump1 = jump0 + 1 + ramp_fits.jumps.push_back(jump0) + ramp_fits.jumps.push_back(jump1) + + # The two resultant indicies need to be skipped, therefore + # the two # possible new ramps are: - # RampIndex(ramp.start, jump - 1) - # RampIndex(jump + 1, ramp.end) + # RampIndex(ramp.start, jump0 - 1) + # RampIndex(jump1 + 1, ramp.end) # This is because the RampIndex contains the index of the # first and last resulants in the sub-ramp it describes. # Note: The algorithm works via working over the sub-ramps @@ -391,38 +408,31 @@ cdef class Pixel: # being the top of the stack; meaning that, # it will be the next ramp handeled. - if jump > ramp.start: - # When jump == ramp.start, the jump has been detected in - # the resultant in the first resultant of the ramp. So - # the "split" is just excluding the first resultant in - # the ramp currently being considered. Therefore, there - # is no need to handle a ramp in this case. - # Note that by construction jump >= ramp.start. So - # something has seriously gone wrong if jump < ramp.start - ramps.push(RampIndex(ramp.start, jump - 1)) - - # Note that because the stats can only be calculated for ramp - # length - 1 positions due to the need to compute at least - # single differences. Therefore the maximum value for - # argmax(stats) is ramp length - 2, as the index of the last - # element of stats is length of stats - 1. Thus - # max(argmax(stats)) = len(stats) - 1 - # = len(ramp) - 2 - # = ramp.end - ramp.start - 1 - # Thus we have - # max(jump) = ramp.start + max(argmax(stats)) - # = ramp.start + ramp.end - ramp.start - 1 - # = ramp.end - 1 - # So we have that the maximium value for the lower index of - # this sub-ramp is - # max(jump) + 1 = ramp.end - 1 + 1 - # = ramp.end - # (ramp.end, ramp.end) is technically a valid ramp, which - # will immediately get thrown out in the next iteration of - # because stats will be empty. - ramps.push(RampIndex(jump + 1, ramp.end)) - - # Return to top of loop to fit new ramps without recording + if jump0 > ramp.start: + # Note that when jump0 == ramp.start, we have detected a + # jump in the first resultant of the ramp. This means + # there is no sub-ramp before jump0. + # Also, note that this will produce bad results as + # the ramp indexing will go out of bounds. So it is + # important that we exclude it. + # Note that jump0 < ramp.start is not possible because + # the argmax is always >= 0 + ramps.push(RampIndex(ramp.start, jump0 - 1)) + + if jump1 < ramp.end: + # Note that if jump1 == ramp.end, we have detected a + # jump in the last resultant of the ramp. This means + # there is no sub-ramp after jump1. 
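Pulling the index bookkeeping above together, a small pure-Python sketch of how one detected jump removes the two suspect resultants and leaves at most two sub-ramps to push back on the stack; split_ramp is an illustrative helper, not the Cython implementation:

    import numpy as np

    def split_ramp(start, end, stats):
        # stats[i] tests the difference beginning at resultant start + i, so
        # both start + argmax(stats) and the following resultant are excluded.
        jump0 = int(np.argmax(stats)) + start
        jump1 = jump0 + 1

        sub_ramps = []
        if jump0 > start:
            sub_ramps.append((start, jump0 - 1))  # resultants before the jump
        if jump1 < end:
            sub_ramps.append((jump1 + 1, end))    # resultants after the jump

        return (jump0, jump1), sub_ramps

    # A ramp over resultants 0..9 whose largest statistic is at index 4 is
    # split into (0, 3) and (6, 9), with resultants 4 and 5 dropped:
    assert split_ramp(0, 9, [0, 1, 2, 3, 9, 2, 1, 0, 0]) == ((4, 5), [(0, 3), (6, 9)])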
+ # Also, note that this will produce bad results as + # the ramp indexing will go out of bounds. So it is + # important that we exclude it. + # Note that jump1 > ramp.end is technically possible + # however in those potential cases it will draw on + # resultants which are not considered part of the ramp + # under consideration. Therefore, we have to exlude all + # of those values. + ramps.push(RampIndex(jump1 + 1, ramp.end)) + continue # Add ramp_fit to ramp_fits if no jump detection or stats are less diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 582e9cd4..82513725 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -154,29 +154,20 @@ def test_make_fixed(ramp_data, use_jump): assert np.isnan(fixed['slope_var']).all() -def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1, add_jumps=False): +def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1): """Generate a set of resultants for a pixel""" resultants = np.zeros((len(read_pattern), n_pixels), dtype=np.float32) - jumps = [] # Use Poisson process to simulate the accumulation of the ramp ramp_value = np.zeros(n_pixels, dtype=np.float32) # Last value of ramp for index, reads in enumerate(read_pattern): resultant_total = np.zeros(n_pixels, dtype=np.float32) # Total of all reads in this resultant - read_jumps = [] for _ in reads: # Compute the next value of the ramp # - Poisson process for the flux # - Gaussian process for the read noise ramp_value += RNG.poisson(flux * ROMAN_READ_TIME, size=n_pixels).astype(np.float32) ramp_value += RNG.standard_normal(size=n_pixels, dtype=np.float32) * read_noise / np.sqrt(len(reads)) - if add_jumps: - # Add jumps only to ~1% of the pixels for any given read - jump_points = RNG.standard_normal(size=n_pixels, dtype=np.float32) > 0.99 - read_jumps.append(jump_points) - - # Add a large value to the ramp - ramp_value += (JUMP_VALUE * jump_points).astype(np.float32) # Add to running total for the resultant resultant_total += ramp_value @@ -184,13 +175,10 @@ def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1, add_jumps=F # Record the average value for resultant (i.e., the average of the reads) resultants[index] = (resultant_total / len(reads)).astype(np.float32) - # Record all the jumps for this resultant - jumps.append(np.any(read_jumps, axis=0)) - if n_pixels == 1: resultants = resultants[:, 0] - return resultants, np.array(jumps) + return resultants @pytest.fixture(scope="module") @@ -199,7 +187,7 @@ def pixel_data(ramp_data): read_noise = np.float32(5) read_pattern, t_bar, tau, n_reads = ramp_data - resultants, _ = _generate_resultants(read_pattern, FLUX, read_noise) + resultants = _generate_resultants(read_pattern, FLUX, read_noise) yield resultants, t_bar, tau, n_reads, read_noise, FLUX @@ -257,7 +245,7 @@ def detector_data(ramp_data): read_pattern, *_ = ramp_data read_noise = np.ones(N_PIXELS, dtype=np.float32) * 5 - resultants, _ = _generate_resultants(read_pattern, FLUX, read_noise, n_pixels=N_PIXELS) + resultants = _generate_resultants(read_pattern, FLUX, read_noise, n_pixels=N_PIXELS) return resultants, read_noise, read_pattern, N_PIXELS, FLUX @@ -323,27 +311,101 @@ def test_fit_ramps_dq(detector_data, use_jump): @pytest.fixture(scope="module") -def jump_data(ramp_data): +def jump_data(): """ - Generate a set of with jumps data as if for a single detector as it - would be passed in by the supporting code. + Generate a set of data were jumps are simulated in each possible read. 
+ - jumps should occur in read of same index as the pixel index. """ - read_pattern, *_ = ramp_data - read_noise = np.ones(N_PIXELS, dtype=np.float32) * 5 - resultants, jumps = _generate_resultants(read_pattern, FLUX, read_noise, n_pixels=N_PIXELS, add_jumps=True) + # Generate a read pattern with 8 reads per resultant + shape = (8, 8) + read_pattern = np.arange(np.prod(shape)).reshape(shape).tolist() + + resultants = np.zeros((len(read_pattern), np.prod(shape)), dtype=np.float32) + jumps = np.zeros((len(read_pattern), np.prod(shape)), dtype=bool) + jump_res = -1 + for jump_index in range(np.prod(shape)): + read_values = np.zeros(np.prod(shape), dtype=np.float32) + for index in range(np.prod(shape)): + if index >= jump_index: + read_values[index] = JUMP_VALUE - return resultants, read_noise, read_pattern, N_PIXELS, FLUX, jumps + if jump_index % shape[1] == 0: + # Start indicating a new resultant + jump_res += 1 + jumps[jump_res, jump_index] = True + + resultants[:, jump_index] = np.mean(read_values.reshape(shape), axis=1).astype(np.float32) + n_pixels = np.prod(shape) + read_noise = np.ones(n_pixels, dtype=np.float32) * 5 -def test_fit_ramps_with_jumps_no_dq(jump_data): - resultants, read_noise, read_pattern, n_pixels, flux, jumps = jump_data - assert resultants.shape == jumps.shape # sanity check that we have a jump result for each resultant + # Add actual ramp data in addition to the jump data + resultants += _generate_resultants(read_pattern, FLUX, read_noise, n_pixels=n_pixels) + + return resultants, read_noise, read_pattern, n_pixels, jumps.transpose() + + +def test_find_jumps(jump_data): + """ + Check that we can locate all the jumps in a given ramp + """ + resultants, read_noise, read_pattern, n_pixels, jumps = jump_data dq = np.zeros(resultants.shape, dtype=np.int32) fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) - assert len(fits) == n_pixels # sanity check that a fit is output for each pixel - for fit, jump in zip(fits, np.transpose(jumps)): - for jump_index in fit['jumps']: - assert jump[jump_index], f"{jump=} {fit['jumps']=}" # check the identified jump is recorded as a jump + # Check that all the jumps have been located per the algorithm's constraints + for index, (fit, jump) in enumerate(zip(fits, jumps)): + # sanity check that only one jump should have been added + assert np.where(jump)[0].shape == (1,) + if index == 0: + # There is no way to detect a jump if it is in the very first read + # The very first pixel in this case has a jump in the first read + assert len(fit['jumps']) == 0 + assert jump[0] + assert not np.all(jump[1:]) + + # Test that the correct index was recorded + assert len(fit['index']) == 1 + assert fit['index'][0]['start'] == 0 + assert fit['index'][0]['end'] == len(read_pattern) - 1 + else: + # Select the single jump and check that it is recorded as a jump + assert np.where(jump)[0][0] in fit['jumps'] + + # In all cases here we have to exclude two resultants + assert len(fit['jumps']) == 2 + + # Test that all the jumps recorded are +/- 1 of the real jump + # This is due to the need to exclude two resultants + for jump_index in fit['jumps']: + assert jump[jump_index] or jump[jump_index + 1] or jump[jump_index - 1] + + # Test that the correct indexes are recorded + ramp_indicies = [] + for ramp_index in fit["index"]: + # Note start/end of a ramp_index are inclusive meaning that end + # is an index included in the ramp_index so the range is to end + 1 + new_indicies = list(range(ramp_index["start"], 
ramp_index["end"] + 1)) + + # check that all the ramps are non-overlapping + assert set(ramp_indicies).isdisjoint(new_indicies) + + ramp_indicies.extend(new_indicies) + + # check that no ramp_index is a jump + assert set(ramp_indicies).isdisjoint(fit['jumps']) + + # check that all resultant indicies are either in a ramp or listed as a jump + assert set(ramp_indicies).union(fit['jumps']) == set(range(len(read_pattern))) + + # Check that the slopes have been estimated reasonably well + # There are not that many pixels to test this against and many resultants + # have been thrown out due to the jumps. Thus we only check the slope is + # "fairly close" to the expected value. This is purposely a loose check + # because the main purpose of this test is to verify that the jumps are + # being detected correctly, above. + chi2 = 0 + for fit in fits: + assert_allclose(fit['average']['slope'], FLUX, rtol=3) From 22c5fbfa3fbfe5a6dbda3d328f5fe48bd347a963 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 4 Oct 2023 13:57:24 -0400 Subject: [PATCH 60/90] Fix integration with rest of stcal --- src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx | 4 ++-- src/stcal/ramp_fitting/ols_cas22_fit.py | 15 +++++++-------- src/stcal/ramp_fitting/ols_cas22_util.py | 14 +++++++++----- tests/test_ramp_fitting_cas22.py | 16 ++++++++++------ 4 files changed, 28 insertions(+), 21 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 7d8dea47..1a526352 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -34,8 +34,8 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, the resultants in electrons dq : np.ndarry[n_resultants, n_pixel] the dq array. dq != 0 implies bad pixel / CR. - read noise : float - the read noise in electrons + read_noise : np.ndarray[n_pixel] + the read noise in electrons for each pixel read_time : float Time to perform a readout. For Roman data, this is FRAME_TIME. read_pattern : list[list[int]] diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index 2db44a81..97c63c79 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -33,7 +33,7 @@ import numpy as np from . 
import ols_cas22 -from .ols_cas22_util import ma_table_to_tau, ma_table_to_tbar, readpattern_to_matable +from .ols_cas22_util import ma_table_to_tau, ma_table_to_tbar, read_pattern_to_ma_table, ma_table_to_read_pattern def fit_ramps_casertano(resultants, dq, read_noise, read_time, ma_table=None, read_pattern=None, use_jump=False): @@ -76,7 +76,7 @@ def fit_ramps_casertano(resultants, dq, read_noise, read_time, ma_table=None, re # Get the Multi-accum table, either as given or from the read pattern if read_pattern is None: if ma_table is not None: - read_pattern = ma_table_to_readpattern(ma_table) + read_pattern = ma_table_to_read_pattern(ma_table) if read_pattern is None: raise RuntimeError('One of `ma_table` or `read_pattern` must be given.') @@ -86,8 +86,7 @@ def fit_ramps_casertano(resultants, dq, read_noise, read_time, ma_table=None, re resultants = np.array(resultants).astype(np.float32) - dq = np.array(dq).astype(np.float32) - + dq = np.array(dq).astype(np.int32) if np.ndim(read_noise) <= 1: read_noise = read_noise * np.ones(resultants.shape[1:]) read_noise = np.array(read_noise).astype(np.float32) @@ -112,12 +111,12 @@ def fit_ramps_casertano(resultants, dq, read_noise, read_time, ma_table=None, re # Extract the data request from the ramp fits for index, ramp_fit in enumerate(ramp_fits): - parameters[1, :] = ramp_fit['average']['slope'] + parameters[index, 1] = ramp_fit['average']['slope'] - variances[0, :] = ramp_fit['average']['read_var'] - variances[1, :] = ramp_fit['average']['poisson_var'] + variances[index, 0] = ramp_fit['average']['read_var'] + variances[index, 1] = ramp_fit['average']['poisson_var'] - variances[2, :] = variances[0, :] + variances[1, :] + variances[:, 2] = (variances[:, 0] + variances[:, 1]).astype(np.float32) if resultants.shape != orig_shape: parameters = parameters[0] diff --git a/src/stcal/ramp_fitting/ols_cas22_util.py b/src/stcal/ramp_fitting/ols_cas22_util.py index 63bb9a27..6f69d4f8 100644 --- a/src/stcal/ramp_fitting/ols_cas22_util.py +++ b/src/stcal/ramp_fitting/ols_cas22_util.py @@ -2,10 +2,14 @@ """ import numpy as np -__all__ = ['ma_table_to_tau', 'ma_table_to_tbar'] +__all__ = [ + 'ma_table_to_read_pattern', + 'ma_table_to_tau', + 'ma_table_to_tbar', + 'read_pattern_to_ma_table'] -def matable_to_readpattern(ma_table): +def ma_table_to_read_pattern(ma_table): """Convert read patterns to multi-accum lists Using Roman terminology, a "read pattern" is a list of resultants. Each element of this list @@ -26,7 +30,7 @@ def matable_to_readpattern(ma_table): [[1, 1], [2, 2], [4, 1], [5, 4], [9,2], [11,1]] The example above, using this function, should perform as follows: - >>> matable_to_readpattern([[1, 1], [2, 2], [4, 1], [5, 4], [9,2], [11,1]]) + >>> ma_table_to_read_pattern([[1, 1], [2, 2], [4, 1], [5, 4], [9,2], [11,1]]) [[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]] Parameters @@ -123,7 +127,7 @@ def ma_table_to_tbar(ma_table, read_time): return meantimes -def readpattern_to_matable(read_pattern): +def read_pattern_to_ma_table(read_pattern): """Convert read patterns to multi-accum lists Using Roman terminology, a "read pattern" is a list of resultants. 
Each element of this list @@ -144,7 +148,7 @@ def readpattern_to_matable(read_pattern): [[1, 1], [2, 2], [4, 1], [5, 4], [9,2], [11,1]] The example above, using this function, should perform as follows: - >>> readpattern_to_matable([[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]]) + >>> read_pattern_to_ma_table([[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]]) [[1, 1], [2, 2], [4, 1], [5, 4], [9, 2], [11, 1]] Parameters diff --git a/tests/test_ramp_fitting_cas22.py b/tests/test_ramp_fitting_cas22.py index 4eb63aa7..935642e4 100644 --- a/tests/test_ramp_fitting_cas22.py +++ b/tests/test_ramp_fitting_cas22.py @@ -14,22 +14,22 @@ ROMAN_READ_TIME = 3.04 -def test_matable_to_readpattern(): +def test_ma_table_to_read_pattern(): """Test conversion from read pattern to multi-accum table""" ma_table = [[1, 1], [2, 2], [4, 1], [5, 4], [9,2], [11,1]] expected = [[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]] - result = ols_cas22_util.matable_to_readpattern(ma_table) + result = ols_cas22_util.ma_table_to_read_pattern(ma_table) assert result == expected -def test_readpattern_to_matable(): +def test_read_pattern_to_ma_table(): """Test conversion from read pattern to multi-accum table""" pattern = [[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]] expected = [[1, 1], [2, 2], [4, 1], [5, 4], [9,2], [11,1]] - result = ols_cas22_util.readpattern_to_matable(pattern) + result = ols_cas22_util.read_pattern_to_ma_table(pattern) assert result == expected @@ -38,14 +38,18 @@ def test_simulated_ramps(): ntrial = 100000 ma_table, flux, read_noise, resultants = simulate_many_ramps(ntrial=ntrial) + dq = np.zeros(resultants.shape, dtype=np.int32) + read_noise = np.ones(resultants.shape[1], dtype=np.float32) * read_noise + par, var = ramp.fit_ramps_casertano( - resultants, resultants * 0, read_noise, ROMAN_READ_TIME, ma_table=ma_table) + resultants, dq, read_noise, ROMAN_READ_TIME, ma_table=ma_table) + chi2dof_slope = np.sum((par[:, 1] - flux)**2 / var[:, 2]) / ntrial assert np.abs(chi2dof_slope - 1) < 0.03 # now let's mark a bunch of the ramps as compromised. bad = np.random.uniform(size=resultants.shape) > 0.7 - dq = resultants * 0 + bad + dq += bad par, var = ramp.fit_ramps_casertano( resultants, dq, read_noise, ROMAN_READ_TIME, ma_table=ma_table) # only use okay ramps From 3e5c5889823a552c1d55c4a95a6df3504a8a3f4d Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 4 Oct 2023 14:16:36 -0400 Subject: [PATCH 61/90] Move array building to cython --- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 18 +++++++++++--- src/stcal/ramp_fitting/ols_cas22_fit.py | 14 +---------- tests/test_jump_cas22.py | 24 ++++++++++++++++--- 3 files changed, 37 insertions(+), 19 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 1a526352..46eab337 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -69,11 +69,23 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, # list in the end. 
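As a behavioral sketch of the two conversion helpers documented in ols_cas22_util.py above (consistent with their doctests, but ignoring any validation the real functions perform; these one-liners are illustrative, not the library code):

    def read_pattern_to_ma_table(read_pattern):
        # Each resultant becomes [first read number, number of reads].
        return [[reads[0], len(reads)] for reads in read_pattern]

    def ma_table_to_read_pattern(ma_table):
        # Expand each [first, n] pair back into its explicit list of reads.
        return [list(range(first, first + n)) for first, n in ma_table]

    assert read_pattern_to_ma_table([[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]]) == \
        [[1, 1], [2, 2], [4, 1], [5, 4], [9, 2], [11, 1]]
    assert ma_table_to_read_pattern([[1, 1], [2, 2], [4, 1], [5, 4], [9, 2], [11, 1]]) == \
        [[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]]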
cdef cpp_list[RampFits] ramp_fits + cdef np.ndarray[float, ndim=2] parameters = np.zeros((n_pixels, 2), dtype=np.float32) + cdef np.ndarray[float, ndim=2] variances = np.zeros((n_pixels, 3), dtype=np.float32) + # Perform all of the fits + cdef RampFits fit cdef int index for index in range(n_pixels): # Fit all the ramps for the given pixel - ramp_fits.push_back(make_pixel(fixed, read_noise[index], - resultants[:, index]).fit_ramps(pixel_ramps[index])) + fit = make_pixel(fixed, read_noise[index], + resultants[:, index]).fit_ramps(pixel_ramps[index]) + + parameters[index, 1] = fit.average.slope + + variances[index, 0] = fit.average.read_var + variances[index, 1] = fit.average.poisson_var + variances[index, 2] = fit.average.read_var + fit.average.poisson_var + + ramp_fits.push_back(fit) - return ramp_fits + return ramp_fits, parameters, variances diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index 97c63c79..974c5d4e 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -98,7 +98,7 @@ def fit_ramps_casertano(resultants, dq, read_noise, read_time, ma_table=None, re dq = dq.reshape(origshape + (1,)) read_noise = read_noise.reshape(origshape[1:] + (1,)) - ramp_fits = ols_cas22.fit_ramps( + ramp_fits, parameters, variances = ols_cas22.fit_ramps( resultants.reshape(resultants.shape[0], -1), dq.reshape(resultants.shape[0], -1), read_noise.reshape(-1), @@ -106,18 +106,6 @@ def fit_ramps_casertano(resultants, dq, read_noise, read_time, ma_table=None, re read_pattern, use_jump) - parameters = np.zeros((len(ramp_fits), 2), dtype=np.float32) - variances = np.zeros((len(ramp_fits), 3), dtype=np.float32) - - # Extract the data request from the ramp fits - for index, ramp_fit in enumerate(ramp_fits): - parameters[index, 1] = ramp_fit['average']['slope'] - - variances[index, 0] = ramp_fit['average']['read_var'] - variances[index, 1] = ramp_fit['average']['poisson_var'] - - variances[:, 2] = (variances[:, 0] + variances[:, 1]).astype(np.float32) - if resultants.shape != orig_shape: parameters = parameters[0] variances = variances[0] diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 82513725..5849e2c3 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -249,6 +249,24 @@ def detector_data(ramp_data): return resultants, read_noise, read_pattern, N_PIXELS, FLUX +@pytest.mark.parametrize("use_jump", [True, False]) +def test_fit_ramps_array_outputs(detector_data, use_jump): + """ + Test that the array outputs line up with the dictionary output + """ + resultants, read_noise, read_pattern, n_pixels, flux = detector_data + dq = np.zeros(resultants.shape, dtype=np.int32) + + fits, parameters, variances = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) + + for fit, par, var in zip(fits, parameters, variances): + assert par[0] == 0 + assert par[1] == fit['average']['slope'] + + assert var[0] == fit['average']['read_var'] + assert var[1] == fit['average']['poisson_var'] + assert var[2] == np.float32(fit['average']['read_var'] + fit['average']['poisson_var']) + @pytest.mark.parametrize("use_jump", [True, False]) def test_fit_ramps_no_dq(detector_data, use_jump): @@ -260,7 +278,7 @@ def test_fit_ramps_no_dq(detector_data, use_jump): resultants, read_noise, read_pattern, n_pixels, flux = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) - fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) + 
fits, _, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) assert len(fits) == n_pixels # sanity check that a fit is output for each pixel # Check that the chi2 for the resulting fit relative to the assumed flux is ~1 @@ -291,7 +309,7 @@ def test_fit_ramps_dq(detector_data, use_jump): # i.e., we can make a measurement from them. okay = np.sum((dq[1:, :] == 0) & (dq[:-1, :] == 0), axis=0) != 0 - fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) + fits, _, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) assert len(fits) == n_pixels # sanity check that a fit is output for each pixel chi2 = 0 @@ -353,7 +371,7 @@ def test_find_jumps(jump_data): resultants, read_noise, read_pattern, n_pixels, jumps = jump_data dq = np.zeros(resultants.shape, dtype=np.int32) - fits = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) + fits, _, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) # Check that all the jumps have been located per the algorithm's constraints for index, (fit, jump) in enumerate(zip(fits, jumps)): From 92e9508bafd98f9ce990a9f0ffe8d407720abe1e Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 4 Oct 2023 14:29:45 -0400 Subject: [PATCH 62/90] Minor cleanups with enum values --- src/stcal/ramp_fitting/ols_cas22/__init__.py | 3 ++- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 13 ++++++++++- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 10 ++++----- tests/test_jump_cas22.py | 22 +++++++++---------- 4 files changed, 30 insertions(+), 18 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/__init__.py b/src/stcal/ramp_fitting/ols_cas22/__init__.py index a5d0f6f8..4a5480d5 100644 --- a/src/stcal/ramp_fitting/ols_cas22/__init__.py +++ b/src/stcal/ramp_fitting/ols_cas22/__init__.py @@ -1,3 +1,4 @@ from ._fit_ramps import fit_ramps +from ._core import Parameter, Variance, Diff -__all__ = ['fit_ramps'] +__all__ = ['fit_ramps', 'Parameter', 'Variance', 'Diff'] diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 731f7c58..554ab00f 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -32,11 +32,22 @@ cdef struct Thresh: float constant -cdef enum Diff: +cpdef enum Diff: single = 0 double = 1 +cpdef enum Parameter: + intercept = 0 + slope = 1 + + +cpdef enum Variance: + read_var = 0 + poisson_var = 1 + total_var = 2 + + cdef float threshold(Thresh thresh, float slope) cdef float get_power(float s) cdef deque[stack[RampIndex]] init_ramps(int[:, :] dq) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 46eab337..41522037 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -7,7 +7,7 @@ from libcpp.deque cimport deque cimport cython from stcal.ramp_fitting.ols_cas22._core cimport ( - RampFits, RampIndex, Thresh, read_data, init_ramps) + RampFits, RampIndex, Thresh, read_data, init_ramps, Parameter, Variance) from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed, Fixed from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel @@ -80,11 +80,11 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, fit = make_pixel(fixed, read_noise[index], resultants[:, index]).fit_ramps(pixel_ramps[index]) - parameters[index, 1] = fit.average.slope + parameters[index, 
Parameter.slope] = fit.average.slope - variances[index, 0] = fit.average.read_var - variances[index, 1] = fit.average.poisson_var - variances[index, 2] = fit.average.read_var + fit.average.poisson_var + variances[index, Variance.read_var] = fit.average.read_var + variances[index, Variance.poisson_var] = fit.average.poisson_var + variances[index, Variance.total_var] = fit.average.read_var + fit.average.poisson_var ramp_fits.push_back(fit) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 5849e2c3..bdb3570a 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -6,7 +6,7 @@ from stcal.ramp_fitting.ols_cas22._wrappers import init_ramps from stcal.ramp_fitting.ols_cas22._wrappers import run_threshold, make_fixed, make_pixel, fit_ramp -from stcal.ramp_fitting.ols_cas22 import fit_ramps +from stcal.ramp_fitting.ols_cas22 import fit_ramps, Parameter, Variance, Diff RNG = np.random.default_rng(619) @@ -128,8 +128,8 @@ def test_make_fixed(ramp_data, use_jump): # These are computed via vectorized operations in the main code, here we # check using item-by-item operations if use_jump: - single_gen = zip(fixed['t_bar_diff'][0], fixed['recip'][0], fixed['slope_var'][0]) - double_gen = zip(fixed['t_bar_diff'][1], fixed['recip'][1], fixed['slope_var'][1]) + single_gen = zip(fixed['t_bar_diff'][Diff.single], fixed['recip'][Diff.single], fixed['slope_var'][Diff.single]) + double_gen = zip(fixed['t_bar_diff'][Diff.double], fixed['recip'][Diff.double], fixed['slope_var'][Diff.double]) for index, (t_bar_1, recip_1, slope_var_1) in enumerate(single_gen): assert t_bar_1 == t_bar[index + 1] - t_bar[index] @@ -210,8 +210,8 @@ def test_make_pixel(pixel_data, use_jump): # These are computed via vectorized operations in the main code, here we # check using item-by-item operations if use_jump: - single_gen = zip(pixel['delta'][0], pixel['sigma'][0]) - double_gen = zip(pixel['delta'][1], pixel['sigma'][1]) + single_gen = zip(pixel['delta'][Diff.single], pixel['sigma'][Diff.single]) + double_gen = zip(pixel['delta'][Diff.double], pixel['sigma'][Diff.double]) for index, (delta_1, sigma_1) in enumerate(single_gen): assert delta_1 == (resultants[index + 1] - resultants[index]) / (t_bar[index + 1] - t_bar[index]) @@ -260,12 +260,12 @@ def test_fit_ramps_array_outputs(detector_data, use_jump): fits, parameters, variances = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) for fit, par, var in zip(fits, parameters, variances): - assert par[0] == 0 - assert par[1] == fit['average']['slope'] + assert par[Parameter.intercept] == 0 + assert par[Parameter.slope] == fit['average']['slope'] - assert var[0] == fit['average']['read_var'] - assert var[1] == fit['average']['poisson_var'] - assert var[2] == np.float32(fit['average']['read_var'] + fit['average']['poisson_var']) + assert var[Variance.read_var] == fit['average']['read_var'] + assert var[Variance.poisson_var] == fit['average']['poisson_var'] + assert var[Variance.total_var] == np.float32(fit['average']['read_var'] + fit['average']['poisson_var']) @pytest.mark.parametrize("use_jump", [True, False]) @@ -381,7 +381,7 @@ def test_find_jumps(jump_data): # There is no way to detect a jump if it is in the very first read # The very first pixel in this case has a jump in the first read assert len(fit['jumps']) == 0 - assert jump[0] + assert jump[0] # sanity check that the jump is in the first resultant still assert not np.all(jump[1:]) # Test that the correct index was recorded From 
d6f0227db33ecfaef640b2284ab976b0adbe3cdb Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 4 Oct 2023 14:42:30 -0400 Subject: [PATCH 63/90] Fix all negative indexing. This enables removing bounds/wrap checks which are slow --- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 8 ++++++ src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 30 ++++++++++++++------- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 5 +++- 3 files changed, 32 insertions(+), 11 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index a56cc357..d1746421 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -45,8 +45,10 @@ Functions from libcpp.stack cimport stack from libcpp.deque cimport deque from libc.math cimport log10 + import numpy as np cimport numpy as np +cimport cython from stcal.ramp_fitting.ols_cas22._core cimport Thresh, DerivedData @@ -57,6 +59,8 @@ cdef float[2][6] PTABLE = [ [0, 0.4, 1, 3, 6, 10]] +@cython.boundscheck(False) +@cython.wraparound(False) cdef inline float get_power(float s): """ Return the power from Casertano+22, Table 2 @@ -96,6 +100,8 @@ cdef inline float threshold(Thresh thresh, float slope): return thresh.intercept - thresh.constant * log10(slope) +@cython.boundscheck(False) +@cython.wraparound(False) cdef inline deque[stack[RampIndex]] init_ramps(int[:, :] dq): """ Create the initial ramp stack for each pixel @@ -166,6 +172,8 @@ cdef inline deque[stack[RampIndex]] init_ramps(int[:, :] dq): return pixel_ramps +@cython.boundscheck(False) +@cython.wraparound(False) cdef DerivedData read_data(list[list[int]] read_pattern, float read_time): """ Derive the input data from the the read pattern diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index 3ea8cd47..52e4ac13 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -15,6 +15,7 @@ make_fixed : function """ import numpy as np cimport numpy as np +cimport cython from stcal.ramp_fitting.ols_cas22._core cimport Thresh, DerivedData, Diff from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed @@ -63,6 +64,8 @@ cdef class Fixed: from pre-computing the values and reusing them. """ + @cython.boundscheck(False) + @cython.wraparound(False) cdef inline float[:, :] t_bar_diff_val(Fixed self): """ Compute the difference offset of t_bar @@ -81,15 +84,18 @@ cdef class Fixed: # to be a memory view. In this case, I make sure that the memory view # stays local to the function (numpy operations create brand new objects) cdef float[:] t_bar = self.data.t_bar.data() + cdef int end = len(t_bar) cdef np.ndarray[float, ndim=2] t_bar_diff = np.zeros((2, self.data.t_bar.size() - 1), dtype=np.float32) - t_bar_diff[Diff.single, :] = np.subtract(t_bar[1:], t_bar[:-1]) - t_bar_diff[Diff.double, :-1] = np.subtract(t_bar[2:], t_bar[:-2]) - t_bar_diff[Diff.double, -1] = np.nan # last double difference is undefined + t_bar_diff[Diff.single, :] = np.subtract(t_bar[1:], t_bar[:end - 1]) + t_bar_diff[Diff.double, :end - 2] = np.subtract(t_bar[2:], t_bar[:end - 2]) + t_bar_diff[Diff.double, end - 2] = np.nan # last double difference is undefined return t_bar_diff + @cython.boundscheck(False) + @cython.wraparound(False) cdef inline float[:, :] recip_val(Fixed self): """ Compute the reciprical sum values @@ -109,18 +115,21 @@ cdef class Fixed: # to be a memory view. 
In this case, I make sure that the memory view # stays local to the function (numpy operations create brand new objects) cdef int[:] n_reads = self.data.n_reads.data() + cdef int end = len(n_reads) cdef np.ndarray[float, ndim=2] recip = np.zeros((2, self.data.n_reads.size() - 1), dtype=np.float32) recip[Diff.single, :] = (np.divide(1.0, n_reads[1:], dtype=np.float32) + - np.divide(1.0, n_reads[:-1], dtype=np.float32)) - recip[Diff.double, :-1] = (np.divide(1.0, n_reads[2:], dtype=np.float32) + - np.divide(1.0, n_reads[:-2], dtype=np.float32)) - recip[Diff.double, -1] = np.nan # last double difference is undefined + np.divide(1.0, n_reads[:end - 1], dtype=np.float32)) + recip[Diff.double, :end - 2] = (np.divide(1.0, n_reads[2:], dtype=np.float32) + + np.divide(1.0, n_reads[:end - 2], dtype=np.float32)) + recip[Diff.double, end - 2] = np.nan # last double difference is undefined return recip + @cython.boundscheck(False) + @cython.wraparound(False) cdef inline float[:, :] slope_var_val(Fixed self): """ Compute slope part of the variance @@ -140,12 +149,13 @@ cdef class Fixed: # stays local to the function (numpy operations create brand new objects) cdef float[:] t_bar = self.data.t_bar.data() cdef float[:] tau = self.data.tau.data() + cdef int end = len(t_bar) cdef np.ndarray[float, ndim=2] slope_var = np.zeros((2, self.data.t_bar.size() - 1), dtype=np.float32) - slope_var[Diff.single, :] = (np.add(tau[1:], tau[:-1]) - np.minimum(t_bar[1:], t_bar[:-1])) - slope_var[Diff.double, :-1] = (np.add(tau[2:], tau[:-2]) - np.minimum(t_bar[2:], t_bar[:-2])) - slope_var[Diff.double, -1] = np.nan # last double difference is undefined + slope_var[Diff.single, :] = (np.add(tau[1:], tau[:end - 1]) - np.minimum(t_bar[1:], t_bar[:end - 1])) + slope_var[Diff.double, :end - 2] = (np.add(tau[2:], tau[:end - 2]) - np.minimum(t_bar[2:], t_bar[:end - 2])) + slope_var[Diff.double, end - 2] = np.nan # last double difference is undefined return slope_var diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 7e348871..605a3820 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -209,6 +209,8 @@ cdef class Pixel: return ramp_fit + @cython.boundscheck(False) + @cython.wraparound(False) cdef inline float correction(Pixel self, RampIndex ramp, int index, int diff): """ Compute the correction factor for the variance used by a statistic @@ -274,7 +276,6 @@ cdef class Pixel: @cython.boundscheck(False) @cython.wraparound(False) - @cython.cdivision(True) cdef inline float[:] stats(Pixel self, float slope, RampIndex ramp): """ Compute fit statistics for jump detection on a single ramp @@ -469,6 +470,8 @@ cdef class Pixel: return ramp_fits +@cython.boundscheck(False) +@cython.wraparound(False) cdef inline Pixel make_pixel(Fixed fixed, float read_noise, float [:] resultants): """ Fast constructor for the Pixel C class. 
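For reference, the quantities pre-computed by Fixed above can be reproduced with plain NumPy slicing, assuming at least two resultants; precompute_fixed is an illustrative helper name, not part of the package, and the trailing NaN mirrors the undefined last double difference:

    import numpy as np

    def precompute_fixed(t_bar, tau, n_reads):
        t_bar = np.asarray(t_bar, dtype=np.float32)
        tau = np.asarray(tau, dtype=np.float32)
        n_reads = np.asarray(n_reads, dtype=np.float32)
        n = len(t_bar)

        # Row 0 holds single (adjacent) differences, row 1 double (skip-one)
        # differences; the last double entry is undefined and left as NaN.
        t_bar_diff = np.full((2, n - 1), np.nan, dtype=np.float32)
        recip = np.full((2, n - 1), np.nan, dtype=np.float32)
        slope_var = np.full((2, n - 1), np.nan, dtype=np.float32)

        t_bar_diff[0] = t_bar[1:] - t_bar[:n - 1]
        t_bar_diff[1, :n - 2] = t_bar[2:] - t_bar[:n - 2]

        recip[0] = 1 / n_reads[1:] + 1 / n_reads[:n - 1]
        recip[1, :n - 2] = 1 / n_reads[2:] + 1 / n_reads[:n - 2]

        slope_var[0] = tau[1:] + tau[:n - 1] - np.minimum(t_bar[1:], t_bar[:n - 1])
        slope_var[1, :n - 2] = tau[2:] + tau[:n - 2] - np.minimum(t_bar[2:], t_bar[:n - 2])

        return t_bar_diff, recip, slope_var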
From 641c33b51627276c900b90c3c979cc51488d5520 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 4 Oct 2023 14:52:57 -0400 Subject: [PATCH 64/90] Clean up style issues --- .../ramp_fitting/ols_cas22/_wrappers.pyx | 20 ----------- src/stcal/ramp_fitting/ols_cas22_fit.py | 26 ++++++++++---- tests/test_jump_cas22.py | 35 +++++++++++++------ 3 files changed, 44 insertions(+), 37 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index 7bdc6f93..975010d6 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -143,23 +143,3 @@ def make_pixel(np.ndarray[float, ndim=1] resultants, read_noise=pixel.read_noise, delta=delta, sigma=sigma) - - -def fit_ramp(np.ndarray[float, ndim=1] resultants, - np.ndarray[float, ndim=1] t_bar, - np.ndarray[float, ndim=1] tau, - np.ndarray[int, ndim=1] n_reads, - float read_noise, - int start, - int end): - - cdef DerivedData data = DerivedData(t_bar, tau, n_reads) - cdef Thresh threshold = Thresh(0, 1) - cdef Fixed fixed = c_make_fixed(data, threshold, False) - - cdef Pixel pixel = c_make_pixel(fixed, read_noise, resultants) - cdef RampIndex ramp_index = RampIndex(start, end) - - cdef RampFit ramp_fit = pixel.fit_ramp(ramp_index) - - return ramp_fit diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index 974c5d4e..db6c2d5d 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -33,10 +33,22 @@ import numpy as np from . import ols_cas22 -from .ols_cas22_util import ma_table_to_tau, ma_table_to_tbar, read_pattern_to_ma_table, ma_table_to_read_pattern - - -def fit_ramps_casertano(resultants, dq, read_noise, read_time, ma_table=None, read_pattern=None, use_jump=False): +from .ols_cas22_util import ( + ma_table_to_tau, + ma_table_to_tbar, + ma_table_to_read_pattern +) + + +def fit_ramps_casertano( + resultants, + dq, + read_noise, + read_time, + ma_table=None, + read_pattern=None, + use_jump=False +): """Fit ramps following Casertano+2022, including averaging partial ramps. Ramps are broken where dq != 0, and fits are performed on each sub-ramp. @@ -94,9 +106,9 @@ def fit_ramps_casertano(resultants, dq, read_noise, read_time, ma_table=None, re orig_shape = resultants.shape if len(resultants.shape) == 1: # single ramp. 
- resultants = resultants.reshape(origshape + (1,)) - dq = dq.reshape(origshape + (1,)) - read_noise = read_noise.reshape(origshape[1:] + (1,)) + resultants = resultants.reshape(orig_shape + (1,)) + dq = dq.reshape(orig_shape + (1,)) + read_noise = read_noise.reshape(orig_shape[1:] + (1,)) ramp_fits, parameters, variances = ols_cas22.fit_ramps( resultants.reshape(resultants.shape[0], -1), diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index bdb3570a..3101fb92 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -4,7 +4,7 @@ from stcal.ramp_fitting.ols_cas22._wrappers import read_data from stcal.ramp_fitting.ols_cas22._wrappers import init_ramps -from stcal.ramp_fitting.ols_cas22._wrappers import run_threshold, make_fixed, make_pixel, fit_ramp +from stcal.ramp_fitting.ols_cas22._wrappers import run_threshold, make_fixed, make_pixel from stcal.ramp_fitting.ols_cas22 import fit_ramps, Parameter, Variance, Diff @@ -128,8 +128,16 @@ def test_make_fixed(ramp_data, use_jump): # These are computed via vectorized operations in the main code, here we # check using item-by-item operations if use_jump: - single_gen = zip(fixed['t_bar_diff'][Diff.single], fixed['recip'][Diff.single], fixed['slope_var'][Diff.single]) - double_gen = zip(fixed['t_bar_diff'][Diff.double], fixed['recip'][Diff.double], fixed['slope_var'][Diff.double]) + single_gen = zip( + fixed['t_bar_diff'][Diff.single], + fixed['recip'][Diff.single], + fixed['slope_var'][Diff.single] + ) + double_gen = zip( + fixed['t_bar_diff'][Diff.double], + fixed['recip'][Diff.double], + fixed['slope_var'][Diff.double] + ) for index, (t_bar_1, recip_1, slope_var_1) in enumerate(single_gen): assert t_bar_1 == t_bar[index + 1] - t_bar[index] @@ -167,7 +175,9 @@ def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1): # - Poisson process for the flux # - Gaussian process for the read noise ramp_value += RNG.poisson(flux * ROMAN_READ_TIME, size=n_pixels).astype(np.float32) - ramp_value += RNG.standard_normal(size=n_pixels, dtype=np.float32) * read_noise / np.sqrt(len(reads)) + ramp_value += ( + RNG.standard_normal(size=n_pixels, dtype=np.float32)* read_noise / np.sqrt(len(reads)) + ) # Add to running total for the resultant resultant_total += ramp_value @@ -225,7 +235,9 @@ def test_make_pixel(pixel_data, use_jump): assert np.isnan(delta_2) assert np.isnan(sigma_2) else: - assert delta_2 == (resultants[index + 2] - resultants[index]) / (t_bar[index + 2] - t_bar[index]) + assert delta_2 == ( + (resultants[index + 2] - resultants[index]) / (t_bar[index + 2] - t_bar[index]) + ) assert sigma_2 == read_noise * ( np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) ) @@ -257,7 +269,9 @@ def test_fit_ramps_array_outputs(detector_data, use_jump): resultants, read_noise, read_pattern, n_pixels, flux = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) - fits, parameters, variances = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) + fits, parameters, variances = fit_ramps( + resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump + ) for fit, par, var in zip(fits, parameters, variances): assert par[Parameter.intercept] == 0 @@ -265,7 +279,9 @@ def test_fit_ramps_array_outputs(detector_data, use_jump): assert var[Variance.read_var] == fit['average']['read_var'] assert var[Variance.poisson_var] == fit['average']['poisson_var'] - assert var[Variance.total_var] == np.float32(fit['average']['read_var'] + fit['average']['poisson_var']) + 
assert var[Variance.total_var] == np.float32( + fit['average']['read_var'] + fit['average']['poisson_var'] + ) @pytest.mark.parametrize("use_jump", [True, False]) @@ -302,7 +318,7 @@ def test_fit_ramps_dq(detector_data, use_jump): up any jumps. """ resultants, read_noise, read_pattern, n_pixels, flux = detector_data - dq = np.zeros(resultants.shape, dtype=np.int32) + (RNG.uniform(size=resultants.shape) > 1).astype(np.int32) + dq = (RNG.uniform(size=resultants.shape) > 1).astype(np.int32) # only use okay ramps # ramps passing the below criterion have at least two adjacent valid reads @@ -352,7 +368,7 @@ def jump_data(): # Start indicating a new resultant jump_res += 1 jumps[jump_res, jump_index] = True - + resultants[:, jump_index] = np.mean(read_values.reshape(shape), axis=1).astype(np.float32) n_pixels = np.prod(shape) @@ -424,6 +440,5 @@ def test_find_jumps(jump_data): # "fairly close" to the expected value. This is purposely a loose check # because the main purpose of this test is to verify that the jumps are # being detected correctly, above. - chi2 = 0 for fit in fits: assert_allclose(fit['average']['slope'], FLUX, rtol=3) From d0891b0ac87d0fc56a2ae705b60b805831769275 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 4 Oct 2023 14:56:26 -0400 Subject: [PATCH 65/90] Update changes --- CHANGES.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 66ba6c1f..dd186cf8 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -3,6 +3,12 @@ - Added ``alignment`` sub-package. [#179] +ramp_fitting +------------ + +- Refactor Casertano, et.al, 2022 uneven ramp fitting and incorporate the matching + jump detection algorithm into it. [#215] + Changes to API -------------- From 73997009172fd583513f9589c159bc8b4e32a492 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 4 Oct 2023 14:59:27 -0400 Subject: [PATCH 66/90] Remove unused variable --- src/stcal/ramp_fitting/ols_cas22_fit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index db6c2d5d..c5c33063 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -110,7 +110,7 @@ def fit_ramps_casertano( dq = dq.reshape(orig_shape + (1,)) read_noise = read_noise.reshape(orig_shape[1:] + (1,)) - ramp_fits, parameters, variances = ols_cas22.fit_ramps( + _, parameters, variances = ols_cas22.fit_ramps( resultants.reshape(resultants.shape[0], -1), dq.reshape(resultants.shape[0], -1), read_noise.reshape(-1), From e7b6f4a1735c7ded6b551cfc252209c735229758 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 13:04:24 -0400 Subject: [PATCH 67/90] Change name of `DerivedData` --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 4 +- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 39 ++++++++++++++----- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 4 +- src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 6 +-- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 4 +- .../ramp_fitting/ols_cas22/_wrappers.pyx | 12 +++--- tests/test_jump_cas22.py | 6 +-- 7 files changed, 48 insertions(+), 27 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 554ab00f..8b5494e3 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -21,7 +21,7 @@ cdef struct RampFits: RampFit average -cdef struct DerivedData: +cdef struct ReadPatternMetadata: vector[float] t_bar vector[float] tau 
vector[int] n_reads @@ -51,4 +51,4 @@ cpdef enum Variance: cdef float threshold(Thresh thresh, float slope) cdef float get_power(float s) cdef deque[stack[RampIndex]] init_ramps(int[:, :] dq) -cdef DerivedData read_data(list[list[int]] read_pattern, float read_time) +cdef ReadPatternMetadata metadata_from_read_pattern(list[list[int]] read_pattern, float read_time) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index d1746421..f8a3adf3 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -6,6 +6,8 @@ Structs RampIndex int start: starting index of the ramp in the resultants int end: ending index of the ramp in the resultants + + Note that the Python range would be [start:end+1] for any ramp index. RampFit float slope: slope of a single ramp float read_var: read noise variance of a single ramp @@ -14,10 +16,14 @@ Structs vector[RampFit] fits: ramp fits (in time order) for a single pixel vector[RampIndex] index: ramp indices (in time order) for a single pixel RampFit average: average ramp fit for a single pixel - DerivedData + ReadPatternMetata vector[float] t_bar: mean time of each resultant vector[float] tau: variance time of each resultant vector[int] n_reads: number of reads in each resultant + + Note that these are entirely computed from the read_pattern and + read_time (which should be constant for a given telescope) for the + given observation. Thresh float intercept: intercept of the threshold float constant: constant of the threshold @@ -31,6 +37,21 @@ Enums single: single difference double: double difference + Parameter + This is the enum to track the index of the computed fit parameters for + the ramp fit. + + intercept: the intercept of the ramp fit + slope: the slope of the ramp fit + + Variance + This is the enum to track the index of the computed variance values for + the ramp fit. 
+ + read_var: read variance computed + poisson_var: poisson variance computed + total_var: total variance computed (read_var + poisson_var) + Functions --------- get_power @@ -39,8 +60,8 @@ Functions Compute jump threshold init_ramps Find initial ramps for each pixel, accounts for DQ flags - read_data - Read the read pattern and derive the baseline data parameters needed + metadata_from_read_pattern + Read the read pattern and derive the baseline metadata parameters needed """ from libcpp.stack cimport stack from libcpp.deque cimport deque @@ -50,7 +71,7 @@ import numpy as np cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport Thresh, DerivedData +from stcal.ramp_fitting.ols_cas22._core cimport Thresh, ReadPatternMetadata # Casertano+2022, Table 2 @@ -174,7 +195,7 @@ cdef inline deque[stack[RampIndex]] init_ramps(int[:, :] dq): @cython.boundscheck(False) @cython.wraparound(False) -cdef DerivedData read_data(list[list[int]] read_pattern, float read_time): +cdef ReadPatternMetadata metadata_from_read_pattern(list[list[int]] read_pattern, float read_time): """ Derive the input data from the the read pattern @@ -190,15 +211,15 @@ cdef DerivedData read_data(list[list[int]] read_pattern, float read_time): Returns ------- - DerivedData struct: + ReadPatternMetadata struct: vector[float] t_bar: mean time of each resultant vector[float] tau: variance time of each resultant vector[int] n_reads: number of reads in each resultant """ cdef int n_resultants = len(read_pattern) - cdef DerivedData data = DerivedData(vector[float](n_resultants), - vector[float](n_resultants), - vector[int](n_resultants)) + cdef ReadPatternMetadata data = ReadPatternMetadata(vector[float](n_resultants), + vector[float](n_resultants), + vector[int](n_resultants)) cdef int index, n_reads cdef list[int] resultant diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 41522037..883aa14d 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -7,7 +7,7 @@ from libcpp.deque cimport deque cimport cython from stcal.ramp_fitting.ols_cas22._core cimport ( - RampFits, RampIndex, Thresh, read_data, init_ramps, Parameter, Variance) + RampFits, RampIndex, Thresh, metadata_from_read_pattern, init_ramps, Parameter, Variance) from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed, Fixed from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel @@ -57,7 +57,7 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, f'match number of resultants {n_resultants}') # Pre-compute data for all pixels - cdef Fixed fixed = make_fixed(read_data(read_pattern, read_time), + cdef Fixed fixed = make_fixed(metadata_from_read_pattern(read_pattern, read_time), Thresh(5.5, 1/3.0), use_jump) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd index 51d4cf22..a2cf3030 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -1,11 +1,11 @@ from libcpp cimport bool -from stcal.ramp_fitting.ols_cas22._core cimport Thresh, DerivedData +from stcal.ramp_fitting.ols_cas22._core cimport Thresh, ReadPatternMetadata cdef class Fixed: cdef bool use_jump - cdef DerivedData data + cdef ReadPatternMetadata data cdef Thresh threshold cdef float[:, :] t_bar_diff @@ -17,4 +17,4 @@ cdef class Fixed: cdef float[:, :] slope_var_val(Fixed self) -cdef Fixed make_fixed(DerivedData data, Thresh threshold, bool use_jump) 
+cdef Fixed make_fixed(ReadPatternMetadata data, Thresh threshold, bool use_jump) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index 52e4ac13..85e7b0c6 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -17,7 +17,7 @@ import numpy as np cimport numpy as np cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport Thresh, DerivedData, Diff +from stcal.ramp_fitting.ols_cas22._core cimport Thresh, ReadPatternMetadata, Diff from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed cdef class Fixed: @@ -160,7 +160,7 @@ cdef class Fixed: return slope_var -cdef inline Fixed make_fixed(DerivedData data, Thresh threshold, bool use_jump): +cdef inline Fixed make_fixed(ReadPatternMetadata data, Thresh threshold, bool use_jump): """ Fast constructor for Fixed class Use this instead of an __init__ because it does not incure the overhead of diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index 975010d6..89010c10 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -5,8 +5,8 @@ from libcpp cimport bool from libcpp.stack cimport stack from libcpp.deque cimport deque -from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, DerivedData, Thresh, RampFit, threshold -from stcal.ramp_fitting.ols_cas22._core cimport read_data as c_read_data +from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, ReadPatternMetadata, Thresh, threshold +from stcal.ramp_fitting.ols_cas22._core cimport metadata_from_read_pattern as c_metadata_from_read_pattern from stcal.ramp_fitting.ols_cas22._core cimport init_ramps as c_init_ramps from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed @@ -16,8 +16,8 @@ from stcal.ramp_fitting.ols_cas22._pixel cimport Pixel from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel as c_make_pixel -def read_data(list[list[int]] read_pattern, float read_time): - return c_read_data(read_pattern, read_time) +def metadata_from_read_pattern(list[list[int]] read_pattern, float read_time): + return c_metadata_from_read_pattern(read_pattern, read_time) def init_ramps(np.ndarray[int, ndim=2] dq): @@ -53,7 +53,7 @@ def make_fixed(np.ndarray[float, ndim=1] t_bar, float constant, bool use_jump): - cdef DerivedData data = DerivedData(t_bar, tau, n_reads) + cdef ReadPatternMetadata data = ReadPatternMetadata(t_bar, tau, n_reads) cdef Thresh threshold = Thresh(intercept, constant) cdef Fixed fixed = c_make_fixed(data, threshold, use_jump) @@ -108,7 +108,7 @@ def make_pixel(np.ndarray[float, ndim=1] resultants, float constant, bool use_jump): - cdef DerivedData data = DerivedData(t_bar, tau, n_reads) + cdef ReadPatternMetadata data = ReadPatternMetadata(t_bar, tau, n_reads) cdef Thresh threshold = Thresh(intercept, constant) cdef Fixed fixed = c_make_fixed(data, threshold, use_jump) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 3101fb92..e3fdf656 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -2,7 +2,7 @@ import pytest from numpy.testing import assert_allclose -from stcal.ramp_fitting.ols_cas22._wrappers import read_data +from stcal.ramp_fitting.ols_cas22._wrappers import metadata_from_read_pattern from stcal.ramp_fitting.ols_cas22._wrappers import init_ramps from stcal.ramp_fitting.ols_cas22._wrappers import run_threshold, make_fixed, make_pixel @@ -29,10 +29,10 @@ def base_ramp_data(): [22, 23, 24, 25, 26, 27, 28, 
29, 30, 31, 32, 33, 34, 35, 36] ] - yield read_pattern, read_data(read_pattern, ROMAN_READ_TIME) + yield read_pattern, metadata_from_read_pattern(read_pattern, ROMAN_READ_TIME) -def test_read_data(base_ramp_data): +def test_metadata_from_read_pattern(base_ramp_data): """Test turning read_pattern into the time data""" _, data = base_ramp_data From e86c939ec7072169d228b9208d140f793d0ba8e1 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 13:19:34 -0400 Subject: [PATCH 68/90] Change name of Fixed to FixedValues --- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 8 ++++---- src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 10 +++++----- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 14 ++++++------- src/stcal/ramp_fitting/ols_cas22/_pixel.pxd | 6 +++--- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 3 ++- .../ramp_fitting/ols_cas22/_wrappers.pyx | 20 +++++++++---------- tests/test_jump_cas22.py | 6 +++--- 7 files changed, 34 insertions(+), 33 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 883aa14d..3b8b1a38 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -8,7 +8,7 @@ cimport cython from stcal.ramp_fitting.ols_cas22._core cimport ( RampFits, RampIndex, Thresh, metadata_from_read_pattern, init_ramps, Parameter, Variance) -from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed, Fixed +from stcal.ramp_fitting.ols_cas22._fixed cimport fixed_values_from_metadata, FixedValues from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel @@ -57,9 +57,9 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, f'match number of resultants {n_resultants}') # Pre-compute data for all pixels - cdef Fixed fixed = make_fixed(metadata_from_read_pattern(read_pattern, read_time), - Thresh(5.5, 1/3.0), - use_jump) + cdef FixedValues fixed = fixed_values_from_metadata(metadata_from_read_pattern(read_pattern, read_time), + Thresh(5.5, 1/3.0), + use_jump) # Compute all the initial sets of ramps cdef deque[stack[RampIndex]] pixel_ramps = init_ramps(dq) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd index a2cf3030..45035023 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -3,7 +3,7 @@ from libcpp cimport bool from stcal.ramp_fitting.ols_cas22._core cimport Thresh, ReadPatternMetadata -cdef class Fixed: +cdef class FixedValues: cdef bool use_jump cdef ReadPatternMetadata data cdef Thresh threshold @@ -12,9 +12,9 @@ cdef class Fixed: cdef float[:, :] recip cdef float[:, :] slope_var - cdef float[:, :] t_bar_diff_val(Fixed self) - cdef float[:, :] recip_val(Fixed self) - cdef float[:, :] slope_var_val(Fixed self) + cdef float[:, :] t_bar_diff_val(FixedValues self) + cdef float[:, :] recip_val(FixedValues self) + cdef float[:, :] slope_var_val(FixedValues self) -cdef Fixed make_fixed(ReadPatternMetadata data, Thresh threshold, bool use_jump) +cdef FixedValues fixed_values_from_metadata(ReadPatternMetadata data, Thresh threshold, bool use_jump) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index 85e7b0c6..3f3856c8 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -18,9 +18,9 @@ cimport numpy as np cimport cython from stcal.ramp_fitting.ols_cas22._core cimport Thresh, ReadPatternMetadata, Diff -from 
stcal.ramp_fitting.ols_cas22._fixed cimport Fixed +from stcal.ramp_fitting.ols_cas22._fixed cimport FixedValues -cdef class Fixed: +cdef class FixedValues: """ Class to contain the data fixed for all pixels and commonly referenced universal values for jump detection @@ -66,7 +66,7 @@ cdef class Fixed: @cython.boundscheck(False) @cython.wraparound(False) - cdef inline float[:, :] t_bar_diff_val(Fixed self): + cdef inline float[:, :] t_bar_diff_val(FixedValues self): """ Compute the difference offset of t_bar @@ -96,7 +96,7 @@ cdef class Fixed: @cython.boundscheck(False) @cython.wraparound(False) - cdef inline float[:, :] recip_val(Fixed self): + cdef inline float[:, :] recip_val(FixedValues self): """ Compute the reciprical sum values @@ -130,7 +130,7 @@ cdef class Fixed: @cython.boundscheck(False) @cython.wraparound(False) - cdef inline float[:, :] slope_var_val(Fixed self): + cdef inline float[:, :] slope_var_val(FixedValues self): """ Compute slope part of the variance @@ -160,7 +160,7 @@ cdef class Fixed: return slope_var -cdef inline Fixed make_fixed(ReadPatternMetadata data, Thresh threshold, bool use_jump): +cdef inline FixedValues fixed_values_from_metadata(ReadPatternMetadata data, Thresh threshold, bool use_jump): """ Fast constructor for Fixed class Use this instead of an __init__ because it does not incure the overhead of @@ -179,7 +179,7 @@ cdef inline Fixed make_fixed(ReadPatternMetadata data, Thresh threshold, bool us ------- Fixed parameters object (with pre-computed values if use_jump is True) """ - cdef Fixed fixed = Fixed() + cdef FixedValues fixed = FixedValues() # Fill in input information for all pixels fixed.use_jump = use_jump diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd index 95e4d4fd..7545abbf 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd @@ -1,10 +1,10 @@ from libcpp.stack cimport stack from stcal.ramp_fitting.ols_cas22._core cimport RampFit, RampFits, RampIndex -from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed +from stcal.ramp_fitting.ols_cas22._fixed cimport FixedValues cdef class Pixel: - cdef Fixed fixed + cdef FixedValues fixed cdef float read_noise cdef float [:] resultants @@ -20,4 +20,4 @@ cdef class Pixel: cdef RampFits fit_ramps(Pixel self, stack[RampIndex] ramps) -cdef Pixel make_pixel(Fixed fixed, float read_noise, float [:] resultants) +cdef Pixel make_pixel(FixedValues fixed, float read_noise, float [:] resultants) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 605a3820..dd57a440 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -22,6 +22,7 @@ cimport cython from stcal.ramp_fitting.ols_cas22._core cimport get_power, threshold, RampFit, RampFits, RampIndex, Diff +from stcal.ramp_fitting.ols_cas22._fixed cimport FixedValues from stcal.ramp_fitting.ols_cas22._pixel cimport Pixel @@ -472,7 +473,7 @@ cdef class Pixel: @cython.boundscheck(False) @cython.wraparound(False) -cdef inline Pixel make_pixel(Fixed fixed, float read_noise, float [:] resultants): +cdef inline Pixel make_pixel(FixedValues fixed, float read_noise, float [:] resultants): """ Fast constructor for the Pixel C class. 
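For illustration, the renamed wrapper entry points introduced in this patch chain together roughly as follows. This is a minimal sketch, not pipeline code: the read pattern and read_time below are placeholders, and the threshold values simply mirror the Thresh(5.5, 1/3.0) default that fit_ramps uses.

    import numpy as np

    from stcal.ramp_fitting.ols_cas22._wrappers import (
        fixed_values_from_metadata,
        metadata_from_read_pattern,
    )

    # Toy read pattern: four resultants built from 1, 2, 3 and 4 reads.
    read_pattern = [[1], [2, 3], [4, 5, 6], [7, 8, 9, 10]]
    read_time = 3.04  # placeholder read time, purely illustrative

    # Derive the per-resultant metadata (t_bar, tau, n_reads) once per read pattern.
    metadata = metadata_from_read_pattern(read_pattern, read_time)
    t_bar = np.array(metadata["t_bar"], dtype=np.float32)
    tau = np.array(metadata["tau"], dtype=np.float32)
    n_reads = np.array(metadata["n_reads"], dtype=np.int32)

    # Pre-compute the pixel-independent jump-detection values once; every Pixel
    # built by make_pixel then reuses them.
    fixed = fixed_values_from_metadata(
        t_bar, tau, n_reads, np.float32(5.5), np.float32(1 / 3), True
    )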
diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index 89010c10..d2886ff6 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -9,8 +9,8 @@ from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, ReadPatternMetadata, from stcal.ramp_fitting.ols_cas22._core cimport metadata_from_read_pattern as c_metadata_from_read_pattern from stcal.ramp_fitting.ols_cas22._core cimport init_ramps as c_init_ramps -from stcal.ramp_fitting.ols_cas22._fixed cimport Fixed -from stcal.ramp_fitting.ols_cas22._fixed cimport make_fixed as c_make_fixed +from stcal.ramp_fitting.ols_cas22._fixed cimport FixedValues +from stcal.ramp_fitting.ols_cas22._fixed cimport fixed_values_from_metadata as c_fixed_values_from_metadata from stcal.ramp_fitting.ols_cas22._pixel cimport Pixel from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel as c_make_pixel @@ -46,17 +46,17 @@ def run_threshold(float intercept, float constant, float slope): return threshold(thresh, slope) -def make_fixed(np.ndarray[float, ndim=1] t_bar, - np.ndarray[float, ndim=1] tau, - np.ndarray[int, ndim=1] n_reads, - float intercept, - float constant, - bool use_jump): +def fixed_values_from_metadata(np.ndarray[float, ndim=1] t_bar, + np.ndarray[float, ndim=1] tau, + np.ndarray[int, ndim=1] n_reads, + float intercept, + float constant, + bool use_jump): cdef ReadPatternMetadata data = ReadPatternMetadata(t_bar, tau, n_reads) cdef Thresh threshold = Thresh(intercept, constant) - cdef Fixed fixed = c_make_fixed(data, threshold, use_jump) + cdef FixedValues fixed = c_fixed_values_from_metadata(data, threshold, use_jump) cdef float intercept_ = fixed.threshold.intercept cdef float constant_ = fixed.threshold.constant @@ -111,7 +111,7 @@ def make_pixel(np.ndarray[float, ndim=1] resultants, cdef ReadPatternMetadata data = ReadPatternMetadata(t_bar, tau, n_reads) cdef Thresh threshold = Thresh(intercept, constant) - cdef Fixed fixed = c_make_fixed(data, threshold, use_jump) + cdef FixedValues fixed = c_fixed_values_from_metadata(data, threshold, use_jump) cdef Pixel pixel = c_make_pixel(fixed, read_noise, resultants) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index e3fdf656..f9f55656 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -4,7 +4,7 @@ from stcal.ramp_fitting.ols_cas22._wrappers import metadata_from_read_pattern from stcal.ramp_fitting.ols_cas22._wrappers import init_ramps -from stcal.ramp_fitting.ols_cas22._wrappers import run_threshold, make_fixed, make_pixel +from stcal.ramp_fitting.ols_cas22._wrappers import run_threshold, fixed_values_from_metadata, make_pixel from stcal.ramp_fitting.ols_cas22 import fit_ramps, Parameter, Variance, Diff @@ -108,14 +108,14 @@ def ramp_data(base_ramp_data): @pytest.mark.parametrize("use_jump", [True, False]) -def test_make_fixed(ramp_data, use_jump): +def test_fixed_values_from_metadata(ramp_data, use_jump): """Test computing the fixed data for all pixels""" _, t_bar, tau, n_reads = ramp_data intercept = np.float32(5.5) constant = np.float32(1/3) - fixed = make_fixed(t_bar, tau, n_reads, intercept, constant, use_jump) + fixed = fixed_values_from_metadata(t_bar, tau, n_reads, intercept, constant, use_jump) # Basic sanity checks that data passed in survives assert (fixed['data']['t_bar'] == t_bar).all() From 639573bc4e7476b38d70e7f9f714a76ce126046e Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 13:38:34 -0400 Subject: [PATCH 
69/90] Change name of some FixedValues parameters --- src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 12 ++--- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 46 +++++++++---------- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 14 +++--- .../ramp_fitting/ols_cas22/_wrappers.pyx | 30 ++++++------ tests/test_jump_cas22.py | 20 ++++---- 5 files changed, 61 insertions(+), 61 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd index 45035023..9cbb3135 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -8,13 +8,13 @@ cdef class FixedValues: cdef ReadPatternMetadata data cdef Thresh threshold - cdef float[:, :] t_bar_diff - cdef float[:, :] recip - cdef float[:, :] slope_var + cdef float[:, :] t_bar_diffs + cdef float[:, :] read_recip_coeffs + cdef float[:, :] var_slope_coeffs - cdef float[:, :] t_bar_diff_val(FixedValues self) - cdef float[:, :] recip_val(FixedValues self) - cdef float[:, :] slope_var_val(FixedValues self) + cdef float[:, :] t_bar_diff_vals(FixedValues self) + cdef float[:, :] read_recip_vals(FixedValues self) + cdef float[:, :] var_slope_vals(FixedValues self) cdef FixedValues fixed_values_from_metadata(ReadPatternMetadata data, Thresh threshold, bool use_jump) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index 3f3856c8..23606bdc 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -66,7 +66,7 @@ cdef class FixedValues: @cython.boundscheck(False) @cython.wraparound(False) - cdef inline float[:, :] t_bar_diff_val(FixedValues self): + cdef inline float[:, :] t_bar_diff_vals(FixedValues self): """ Compute the difference offset of t_bar @@ -86,17 +86,17 @@ cdef class FixedValues: cdef float[:] t_bar = self.data.t_bar.data() cdef int end = len(t_bar) - cdef np.ndarray[float, ndim=2] t_bar_diff = np.zeros((2, self.data.t_bar.size() - 1), dtype=np.float32) + cdef np.ndarray[float, ndim=2] t_bar_diff_vals = np.zeros((2, self.data.t_bar.size() - 1), dtype=np.float32) - t_bar_diff[Diff.single, :] = np.subtract(t_bar[1:], t_bar[:end - 1]) - t_bar_diff[Diff.double, :end - 2] = np.subtract(t_bar[2:], t_bar[:end - 2]) - t_bar_diff[Diff.double, end - 2] = np.nan # last double difference is undefined + t_bar_diff_vals[Diff.single, :] = np.subtract(t_bar[1:], t_bar[:end - 1]) + t_bar_diff_vals[Diff.double, :end - 2] = np.subtract(t_bar[2:], t_bar[:end - 2]) + t_bar_diff_vals[Diff.double, end - 2] = np.nan # last double difference is undefined - return t_bar_diff + return t_bar_diff_vals @cython.boundscheck(False) @cython.wraparound(False) - cdef inline float[:, :] recip_val(FixedValues self): + cdef inline float[:, :] read_recip_vals(FixedValues self): """ Compute the reciprical sum values @@ -117,20 +117,20 @@ cdef class FixedValues: cdef int[:] n_reads = self.data.n_reads.data() cdef int end = len(n_reads) - cdef np.ndarray[float, ndim=2] recip = np.zeros((2, self.data.n_reads.size() - 1), dtype=np.float32) + cdef np.ndarray[float, ndim=2] read_recip_vals = np.zeros((2, self.data.n_reads.size() - 1), dtype=np.float32) - recip[Diff.single, :] = (np.divide(1.0, n_reads[1:], dtype=np.float32) + - np.divide(1.0, n_reads[:end - 1], dtype=np.float32)) - recip[Diff.double, :end - 2] = (np.divide(1.0, n_reads[2:], dtype=np.float32) + - np.divide(1.0, n_reads[:end - 2], dtype=np.float32)) - recip[Diff.double, end - 2] = np.nan # last double difference is undefined + 
read_recip_vals[Diff.single, :] = (np.divide(1.0, n_reads[1:], dtype=np.float32) + + np.divide(1.0, n_reads[:end - 1], dtype=np.float32)) + read_recip_vals[Diff.double, :end - 2] = (np.divide(1.0, n_reads[2:], dtype=np.float32) + + np.divide(1.0, n_reads[:end - 2], dtype=np.float32)) + read_recip_vals[Diff.double, end - 2] = np.nan # last double difference is undefined - return recip + return read_recip_vals @cython.boundscheck(False) @cython.wraparound(False) - cdef inline float[:, :] slope_var_val(FixedValues self): + cdef inline float[:, :] var_slope_vals(FixedValues self): """ Compute slope part of the variance @@ -151,13 +151,13 @@ cdef class FixedValues: cdef float[:] tau = self.data.tau.data() cdef int end = len(t_bar) - cdef np.ndarray[float, ndim=2] slope_var = np.zeros((2, self.data.t_bar.size() - 1), dtype=np.float32) + cdef np.ndarray[float, ndim=2] var_slope_vals = np.zeros((2, self.data.t_bar.size() - 1), dtype=np.float32) - slope_var[Diff.single, :] = (np.add(tau[1:], tau[:end - 1]) - np.minimum(t_bar[1:], t_bar[:end - 1])) - slope_var[Diff.double, :end - 2] = (np.add(tau[2:], tau[:end - 2]) - np.minimum(t_bar[2:], t_bar[:end - 2])) - slope_var[Diff.double, end - 2] = np.nan # last double difference is undefined + var_slope_vals[Diff.single, :] = (np.add(tau[1:], tau[:end - 1]) - np.minimum(t_bar[1:], t_bar[:end - 1])) + var_slope_vals[Diff.double, :end - 2] = (np.add(tau[2:], tau[:end - 2]) - np.minimum(t_bar[2:], t_bar[:end - 2])) + var_slope_vals[Diff.double, end - 2] = np.nan # last double difference is undefined - return slope_var + return var_slope_vals cdef inline FixedValues fixed_values_from_metadata(ReadPatternMetadata data, Thresh threshold, bool use_jump): @@ -190,8 +190,8 @@ cdef inline FixedValues fixed_values_from_metadata(ReadPatternMetadata data, Thr # Pre-compute jump detection computations shared by all pixels if use_jump: - fixed.t_bar_diff = fixed.t_bar_diff_val() - fixed.recip = fixed.recip_val() - fixed.slope_var = fixed.slope_var_val() + fixed.t_bar_diffs = fixed.t_bar_diff_vals() + fixed.read_recip_coeffs = fixed.read_recip_vals() + fixed.var_slope_coeffs = fixed.var_slope_vals() return fixed diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index dd57a440..04e50b4d 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -88,11 +88,11 @@ cdef class Pixel: cdef float[:] resultants = self.resultants cdef int end = len(resultants) - cdef np.ndarray[float, ndim=2] t_bar_diff = np.array(self.fixed.t_bar_diff, dtype=np.float32) + cdef np.ndarray[float, ndim=2] t_bar_diffs = np.array(self.fixed.t_bar_diffs, dtype=np.float32) cdef np.ndarray[float, ndim=2] delta = np.zeros((2, end - 1), dtype=np.float32) - delta[Diff.single, :] = (np.subtract(resultants[1:], resultants[:end - 1]) / t_bar_diff[0, :]).astype(np.float32) - delta[Diff.double, :end-2] = (np.subtract(resultants[2:], resultants[:end - 2]) / t_bar_diff[1, :end-2]).astype(np.float32) + delta[Diff.single, :] = (np.subtract(resultants[1:], resultants[:end - 1]) / t_bar_diffs[0, :]).astype(np.float32) + delta[Diff.double, :end-2] = (np.subtract(resultants[2:], resultants[:end - 2]) / t_bar_diffs[1, :end-2]).astype(np.float32) delta[Diff.double, end-2] = np.nan # last double difference is undefined return delta @@ -226,7 +226,7 @@ cdef class Pixel: The offset to use for the delta and sigma values, this should be a value from the Diff enum. 
""" - cdef float comp = (self.fixed.t_bar_diff[diff, index] / + cdef float comp = (self.fixed.t_bar_diffs[diff, index] / (self.fixed.data.t_bar[ramp.end] - self.fixed.data.t_bar[ramp.start])) if diff == 0: @@ -267,9 +267,9 @@ cdef class Pixel: Create a single instance of the stastic for the given parameters """ cdef float delta = ((self.delta[diff, index] - slope) * - fabs(self.fixed.t_bar_diff[diff, index])) + fabs(self.fixed.t_bar_diffs[diff, index])) cdef float var = (self.sigma[diff, index] + - slope * self.fixed.slope_var[diff, index] * + slope * self.fixed.var_slope_coeffs[diff, index] * self.correction(ramp, index, diff)) return delta / sqrt(var) @@ -503,6 +503,6 @@ cdef inline Pixel make_pixel(FixedValues fixed, float read_noise, float [:] resu # Pre-compute values for jump detection shared by all pixels for this pixel if fixed.use_jump: pixel.delta = pixel.delta_val() - pixel.sigma = read_noise * np.array(fixed.recip) + pixel.sigma = read_noise * np.array(fixed.read_recip_coeffs) return pixel diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index d2886ff6..71e6b4f5 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -61,42 +61,42 @@ def fixed_values_from_metadata(np.ndarray[float, ndim=1] t_bar, cdef float intercept_ = fixed.threshold.intercept cdef float constant_ = fixed.threshold.constant - cdef np.ndarray[float, ndim=2] t_bar_diff - cdef np.ndarray[float, ndim=2] recip - cdef np.ndarray[float, ndim=2] slope_var + cdef np.ndarray[float, ndim=2] t_bar_diffs + cdef np.ndarray[float, ndim=2] read_recip_coeffs + cdef np.ndarray[float, ndim=2] var_slope_coeffs if use_jump: - t_bar_diff = np.array(fixed.t_bar_diff, dtype=np.float32) - recip = np.array(fixed.recip, dtype=np.float32) - slope_var = np.array(fixed.slope_var, dtype=np.float32) + t_bar_diffs = np.array(fixed.t_bar_diffs, dtype=np.float32) + read_recip_coeffs = np.array(fixed.read_recip_coeffs, dtype=np.float32) + var_slope_coeffs = np.array(fixed.var_slope_coeffs, dtype=np.float32) else: try: - fixed.t_bar_diff + fixed.t_bar_diffs except AttributeError: - t_bar_diff = np.array([[np.nan],[np.nan]], dtype=np.float32) + t_bar_diffs = np.array([[np.nan],[np.nan]], dtype=np.float32) else: raise AttributeError("t_bar_1 should not exist") try: - fixed.recip + fixed.read_recip_coeffs except AttributeError: - recip = np.array([[np.nan],[np.nan]], dtype=np.float32) + read_recip_coeffs = np.array([[np.nan],[np.nan]], dtype=np.float32) else: raise AttributeError("recip_1 should not exist") try: - fixed.slope_var + fixed.var_slope_coeffs except AttributeError: - slope_var = np.array([[np.nan],[np.nan]], dtype=np.float32) + var_slope_coeffs = np.array([[np.nan],[np.nan]], dtype=np.float32) else: raise AttributeError("slope_var_1 should not exist") return dict(data=fixed.data, intercept=intercept_, constant=constant_, - t_bar_diff=t_bar_diff, - recip=recip, - slope_var=slope_var) + t_bar_diffs=t_bar_diffs, + read_recip_coeffs=read_recip_coeffs, + var_slope_coeffs=var_slope_coeffs) def make_pixel(np.ndarray[float, ndim=1] resultants, diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index f9f55656..107ef382 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -129,14 +129,14 @@ def test_fixed_values_from_metadata(ramp_data, use_jump): # check using item-by-item operations if use_jump: single_gen = zip( - fixed['t_bar_diff'][Diff.single], - fixed['recip'][Diff.single], - 
fixed['slope_var'][Diff.single] + fixed['t_bar_diffs'][Diff.single], + fixed['read_recip_coeffs'][Diff.single], + fixed['var_slope_coeffs'][Diff.single] ) double_gen = zip( - fixed['t_bar_diff'][Diff.double], - fixed['recip'][Diff.double], - fixed['slope_var'][Diff.double] + fixed['t_bar_diffs'][Diff.double], + fixed['read_recip_coeffs'][Diff.double], + fixed['var_slope_coeffs'][Diff.double] ) for index, (t_bar_1, recip_1, slope_var_1) in enumerate(single_gen): @@ -145,7 +145,7 @@ def test_fixed_values_from_metadata(ramp_data, use_jump): assert slope_var_1 == (tau[index + 1] + tau[index] - min(t_bar[index], t_bar[index + 1])) for index, (t_bar_2, recip_2, slope_var_2) in enumerate(double_gen): - if index == len(fixed['t_bar_diff'][1]) - 1: + if index == len(fixed['t_bar_diffs'][1]) - 1: # Last value must be NaN assert np.isnan(t_bar_2) assert np.isnan(recip_2) @@ -157,9 +157,9 @@ def test_fixed_values_from_metadata(ramp_data, use_jump): else: # If not using jumps, these values should not even exist. However, for wrapping # purposes, they are checked to be non-existent and then set to NaN - assert np.isnan(fixed['t_bar_diff']).all() - assert np.isnan(fixed['recip']).all() - assert np.isnan(fixed['slope_var']).all() + assert np.isnan(fixed['t_bar_diffs']).all() + assert np.isnan(fixed['read_recip_coeffs']).all() + assert np.isnan(fixed['var_slope_coeffs']).all() def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1): From 07788334201182a7cc9df25526a6885061184dac Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 13:51:56 -0400 Subject: [PATCH 70/90] Clean up Pixel variable names --- src/stcal/ramp_fitting/ols_cas22/_pixel.pxd | 6 +-- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 24 +++++----- .../ramp_fitting/ols_cas22/_wrappers.pyx | 30 ++++++------ tests/test_jump_cas22.py | 48 +++++++++---------- 4 files changed, 54 insertions(+), 54 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd index 7545abbf..a909b346 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd @@ -8,10 +8,10 @@ cdef class Pixel: cdef float read_noise cdef float [:] resultants - cdef float[:, :] delta - cdef float[:, :] sigma + cdef float[:, :] local_slopes + cdef float[:, :] var_read_noise - cdef float[:, :] delta_val(Pixel self) + cdef float[:, :] local_slope_vals(Pixel self) cdef RampFit fit_ramp(Pixel self, RampIndex ramp) cdef float correction(Pixel self, RampIndex ramp, int index, int diff) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 04e50b4d..fff75e49 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -42,12 +42,12 @@ cdef class Pixel: resultants : float [:] array of resultants for single pixel (data input) - delta : float [:, :] + local_slopes : float [:, :] single difference delta+slope: delta[0, :] = (resultants[i+1] - resultants[i]) / (t_bar[i+1] - t_bar[i]) double difference delta+slope: delta[1, :] = (resultants[i+2] - resultants[i]) / (t_bar[i+2] - t_bar[i]) - sigma : float [:, :] + var_read_noise : float [:, :] single difference "sigma": sigma[0, :] = read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) double difference "sigma": @@ -74,7 +74,7 @@ cdef class Pixel: @cython.boundscheck(False) @cython.wraparound(False) - cdef inline float[:, :] delta_val(Pixel self): + cdef inline float[:, :] local_slope_vals(Pixel self): """ Compute the 
difference offset of resultants @@ -89,13 +89,13 @@ cdef class Pixel: cdef int end = len(resultants) cdef np.ndarray[float, ndim=2] t_bar_diffs = np.array(self.fixed.t_bar_diffs, dtype=np.float32) - cdef np.ndarray[float, ndim=2] delta = np.zeros((2, end - 1), dtype=np.float32) + cdef np.ndarray[float, ndim=2] local_slope_vals = np.zeros((2, end - 1), dtype=np.float32) - delta[Diff.single, :] = (np.subtract(resultants[1:], resultants[:end - 1]) / t_bar_diffs[0, :]).astype(np.float32) - delta[Diff.double, :end-2] = (np.subtract(resultants[2:], resultants[:end - 2]) / t_bar_diffs[1, :end-2]).astype(np.float32) - delta[Diff.double, end-2] = np.nan # last double difference is undefined + local_slope_vals[Diff.single, :] = (np.subtract(resultants[1:], resultants[:end - 1]) / t_bar_diffs[0, :]).astype(np.float32) + local_slope_vals[Diff.double, :end-2] = (np.subtract(resultants[2:], resultants[:end - 2]) / t_bar_diffs[1, :end-2]).astype(np.float32) + local_slope_vals[Diff.double, end-2] = np.nan # last double difference is undefined - return delta + return local_slope_vals @cython.boundscheck(False) @cython.wraparound(False) @@ -266,9 +266,9 @@ cdef class Pixel: ------- Create a single instance of the stastic for the given parameters """ - cdef float delta = ((self.delta[diff, index] - slope) * + cdef float delta = ((self.local_slopes[diff, index] - slope) * fabs(self.fixed.t_bar_diffs[diff, index])) - cdef float var = (self.sigma[diff, index] + + cdef float var = (self.var_read_noise[diff, index] + slope * self.fixed.var_slope_coeffs[diff, index] * self.correction(ramp, index, diff)) @@ -502,7 +502,7 @@ cdef inline Pixel make_pixel(FixedValues fixed, float read_noise, float [:] resu # Pre-compute values for jump detection shared by all pixels for this pixel if fixed.use_jump: - pixel.delta = pixel.delta_val() - pixel.sigma = read_noise * np.array(fixed.read_recip_coeffs) + pixel.local_slopes = pixel.local_slope_vals() + pixel.var_read_noise = read_noise * np.array(fixed.read_recip_coeffs) return pixel diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index 71e6b4f5..63cc3256 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -75,21 +75,21 @@ def fixed_values_from_metadata(np.ndarray[float, ndim=1] t_bar, except AttributeError: t_bar_diffs = np.array([[np.nan],[np.nan]], dtype=np.float32) else: - raise AttributeError("t_bar_1 should not exist") + raise AttributeError("t_bar_diffs should not exist") try: fixed.read_recip_coeffs except AttributeError: read_recip_coeffs = np.array([[np.nan],[np.nan]], dtype=np.float32) else: - raise AttributeError("recip_1 should not exist") + raise AttributeError("read_recip_coeffs should not exist") try: fixed.var_slope_coeffs except AttributeError: var_slope_coeffs = np.array([[np.nan],[np.nan]], dtype=np.float32) else: - raise AttributeError("slope_var_1 should not exist") + raise AttributeError("var_slope_coeffs should not exist") return dict(data=fixed.data, intercept=intercept_, @@ -117,29 +117,29 @@ def make_pixel(np.ndarray[float, ndim=1] resultants, cdef np.ndarray[float, ndim=1] resultants_ = np.array(pixel.resultants, dtype=np.float32) - cdef np.ndarray[float, ndim=2] delta - cdef np.ndarray[float, ndim=2] sigma + cdef np.ndarray[float, ndim=2] local_slopes + cdef np.ndarray[float, ndim=2] var_read_noise if use_jump: - delta = np.array(pixel.delta, dtype=np.float32) - sigma = np.array(pixel.sigma, dtype=np.float32) + local_slopes = 
np.array(pixel.local_slopes, dtype=np.float32) + var_read_noise = np.array(pixel.var_read_noise, dtype=np.float32) else: try: - pixel.delta + pixel.local_slopes except AttributeError: - delta = np.array([[np.nan],[np.nan]], dtype=np.float32) + local_slopes = np.array([[np.nan],[np.nan]], dtype=np.float32) else: - raise AttributeError("delta_1 should not exist") + raise AttributeError("local_slopes should not exist") try: - pixel.sigma + pixel.var_read_noise except AttributeError: - sigma = np.array([[np.nan],[np.nan]], dtype=np.float32) + var_read_noise = np.array([[np.nan],[np.nan]], dtype=np.float32) else: - raise AttributeError("sigma_1 should not exist") + raise AttributeError("var_read_noise should not exist") # only return computed values (assume fixed is correct) return dict(resultants=resultants_, read_noise=pixel.read_noise, - delta=delta, - sigma=sigma) + local_slopes=local_slopes, + var_read_noise=var_read_noise) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 107ef382..35712210 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -139,21 +139,21 @@ def test_fixed_values_from_metadata(ramp_data, use_jump): fixed['var_slope_coeffs'][Diff.double] ) - for index, (t_bar_1, recip_1, slope_var_1) in enumerate(single_gen): - assert t_bar_1 == t_bar[index + 1] - t_bar[index] - assert recip_1 == np.float32(1 / n_reads[index + 1]) + np.float32(1 / n_reads[index]) - assert slope_var_1 == (tau[index + 1] + tau[index] - min(t_bar[index], t_bar[index + 1])) + for index, (t_bar_diff_1, read_recip_1, var_slope_1) in enumerate(single_gen): + assert t_bar_diff_1 == t_bar[index + 1] - t_bar[index] + assert read_recip_1 == np.float32(1 / n_reads[index + 1]) + np.float32(1 / n_reads[index]) + assert var_slope_1 == (tau[index + 1] + tau[index] - min(t_bar[index], t_bar[index + 1])) - for index, (t_bar_2, recip_2, slope_var_2) in enumerate(double_gen): + for index, (t_bar_diff_2, read_recip_2, var_slope_2) in enumerate(double_gen): if index == len(fixed['t_bar_diffs'][1]) - 1: # Last value must be NaN - assert np.isnan(t_bar_2) - assert np.isnan(recip_2) - assert np.isnan(slope_var_2) + assert np.isnan(t_bar_diff_2) + assert np.isnan(read_recip_2) + assert np.isnan(var_slope_2) else: - assert t_bar_2 == t_bar[index + 2] - t_bar[index] - assert recip_2 == np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) - assert slope_var_2 == (tau[index + 2] + tau[index] - min(t_bar[index], t_bar[index + 2])) + assert t_bar_diff_2 == t_bar[index + 2] - t_bar[index] + assert read_recip_2 == np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) + assert var_slope_2 == (tau[index + 2] + tau[index] - min(t_bar[index], t_bar[index + 2])) else: # If not using jumps, these values should not even exist. 
However, for wrapping # purposes, they are checked to be non-existent and then set to NaN @@ -220,32 +220,32 @@ def test_make_pixel(pixel_data, use_jump): # These are computed via vectorized operations in the main code, here we # check using item-by-item operations if use_jump: - single_gen = zip(pixel['delta'][Diff.single], pixel['sigma'][Diff.single]) - double_gen = zip(pixel['delta'][Diff.double], pixel['sigma'][Diff.double]) + single_gen = zip(pixel['local_slopes'][Diff.single], pixel['var_read_noise'][Diff.single]) + double_gen = zip(pixel['local_slopes'][Diff.double], pixel['var_read_noise'][Diff.double]) - for index, (delta_1, sigma_1) in enumerate(single_gen): - assert delta_1 == (resultants[index + 1] - resultants[index]) / (t_bar[index + 1] - t_bar[index]) - assert sigma_1 == read_noise * ( + for index, (local_slope_1, var_read_noise_1) in enumerate(single_gen): + assert local_slope_1 == (resultants[index + 1] - resultants[index]) / (t_bar[index + 1] - t_bar[index]) + assert var_read_noise_1 == read_noise * ( np.float32(1 / n_reads[index + 1]) + np.float32(1 / n_reads[index]) ) - for index, (delta_2, sigma_2) in enumerate(double_gen): - if index == len(pixel['delta'][1]) - 1: + for index, (local_slope_2, var_read_noise_2) in enumerate(double_gen): + if index == len(pixel['local_slopes'][1]) - 1: # Last value must be NaN - assert np.isnan(delta_2) - assert np.isnan(sigma_2) + assert np.isnan(local_slope_2) + assert np.isnan(var_read_noise_2) else: - assert delta_2 == ( + assert local_slope_2 == ( (resultants[index + 2] - resultants[index]) / (t_bar[index + 2] - t_bar[index]) ) - assert sigma_2 == read_noise * ( + assert var_read_noise_2 == read_noise * ( np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) ) else: # If not using jumps, these values should not even exist. However, for wrapping # purposes, they are checked to be non-existent and then set to NaN - assert np.isnan(pixel['delta']).all() - assert np.isnan(pixel['sigma']).all() + assert np.isnan(pixel['local_slopes']).all() + assert np.isnan(pixel['var_read_noise']).all() @pytest.fixture(scope="module") From 6398d0ac9cbb581ef084f56ff0c629c43621ec90 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 14:19:32 -0400 Subject: [PATCH 71/90] Clean up doc strings --- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 87 ++++++++++++--------- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 73 +++++++++-------- 2 files changed, 90 insertions(+), 70 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index 23606bdc..4d8cc043 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -4,14 +4,14 @@ Define the data which is fixed for all pixels to compute the CAS22 algorithm wit Objects ------- -Fixed : class +FixedValues : class Class to contain the data fixed for all pixels and commonly referenced universal values for jump detection Functions --------- -make_fixed : function - Fast constructor for Fixed class +fixed_values_from_metadata : function + Fast constructor for FixedValues from the read pattern metadata """ import numpy as np cimport numpy as np @@ -22,43 +22,51 @@ from stcal.ramp_fitting.ols_cas22._fixed cimport FixedValues cdef class FixedValues: """ - Class to contain the data fixed for all pixels and commonly referenced - universal values for jump detection + Class to contain all the values which are fixed for all pixels for a given + read pattern. 
+ This class is used to pre-compute these values once so that they maybe + reused for all pixels. This is done for performance reasons. Parameters ---------- - t_bar : float[:] - mean times of resultants (data input) - tau : float[:] - variance weighted mean times of resultants (data input) - n_reads : float[:] - number of reads contributing to reach resultant (data input) - use_jump : bool flag to indicate whether to use jump detection (user input) - t_bar_diff : float[:, :] - single differences of t_bar: - t_bar_diff[0, :] = (t_bar[i+1] - t_bar[i]) - double differences of t_bar: - t_bar_diff[1, :] = (t_bar[i+2] - t_bar[i]) - recip : float[:, :] - single sum of reciprocal n_reads: - recip[0, :] = ((1/n_reads[i+1]) + (1/n_reads[i])) - double sum of reciprocal n_reads: - recip[1, :] = ((1/n_reads[i+2]) + (1/n_reads[i])) - slope_var : float[:, :] - single of slope variance term: - slope_var[0, :] = ([tau[i] + tau[i+1] - min(t_bar[i], t_bar[i+1])) - double of slope variance term: - slope_var[1, :] = ([tau[i] + tau[i+2] - min(t_bar[i], t_bar[i+2])) + data : ReadPatternMetadata + Metadata struct created from a read pattern + + threshold : Thresh + Parameterization struct for threshold function + + t_bar_diffs : float[:, :] + These are the differences of t_bar used for jump detection. + single differences of t_bar: + t_bar_diffs[Diff.single, :] = (t_bar[i+1] - t_bar[i]) + double differences of t_bar: + t_bar_diffs[Diff.double, :] = (t_bar[i+2] - t_bar[i]) + read_recip_coeffs : float[:, :] + Coefficients for the read noise portion of the variance used to compute + the jump detection statistics. These are formed from the reciprocal sum + of the number of reads. + single sum of reciprocal n_reads: + recip[Diff.single, :] = ((1/n_reads[i+1]) + (1/n_reads[i])) + double sum of reciprocal n_reads: + recip[Diff.double, :] = ((1/n_reads[i+2]) + (1/n_reads[i])) + var_slope_coeffs : float[:, :] + Coefficients for the slope portion of the variance used to compute the + jump detection statistics, which happend to be fixed for any given ramp + fit. + single of slope variance term: + slope_var[Diff.single, :] = ([tau[i] + tau[i+1] - min(t_bar[i], t_bar[i+1])) + double of slope variance term: + slope_var[Diff.double, :] = ([tau[i] + tau[i+2] - min(t_bar[i], t_bar[i+2])) Notes ----- - - t_bar_diff, recip, slope_var are only computed if use_jump is True. These - values represent reused computations for jump detection which are used by - every pixel for jump detection. They are computed once and stored in the - Fixed for reuse by all pixels. + - t_bar_diffs, read_recip_coeffs, var_slope_coeffs are only computed if + use_jump is True. These values represent reused computations for jump + detection which are used by every pixel for jump detection. They are + computed once and stored in the FixedValues for reuse by all pixels. - The computations are done using vectorized operations for some performance increases. However, this is marginal compaired with the performance increase from pre-computing the values and reusing them. 
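For reference, the three pre-computed arrays documented above can be written out in plain NumPy for a toy set of metadata. This is only a sketch of the relations (the t_bar, tau and n_reads values are made up); the Cython methods below are what actually compute them.

    import numpy as np

    # Made-up metadata for four resultants, purely for illustration.
    t_bar = np.array([1.0, 2.5, 4.5, 7.0], dtype=np.float32)
    tau = np.array([0.9, 2.3, 4.2, 6.7], dtype=np.float32)
    n_reads = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)

    n_diff = len(t_bar) - 1
    t_bar_diffs = np.full((2, n_diff), np.nan, dtype=np.float32)
    read_recip_coeffs = np.full((2, n_diff), np.nan, dtype=np.float32)
    var_slope_coeffs = np.full((2, n_diff), np.nan, dtype=np.float32)

    # Row 0 (Diff.single): adjacent resultant differences.
    t_bar_diffs[0] = t_bar[1:] - t_bar[:-1]
    read_recip_coeffs[0] = 1 / n_reads[1:] + 1 / n_reads[:-1]
    var_slope_coeffs[0] = tau[1:] + tau[:-1] - np.minimum(t_bar[1:], t_bar[:-1])

    # Row 1 (Diff.double): skip-one differences; the final entry has no i+2
    # resultant, so it stays NaN exactly as in the Cython code.
    t_bar_diffs[1, :-1] = t_bar[2:] - t_bar[:-2]
    read_recip_coeffs[1, :-1] = 1 / n_reads[2:] + 1 / n_reads[:-2]
    var_slope_coeffs[1, :-1] = tau[2:] + tau[:-2] - np.minimum(t_bar[2:], t_bar[:-2])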
@@ -98,7 +106,7 @@ cdef class FixedValues: @cython.wraparound(False) cdef inline float[:, :] read_recip_vals(FixedValues self): """ - Compute the reciprical sum values + Compute the reciprical sum of the number of reads Returns ------- @@ -132,7 +140,7 @@ cdef class FixedValues: @cython.wraparound(False) cdef inline float[:, :] var_slope_vals(FixedValues self): """ - Compute slope part of the variance + Compute slope part of the jump statistic variances Returns ------- @@ -162,14 +170,14 @@ cdef class FixedValues: cdef inline FixedValues fixed_values_from_metadata(ReadPatternMetadata data, Thresh threshold, bool use_jump): """ - Fast constructor for Fixed class - Use this instead of an __init__ because it does not incure the overhead of - switching back and forth to python + Fast constructor for FixedValues class + Use this instead of an __init__ because it does not incure the overhead + of switching back and forth to python Parameters ---------- - data : DerivedData - derived data object created from MA table (input data) + data : ReadPatternMetadata + metadata object created from the read pattern (user input) threshold : Thresh threshold object (user input) use_jump : bool @@ -177,7 +185,8 @@ cdef inline FixedValues fixed_values_from_metadata(ReadPatternMetadata data, Thr Returns ------- - Fixed parameters object (with pre-computed values if use_jump is True) + FixedValues object (with pre-computed values for jump detection if use_jump + is True) """ cdef FixedValues fixed = FixedValues() diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index fff75e49..b5968f4d 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -1,5 +1,5 @@ """ -Define the C class for the CAS22 algorithm for fitting ramps with jump detection +Define the C class for the Cassertano22 algorithm for fitting ramps with jump detection Objects ------- @@ -9,8 +9,8 @@ Pixel : class Functions --------- -make_ramp : function - Fast constructor for the Pixel class +make_pixel : function + Fast constructor for a Pixel class from input data. """ from libc.math cimport sqrt, fabs from libcpp.vector cimport vector @@ -35,30 +35,33 @@ cdef class Pixel: Parameters ---------- - fixed : Fixed - Fixed values for all pixels (pre-computed data) + fixed : FixedValues + The object containing all the values and metadata which is fixed for a + given read pattern> read_noise : float - The read noise for the given pixel (data input) + The read noise for the given pixel resultants : float [:] - array of resultants for single pixel (data input) + Resultants input for the given pixel local_slopes : float [:, :] - single difference delta+slope: - delta[0, :] = (resultants[i+1] - resultants[i]) / (t_bar[i+1] - t_bar[i]) - double difference delta+slope: - delta[1, :] = (resultants[i+2] - resultants[i]) / (t_bar[i+2] - t_bar[i]) + These are the local slopes between the resultants for the pixel. 
+ single difference local slope: + delta[Diff.single, :] = (resultants[i+1] - resultants[i]) / (t_bar[i+1] - t_bar[i]) + double difference local slope: + delta[Diff.double, :] = (resultants[i+2] - resultants[i]) / (t_bar[i+2] - t_bar[i]) var_read_noise : float [:, :] - single difference "sigma": - sigma[0, :] = read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) - double difference "sigma": - sigma[1, :] = read_noise * ((1/n_reads[i+2]) + (1/n_reads[i])) + The read noise variance term of the jump statistics + single difference read noise variance: + sigma[Diff.single, :] = read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) + double difference read_noise variance: + sigma[Diff.doule, :] = read_noise * ((1/n_reads[i+2]) + (1/n_reads[i])) Notes ----- - - delta, sigma are only computed if use_jump is True. These values represent - reused computations for jump detection which are used by every ramp for - the given pixel for jump detection. They are computed once and stored for - reuse by all ramp computations for the pixel. + - local_slopes and var_read_noise are only computed if use_jump is True. + These values represent reused computations for jump detection which are + used by every ramp for the given pixel for jump detection. They are + computed once and stored for reuse by all ramp computations for the pixel. - The computations are done using vectorized operations for some performance increases. However, this is marginal compaired with the performance increase from pre-computing the values and reusing them. @@ -76,24 +79,29 @@ cdef class Pixel: @cython.wraparound(False) cdef inline float[:, :] local_slope_vals(Pixel self): """ - Compute the difference offset of resultants + Compute the local slopes between resultants for the pixel Returns ------- [ - <(resultants[i+1] - resultants[i])>, - <(resultants[i+2] - resultants[i])>, + <(resultants[i+1] - resultants[i])> / <(t_bar[i+1] - t_bar[i])>, + <(resultants[i+2] - resultants[i])> / <(t_bar[i+2] - t_bar[i])>, ] """ cdef float[:] resultants = self.resultants cdef int end = len(resultants) + # Read the t_bar_diffs into a local variable to avoid calling through Python + # multiple times cdef np.ndarray[float, ndim=2] t_bar_diffs = np.array(self.fixed.t_bar_diffs, dtype=np.float32) + cdef np.ndarray[float, ndim=2] local_slope_vals = np.zeros((2, end - 1), dtype=np.float32) - local_slope_vals[Diff.single, :] = (np.subtract(resultants[1:], resultants[:end - 1]) / t_bar_diffs[0, :]).astype(np.float32) - local_slope_vals[Diff.double, :end-2] = (np.subtract(resultants[2:], resultants[:end - 2]) / t_bar_diffs[1, :end-2]).astype(np.float32) - local_slope_vals[Diff.double, end-2] = np.nan # last double difference is undefined + local_slope_vals[Diff.single, :] = (np.subtract(resultants[1:], resultants[:end - 1]) + / t_bar_diffs[Diff.single, :]).astype(np.float32) + local_slope_vals[Diff.double, :end - 2] = (np.subtract(resultants[2:], resultants[:end - 2]) + / t_bar_diffs[Diff.double, :end-2]).astype(np.float32) + local_slope_vals[Diff.double, end - 2] = np.nan # last double difference is undefined return local_slope_vals @@ -229,12 +237,12 @@ cdef class Pixel: cdef float comp = (self.fixed.t_bar_diffs[diff, index] / (self.fixed.data.t_bar[ramp.end] - self.fixed.data.t_bar[ramp.start])) - if diff == 0: + if diff == Diff.single: return (1 - comp)**2 - elif diff == 1: + elif diff == Diff.double: return (1 - 0.75 * comp)**2 else: - raise ValueError("offset must be 1 or 2") + raise ValueError("diff must be Diff.single or Diff.double") @cython.boundscheck(False) 
@cython.wraparound(False) @@ -248,7 +256,7 @@ cdef class Pixel: * (t_bar[j] - t_bar[i]) var = sigma * (1/N[j] + 1/N[i]) + slope * (tau[j] + tau[i] - min(t_bar[j], t_bar[i])) - * correction(offset) + * correction(diff) Parameters ---------- @@ -476,6 +484,7 @@ cdef class Pixel: cdef inline Pixel make_pixel(FixedValues fixed, float read_noise, float [:] resultants): """ Fast constructor for the Pixel C class. + This creates a Pixel object for a single pixel from the input data. This is signifantly faster than using the `__init__` or `__cinit__` this is because this does not have to pass through the Python as part @@ -483,10 +492,12 @@ cdef inline Pixel make_pixel(FixedValues fixed, float read_noise, float [:] resu Parameters ---------- - fixed : Fixed + fixed : FixedValues Fixed values for all pixels + read_noise : float + read noise for the single pixel resultants : float [:] - array of resultants for single pixel + array of resultants for the single pixel - memoryview of a numpy array to avoid passing through Python Return From 26ab61f3002f1c92f4ca6fba328efc70790fb18a Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 14:35:39 -0400 Subject: [PATCH 72/90] Add t_bar_diff_sqrs --- src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 1 + src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 7 +++++++ src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx | 10 ++++++++++ tests/test_jump_cas22.py | 9 +++++++-- 4 files changed, 25 insertions(+), 2 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd index 9cbb3135..ed99c7de 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -9,6 +9,7 @@ cdef class FixedValues: cdef Thresh threshold cdef float[:, :] t_bar_diffs + cdef float[:, :] t_bar_diff_sqrs cdef float[:, :] read_recip_coeffs cdef float[:, :] var_slope_coeffs diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index 4d8cc043..fe0dbdd5 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -44,6 +44,12 @@ cdef class FixedValues: t_bar_diffs[Diff.single, :] = (t_bar[i+1] - t_bar[i]) double differences of t_bar: t_bar_diffs[Diff.double, :] = (t_bar[i+2] - t_bar[i]) + t_bar_diff_sqrs : float[:, :] + These are the squared differnences of t_bar used for jump detection. + single differences of t_bar: + t_bar_diff_sqrs[Diff.single, :] = (t_bar[i+1] - t_bar[i])**2 + double differences of t_bar: + t_bar_diff_sqrs[Diff.double, :] = (t_bar[i+2] - t_bar[i])**2 read_recip_coeffs : float[:, :] Coefficients for the read noise portion of the variance used to compute the jump detection statistics. 
These are formed from the reciprocal sum @@ -200,6 +206,7 @@ cdef inline FixedValues fixed_values_from_metadata(ReadPatternMetadata data, Thr # Pre-compute jump detection computations shared by all pixels if use_jump: fixed.t_bar_diffs = fixed.t_bar_diff_vals() + fixed.t_bar_diff_sqrs = np.square(fixed.t_bar_diffs, dtype=np.float32) fixed.read_recip_coeffs = fixed.read_recip_vals() fixed.var_slope_coeffs = fixed.var_slope_vals() diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index 63cc3256..f2824d1a 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -62,11 +62,13 @@ def fixed_values_from_metadata(np.ndarray[float, ndim=1] t_bar, cdef float constant_ = fixed.threshold.constant cdef np.ndarray[float, ndim=2] t_bar_diffs + cdef np.ndarray[float, ndim=2] t_bar_diff_sqrs cdef np.ndarray[float, ndim=2] read_recip_coeffs cdef np.ndarray[float, ndim=2] var_slope_coeffs if use_jump: t_bar_diffs = np.array(fixed.t_bar_diffs, dtype=np.float32) + t_bar_diff_sqrs = np.array(fixed.t_bar_diff_sqrs, dtype=np.float32) read_recip_coeffs = np.array(fixed.read_recip_coeffs, dtype=np.float32) var_slope_coeffs = np.array(fixed.var_slope_coeffs, dtype=np.float32) else: @@ -77,6 +79,13 @@ def fixed_values_from_metadata(np.ndarray[float, ndim=1] t_bar, else: raise AttributeError("t_bar_diffs should not exist") + try: + fixed.t_bar_diff_sqrs + except AttributeError: + t_bar_diff_sqrs = np.array([[np.nan],[np.nan]], dtype=np.float32) + else: + raise AttributeError("t_bar_diff_sqrs should not exist") + try: fixed.read_recip_coeffs except AttributeError: @@ -95,6 +104,7 @@ def fixed_values_from_metadata(np.ndarray[float, ndim=1] t_bar, intercept=intercept_, constant=constant_, t_bar_diffs=t_bar_diffs, + t_bar_diff_sqrs=t_bar_diff_sqrs, read_recip_coeffs=read_recip_coeffs, var_slope_coeffs=var_slope_coeffs) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 35712210..7cfaf61d 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -130,21 +130,24 @@ def test_fixed_values_from_metadata(ramp_data, use_jump): if use_jump: single_gen = zip( fixed['t_bar_diffs'][Diff.single], + fixed['t_bar_diff_sqrs'][Diff.single], fixed['read_recip_coeffs'][Diff.single], fixed['var_slope_coeffs'][Diff.single] ) double_gen = zip( fixed['t_bar_diffs'][Diff.double], + fixed['t_bar_diff_sqrs'][Diff.double], fixed['read_recip_coeffs'][Diff.double], fixed['var_slope_coeffs'][Diff.double] ) - for index, (t_bar_diff_1, read_recip_1, var_slope_1) in enumerate(single_gen): + for index, (t_bar_diff_1, t_bar_diff_sqr_1, read_recip_1, var_slope_1) in enumerate(single_gen): assert t_bar_diff_1 == t_bar[index + 1] - t_bar[index] + assert t_bar_diff_sqr_1 == np.float32((t_bar[index + 1] - t_bar[index]) ** 2) assert read_recip_1 == np.float32(1 / n_reads[index + 1]) + np.float32(1 / n_reads[index]) assert var_slope_1 == (tau[index + 1] + tau[index] - min(t_bar[index], t_bar[index + 1])) - for index, (t_bar_diff_2, read_recip_2, var_slope_2) in enumerate(double_gen): + for index, (t_bar_diff_2, t_bar_diff_sqr_2, read_recip_2, var_slope_2) in enumerate(double_gen): if index == len(fixed['t_bar_diffs'][1]) - 1: # Last value must be NaN assert np.isnan(t_bar_diff_2) @@ -152,12 +155,14 @@ def test_fixed_values_from_metadata(ramp_data, use_jump): assert np.isnan(var_slope_2) else: assert t_bar_diff_2 == t_bar[index + 2] - t_bar[index] + assert t_bar_diff_sqr_2 == np.float32((t_bar[index + 2] - 
t_bar[index])**2) assert read_recip_2 == np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) assert var_slope_2 == (tau[index + 2] + tau[index] - min(t_bar[index], t_bar[index + 2])) else: # If not using jumps, these values should not even exist. However, for wrapping # purposes, they are checked to be non-existent and then set to NaN assert np.isnan(fixed['t_bar_diffs']).all() + assert np.isnan(fixed['t_bar_diff_sqrs']).all() assert np.isnan(fixed['read_recip_coeffs']).all() assert np.isnan(fixed['var_slope_coeffs']).all() From e7e5c8762ae935af121591019a36270208dfa992 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 14:51:37 -0400 Subject: [PATCH 73/90] Use new correction factor formulation --- src/stcal/ramp_fitting/ols_cas22/_pixel.pxd | 2 +- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 37 +++++++++------------ tests/test_jump_cas22.py | 15 +++++---- 3 files changed, 24 insertions(+), 30 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd index a909b346..326c1d1f 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd @@ -14,7 +14,7 @@ cdef class Pixel: cdef float[:, :] local_slope_vals(Pixel self) cdef RampFit fit_ramp(Pixel self, RampIndex ramp) - cdef float correction(Pixel self, RampIndex ramp, int index, int diff) + cdef float correction(Pixel self, RampIndex ramp, float slope) cdef float stat(Pixel self, float slope, RampIndex ramp, int index, int diff) cdef float[:] stats(Pixel self, float slope, RampIndex ramp) cdef RampFits fit_ramps(Pixel self, stack[RampIndex] ramps) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index b5968f4d..e376123e 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -220,29 +220,23 @@ cdef class Pixel: @cython.boundscheck(False) @cython.wraparound(False) - cdef inline float correction(Pixel self, RampIndex ramp, int index, int diff): + cdef inline float correction(Pixel self, RampIndex ramp, float slope): """ Compute the correction factor for the variance used by a statistic + + - slope / (t_bar[end] - t_bar[start]) Parameters ---------- ramp : RampIndex Struct for start and end indices resultants for the ramp - index : int - The main index for the resultant to compute the statistic for - diff : int - The offset to use for the delta and sigma values, this should be - a value from the Diff enum. 
+ slope : float + The computed slope for the ramp """ - cdef float comp = (self.fixed.t_bar_diffs[diff, index] / - (self.fixed.data.t_bar[ramp.end] - self.fixed.data.t_bar[ramp.start])) - if diff == Diff.single: - return (1 - comp)**2 - elif diff == Diff.double: - return (1 - 0.75 * comp)**2 - else: - raise ValueError("diff must be Diff.single or Diff.double") + cdef float diff = (self.fixed.data.t_bar[ramp.end] - self.fixed.data.t_bar[ramp.start]) + + return - slope / diff @cython.boundscheck(False) @cython.wraparound(False) @@ -250,13 +244,12 @@ cdef class Pixel: cdef inline float stat(Pixel self, float slope, RampIndex ramp, int index, int diff): """ Compute a single set of fit statistics - delta / sqrt(var) + (delta / sqrt(var)) + correction where delta = ((R[j] - R[i]) / (t_bar[j] - t_bar[i]) - slope) * (t_bar[j] - t_bar[i]) var = sigma * (1/N[j] + 1/N[i]) + slope * (tau[j] + tau[i] - min(t_bar[j], t_bar[i])) - * correction(diff) Parameters ---------- @@ -274,13 +267,13 @@ cdef class Pixel: ------- Create a single instance of the stastic for the given parameters """ - cdef float delta = ((self.local_slopes[diff, index] - slope) * - fabs(self.fixed.t_bar_diffs[diff, index])) - cdef float var = (self.var_read_noise[diff, index] + - slope * self.fixed.var_slope_coeffs[diff, index] * - self.correction(ramp, index, diff)) + cdef float delta = (self.local_slopes[diff, index] - slope) + cdef float var = ((self.var_read_noise[diff, index] + + slope * self.fixed.var_slope_coeffs[diff, index]) + / self.fixed.t_bar_diff_sqrs[diff, index]) + cdef float correct = self.correction(ramp, slope) - return delta / sqrt(var) + return (delta / sqrt(var)) + correct @cython.boundscheck(False) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 7cfaf61d..76eca258 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -396,6 +396,7 @@ def test_find_jumps(jump_data): # Check that all the jumps have been located per the algorithm's constraints for index, (fit, jump) in enumerate(zip(fits, jumps)): + print(f"{index=}, {fit['jumps']=}, {jump=}") # sanity check that only one jump should have been added assert np.where(jump)[0].shape == (1,) if index == 0: @@ -419,25 +420,25 @@ def test_find_jumps(jump_data): # Test that all the jumps recorded are +/- 1 of the real jump # This is due to the need to exclude two resultants for jump_index in fit['jumps']: - assert jump[jump_index] or jump[jump_index + 1] or jump[jump_index - 1] + assert jump[jump_index] or jump[jump_index - 1] or jump[jump_index + 1] # Test that the correct indexes are recorded - ramp_indicies = [] + ramp_indices = [] for ramp_index in fit["index"]: # Note start/end of a ramp_index are inclusive meaning that end # is an index included in the ramp_index so the range is to end + 1 - new_indicies = list(range(ramp_index["start"], ramp_index["end"] + 1)) + new_indices = list(range(ramp_index["start"], ramp_index["end"] + 1)) # check that all the ramps are non-overlapping - assert set(ramp_indicies).isdisjoint(new_indicies) + assert set(ramp_indices).isdisjoint(new_indices) - ramp_indicies.extend(new_indicies) + ramp_indices.extend(new_indices) # check that no ramp_index is a jump - assert set(ramp_indicies).isdisjoint(fit['jumps']) + assert set(ramp_indices).isdisjoint(fit['jumps']) # check that all resultant indicies are either in a ramp or listed as a jump - assert set(ramp_indicies).union(fit['jumps']) == set(range(len(read_pattern))) + assert set(ramp_indices).union(fit['jumps']) == set(range(len(read_pattern))) # 
Check that the slopes have been estimated reasonably well # There are not that many pixels to test this against and many resultants From 7111282ed183f49b58f733e38cde13182fc49f33 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 15:08:37 -0400 Subject: [PATCH 74/90] Remove ols_cas22_util module --- src/stcal/ramp_fitting/ols_cas22_fit.py | 20 +-- src/stcal/ramp_fitting/ols_cas22_util.py | 168 ----------------------- tests/test_ramp_fitting_cas22.py | 62 +++------ 3 files changed, 24 insertions(+), 226 deletions(-) delete mode 100644 src/stcal/ramp_fitting/ols_cas22_util.py diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index c5c33063..afc5b908 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -33,11 +33,6 @@ import numpy as np from . import ols_cas22 -from .ols_cas22_util import ( - ma_table_to_tau, - ma_table_to_tbar, - ma_table_to_read_pattern -) def fit_ramps_casertano( @@ -45,8 +40,7 @@ def fit_ramps_casertano( dq, read_noise, read_time, - ma_table=None, - read_pattern=None, + read_pattern, use_jump=False ): """Fit ramps following Casertano+2022, including averaging partial ramps. @@ -66,10 +60,7 @@ def fit_ramps_casertano( the read noise in electrons read_time : float Read time. For Roman data this is the FRAME_TIME keyword. - ma_table : list[list[int]] or None - The MA table prescription. If None, use `read_pattern`. - One of `ma_table` or `read_pattern` must be defined. - read_pattern : list[list[int]] or None + read_pattern : list[list[int]] The read pattern prescription. If None, use `ma_table`. One of `ma_table` or `read_pattern` must be defined. use_jump : bool @@ -85,13 +76,6 @@ def fit_ramps_casertano( the read noise, Poisson source noise, and total noise. """ - # Get the Multi-accum table, either as given or from the read pattern - if read_pattern is None: - if ma_table is not None: - read_pattern = ma_table_to_read_pattern(ma_table) - if read_pattern is None: - raise RuntimeError('One of `ma_table` or `read_pattern` must be given.') - resultants_unit = getattr(resultants, 'unit', None) if resultants_unit is not None: resultants = resultants.to(u.electron).value diff --git a/src/stcal/ramp_fitting/ols_cas22_util.py b/src/stcal/ramp_fitting/ols_cas22_util.py deleted file mode 100644 index 6f69d4f8..00000000 --- a/src/stcal/ramp_fitting/ols_cas22_util.py +++ /dev/null @@ -1,168 +0,0 @@ -"""Utility routines for Mutli-Accum Ramp Fitting -""" -import numpy as np - -__all__ = [ - 'ma_table_to_read_pattern', - 'ma_table_to_tau', - 'ma_table_to_tbar', - 'read_pattern_to_ma_table'] - - -def ma_table_to_read_pattern(ma_table): - """Convert read patterns to multi-accum lists - - Using Roman terminology, a "read pattern" is a list of resultants. Each element of this list - is a list of reads that were combined, on-board, to create a resultant. An example read pattern is - - [[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]] - - This pattern has 6 resultants, the first consistent of the first read, the - next consisting of reads 2 and 3, the third consists of read 4, and so on. - - A "Multi-accum table" is a short-hand version of the read pattern. 
It is a - list of 2-tuples consisting of the following: - - (start_read, n_reads) - - For example, the above read pattern would be represented as, using lists instead of tuples: - - [[1, 1], [2, 2], [4, 1], [5, 4], [9,2], [11,1]] - - The example above, using this function, should perform as follows: - >>> ma_table_to_read_pattern([[1, 1], [2, 2], [4, 1], [5, 4], [9,2], [11,1]]) - [[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]] - - Parameters - ---------- - ma_table : [(first_read, n_reads)[,...]] - The multi-accum table to convert. - - Returns - ------- - read_pattern : [[int[,...]][,...]] - The read pattern that represents the given multi-accum table. - - """ - read_pattern = [list(range(start, start + len)) - for start, len in ma_table] - - return read_pattern - - -def ma_table_to_tau(ma_table, read_time): - """Construct the tau for each resultant from an ma_table. - - .. math:: \\tau = \\overline{t} - (n - 1)(n + 1)\\delta t / 6n - - following Casertano (2022). - - Parameters - ---------- - ma_table : list[list] - List of lists specifying the first read and the number of reads in each - resultant. - - read_time : float - Time to perform a read out. For Roman data, this is FRAME_TIME. - - Returns - ------- - :math:`\\tau` - A time scale appropriate for computing variances. - """ - - meantimes = ma_table_to_tbar(ma_table, read_time) - nreads = np.array([x[1] for x in ma_table]) - return meantimes - (nreads - 1) * (nreads + 1) * read_time / 6 / nreads - - -def ma_table_to_tij(ma_table, read_time): - """Get the times of each read going into resultants for a MA table. - - Currently only ma_table_number = 1 is supported, corresponding to a simple - fiducial high latitude imaging MA table. - - This presently uses a hard-coded, somewhat inflexible MA table description - in the parameters file. But that seems like an okay option given that the - current 'official' file is slated for redesign when the format is relaxed. - - Parameters - ---------- - ma_table : list[list] - A list of (first_read, n_reads) tuples going into resultants. - - read_time : float - The time taken for a read-out. For Roman, this is FRAME_TIME. - - Returns - ------- - list[list[float]] - list of list of readout times for each read entering a resultant - """ - tij = [read_time * np.arange(f, f + n) for (f, n) in ma_table] - return tij - - -def ma_table_to_tbar(ma_table, read_time): - """Construct the mean times for each resultant from an ma_table. - - Parameters - ---------- - ma_table : list[list] - List of lists specifying the first read and the number of reads in each - resultant. - - Returns - ------- - tbar : np.ndarray[n_resultant] (float) - The mean time of the reads of each resultant. - """ - firstreads = np.array([x[0] for x in ma_table]) - nreads = np.array([x[1] for x in ma_table]) - meantimes = read_time * firstreads + read_time * (nreads - 1) / 2 - # at some point I need to think hard about whether the first read has - # slightly less exposure time than all other reads due to the read/reset - # time being slightly less than the read time. - return meantimes - - -def read_pattern_to_ma_table(read_pattern): - """Convert read patterns to multi-accum lists - - Using Roman terminology, a "read pattern" is a list of resultants. Each element of this list - is a list of reads that were combined, on-board, to create a resultant. 
An example read pattern is - - [[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]] - - This pattern has 6 resultants, the first consistent of the first read, the - next consisting of reads 2 and 3, the third consists of read 4, and so on. - - A "Multi-accum table" is a short-hand version of the read pattern. It is a - list of 2-tuples consisting of the following: - - (start_read, n_reads) - - For example, the above read pattern would be represented as, using lists instead of tuples: - - [[1, 1], [2, 2], [4, 1], [5, 4], [9,2], [11,1]] - - The example above, using this function, should perform as follows: - >>> read_pattern_to_ma_table([[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]]) - [[1, 1], [2, 2], [4, 1], [5, 4], [9, 2], [11, 1]] - - Parameters - ---------- - read_pattern : [[int[,...]][,...]] - The read pattern to convert. - - Returns - ------- - ma_table : [(first_read, n_reads)[,...]] - The multi-accum table that represents the given read pattern. - - """ - ma_table = [[resultant[0], len(resultant)] - for resultant in read_pattern] - - return ma_table diff --git a/tests/test_ramp_fitting_cas22.py b/tests/test_ramp_fitting_cas22.py index 935642e4..9ffe0992 100644 --- a/tests/test_ramp_fitting_cas22.py +++ b/tests/test_ramp_fitting_cas22.py @@ -5,7 +5,6 @@ import numpy as np from stcal.ramp_fitting import ols_cas22_fit as ramp -from stcal.ramp_fitting import ols_cas22_util # Read Time in seconds # For Roman, the read time of the detectors is a fixed value and is currently @@ -14,44 +13,24 @@ ROMAN_READ_TIME = 3.04 -def test_ma_table_to_read_pattern(): - """Test conversion from read pattern to multi-accum table""" - ma_table = [[1, 1], [2, 2], [4, 1], [5, 4], [9,2], [11,1]] - expected = [[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]] - - result = ols_cas22_util.ma_table_to_read_pattern(ma_table) - - assert result == expected - - -def test_read_pattern_to_ma_table(): - """Test conversion from read pattern to multi-accum table""" - pattern = [[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]] - expected = [[1, 1], [2, 2], [4, 1], [5, 4], [9,2], [11,1]] - - result = ols_cas22_util.read_pattern_to_ma_table(pattern) - - assert result == expected - - def test_simulated_ramps(): ntrial = 100000 - ma_table, flux, read_noise, resultants = simulate_many_ramps(ntrial=ntrial) + read_pattern, flux, read_noise, resultants = simulate_many_ramps(ntrial=ntrial) dq = np.zeros(resultants.shape, dtype=np.int32) read_noise = np.ones(resultants.shape[1], dtype=np.float32) * read_noise par, var = ramp.fit_ramps_casertano( - resultants, dq, read_noise, ROMAN_READ_TIME, ma_table=ma_table) + resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern=read_pattern) chi2dof_slope = np.sum((par[:, 1] - flux)**2 / var[:, 2]) / ntrial assert np.abs(chi2dof_slope - 1) < 0.03 # now let's mark a bunch of the ramps as compromised. bad = np.random.uniform(size=resultants.shape) > 0.7 - dq += bad + dq |= bad par, var = ramp.fit_ramps_casertano( - resultants, dq, read_noise, ROMAN_READ_TIME, ma_table=ma_table) + resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern=read_pattern) # only use okay ramps # ramps passing the below criterion have at least two adjacent valid reads # i.e., we can make a measurement from them. @@ -65,7 +44,7 @@ def test_simulated_ramps(): # ######### # Utilities # ######### -def simulate_many_ramps(ntrial=100, flux=100, readnoise=5, ma_table=None): +def simulate_many_ramps(ntrial=100, flux=100, readnoise=5, read_pattern=None): """Simulate many ramps with a particular flux, read noise, and ma_table. 
To test ramp fitting, it's useful to be able to simulate a large number @@ -79,9 +58,8 @@ def simulate_many_ramps(ntrial=100, flux=100, readnoise=5, ma_table=None): flux in electrons / s read_noise : float read noise in electrons - ma_table : list[list] (int) - list of lists indicating first read and number of reads in each - resultant + read_pattern : list[list] (int) + An optional read pattern Returns ------- @@ -94,18 +72,22 @@ def simulate_many_ramps(ntrial=100, flux=100, readnoise=5, ma_table=None): resultants : np.ndarray[n_resultant, ntrial] (float) simulated resultants """ - if ma_table is None: - ma_table = [[1, 4], [5, 1], [6, 3], [9, 10], [19, 3], [22, 15]] - nread = np.array([x[1] for x in ma_table]) - tij = ols_cas22_util.ma_table_to_tij(ma_table, ROMAN_READ_TIME) - resultants = np.zeros((len(ma_table), ntrial), dtype='f4') + if read_pattern is None: + read_pattern = [[1, 2, 3, 4], + [5], + [6, 7, 8], + [9, 10, 11, 12, 13, 14, 15, 16, 17, 18], + [19, 20, 21], + [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36]] + nread = np.array([len(x) for x in read_pattern]) + resultants = np.zeros((len(read_pattern), ntrial), dtype='f4') buf = np.zeros(ntrial, dtype='i4') - for i, ti in enumerate(tij): + for i, reads in enumerate(read_pattern): subbuf = np.zeros(ntrial, dtype='i4') - for t0 in ti: + for _ in reads: buf += np.random.poisson(ROMAN_READ_TIME * flux, ntrial) subbuf += buf - resultants[i] = (subbuf / len(ti)).astype('f4') - resultants += np.random.randn(len(ma_table), ntrial) * ( - readnoise / np.sqrt(nread)).reshape(len(ma_table), 1) - return (ma_table, flux, readnoise, resultants) + resultants[i] = (subbuf / len(reads)).astype('f4') + resultants += np.random.randn(len(read_pattern), ntrial) * ( + readnoise / np.sqrt(nread)).reshape(len(read_pattern), 1) + return (read_pattern, flux, readnoise, resultants) From a1c554a35e953e5b48f7a4c677386f3d46e4785c Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 15:27:48 -0400 Subject: [PATCH 75/90] Clean up test constants --- tests/test_jump_cas22.py | 42 ++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 76eca258..26ea1bf6 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -11,6 +11,7 @@ RNG = np.random.default_rng(619) ROMAN_READ_TIME = 3.04 +READ_NOISE = np.float32(5) N_PIXELS = 100_000 FLUX = 100 JUMP_VALUE = 10_000 @@ -167,7 +168,7 @@ def test_fixed_values_from_metadata(ramp_data, use_jump): assert np.isnan(fixed['var_slope_coeffs']).all() -def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1): +def _generate_resultants(read_pattern, n_pixels=1): """Generate a set of resultants for a pixel""" resultants = np.zeros((len(read_pattern), n_pixels), dtype=np.float32) @@ -179,9 +180,9 @@ def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1): # Compute the next value of the ramp # - Poisson process for the flux # - Gaussian process for the read noise - ramp_value += RNG.poisson(flux * ROMAN_READ_TIME, size=n_pixels).astype(np.float32) + ramp_value += RNG.poisson(FLUX * ROMAN_READ_TIME, size=n_pixels).astype(np.float32) ramp_value += ( - RNG.standard_normal(size=n_pixels, dtype=np.float32)* read_noise / np.sqrt(len(reads)) + RNG.standard_normal(size=n_pixels, dtype=np.float32) * READ_NOISE / np.sqrt(len(reads)) ) # Add to running total for the resultant @@ -199,27 +200,26 @@ def _generate_resultants(read_pattern, flux, read_noise, n_pixels=1): 
@pytest.fixture(scope="module") def pixel_data(ramp_data): """Create data for a single pixel""" - read_noise = np.float32(5) read_pattern, t_bar, tau, n_reads = ramp_data - resultants = _generate_resultants(read_pattern, FLUX, read_noise) + resultants = _generate_resultants(read_pattern) - yield resultants, t_bar, tau, n_reads, read_noise, FLUX + yield resultants, t_bar, tau, n_reads @pytest.mark.parametrize("use_jump", [True, False]) def test_make_pixel(pixel_data, use_jump): """Test computing the initial pixel data""" - resultants, t_bar, tau, n_reads, read_noise, _ = pixel_data + resultants, t_bar, tau, n_reads = pixel_data intercept = np.float32(5.5) constant = np.float32(1/3) - pixel = make_pixel(resultants, t_bar, tau, n_reads, read_noise, intercept, constant, use_jump) + pixel = make_pixel(resultants, t_bar, tau, n_reads, READ_NOISE, intercept, constant, use_jump) # Basic sanity checks that data passed in survives assert (pixel['resultants'] == resultants).all() - assert read_noise == pixel['read_noise'] + assert READ_NOISE == pixel['read_noise'] # Check the computed data # These are computed via vectorized operations in the main code, here we @@ -230,7 +230,7 @@ def test_make_pixel(pixel_data, use_jump): for index, (local_slope_1, var_read_noise_1) in enumerate(single_gen): assert local_slope_1 == (resultants[index + 1] - resultants[index]) / (t_bar[index + 1] - t_bar[index]) - assert var_read_noise_1 == read_noise * ( + assert var_read_noise_1 == READ_NOISE * ( np.float32(1 / n_reads[index + 1]) + np.float32(1 / n_reads[index]) ) @@ -243,7 +243,7 @@ def test_make_pixel(pixel_data, use_jump): assert local_slope_2 == ( (resultants[index + 2] - resultants[index]) / (t_bar[index + 2] - t_bar[index]) ) - assert var_read_noise_2 == read_noise * ( + assert var_read_noise_2 == READ_NOISE * ( np.float32(1 / n_reads[index + 2]) + np.float32(1 / n_reads[index]) ) else: @@ -260,18 +260,18 @@ def detector_data(ramp_data): would be passed in by the supporting code. """ read_pattern, *_ = ramp_data - read_noise = np.ones(N_PIXELS, dtype=np.float32) * 5 + read_noise = np.ones(N_PIXELS, dtype=np.float32) * READ_NOISE - resultants = _generate_resultants(read_pattern, FLUX, read_noise, n_pixels=N_PIXELS) + resultants = _generate_resultants(read_pattern, n_pixels=N_PIXELS) - return resultants, read_noise, read_pattern, N_PIXELS, FLUX + return resultants, read_noise, read_pattern, N_PIXELS @pytest.mark.parametrize("use_jump", [True, False]) def test_fit_ramps_array_outputs(detector_data, use_jump): """ Test that the array outputs line up with the dictionary output """ - resultants, read_noise, read_pattern, n_pixels, flux = detector_data + resultants, read_noise, read_pattern, n_pixels = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) fits, parameters, variances = fit_ramps( @@ -296,7 +296,7 @@ def test_fit_ramps_no_dq(detector_data, use_jump): Since no jumps are simulated in the data, jump detection shouldn't pick up any jumps. 
""" - resultants, read_noise, read_pattern, n_pixels, flux = detector_data + resultants, read_noise, read_pattern, n_pixels = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) fits, _, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) @@ -308,7 +308,7 @@ def test_fit_ramps_no_dq(detector_data, use_jump): assert len(fit['fits']) == 1 # only one fit per pixel since no dq/jump total_var = fit['average']['read_var'] + fit['average']['poisson_var'] - chi2 += (fit['average']['slope'] - flux)**2 / total_var + chi2 += (fit['average']['slope'] - FLUX)**2 / total_var chi2 /= n_pixels @@ -322,7 +322,7 @@ def test_fit_ramps_dq(detector_data, use_jump): Since no jumps are simulated in the data, jump detection shouldn't pick up any jumps. """ - resultants, read_noise, read_pattern, n_pixels, flux = detector_data + resultants, read_noise, read_pattern, n_pixels = detector_data dq = (RNG.uniform(size=resultants.shape) > 1).astype(np.int32) # only use okay ramps @@ -338,7 +338,7 @@ def test_fit_ramps_dq(detector_data, use_jump): if use: # Add okay ramps to chi2 total_var = fit['average']['read_var'] + fit['average']['poisson_var'] - chi2 += (fit['average']['slope'] - flux)**2 / total_var + chi2 += (fit['average']['slope'] - FLUX)**2 / total_var else: # Check no slope fit for bad ramps assert fit['average']['slope'] == 0 @@ -377,10 +377,10 @@ def jump_data(): resultants[:, jump_index] = np.mean(read_values.reshape(shape), axis=1).astype(np.float32) n_pixels = np.prod(shape) - read_noise = np.ones(n_pixels, dtype=np.float32) * 5 + read_noise = np.ones(n_pixels, dtype=np.float32) * READ_NOISE # Add actual ramp data in addition to the jump data - resultants += _generate_resultants(read_pattern, FLUX, read_noise, n_pixels=n_pixels) + resultants += _generate_resultants(read_pattern, n_pixels=n_pixels) return resultants, read_noise, read_pattern, n_pixels, jumps.transpose() From 167a0bfea810c7d49cbfc49b55bb4dbc47e265fc Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 15:51:14 -0400 Subject: [PATCH 76/90] Update the random ramp generator --- tests/test_jump_cas22.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 26ea1bf6..375ac658 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -178,16 +178,17 @@ def _generate_resultants(read_pattern, n_pixels=1): resultant_total = np.zeros(n_pixels, dtype=np.float32) # Total of all reads in this resultant for _ in reads: # Compute the next value of the ramp - # - Poisson process for the flux - # - Gaussian process for the read noise + # Using a Poisson process for the flux ramp_value += RNG.poisson(FLUX * ROMAN_READ_TIME, size=n_pixels).astype(np.float32) - ramp_value += ( - RNG.standard_normal(size=n_pixels, dtype=np.float32) * READ_NOISE / np.sqrt(len(reads)) - ) # Add to running total for the resultant resultant_total += ramp_value + # Add read noise to the resultant + resultant_total += ( + RNG.standard_normal(size=n_pixels, dtype=np.float32) * READ_NOISE / np.sqrt(len(reads)) + ) + # Record the average value for resultant (i.e., the average of the reads) resultants[index] = (resultant_total / len(reads)).astype(np.float32) From ba82b1451c147e8371611a713de128388c83d097 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 17:32:40 -0400 Subject: [PATCH 77/90] Add full jump detection test with ramp fitting --- tests/test_jump_cas22.py | 206 
+++++++++++++++++++++++++-------------- 1 file changed, 131 insertions(+), 75 deletions(-) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 375ac658..f5adc45b 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -20,7 +20,16 @@ @pytest.fixture(scope="module") def base_ramp_data(): - """Basic data for simulating ramps for testing (not unpacked)""" + """ + Basic data for simulating ramps for testing (not unpacked) + + Returns + ------- + read_pattern : list[list[int]] + The example read pattern + metadata : dict + The metadata computed from the read pattern + """ read_pattern = [ [1, 2, 3, 4], [5], @@ -100,12 +109,26 @@ def test_threshold(): @pytest.fixture(scope="module") def ramp_data(base_ramp_data): - """Unpacked data for simulating ramps for testing""" - t_bar = np.array(base_ramp_data[1]['t_bar'], dtype=np.float32) - tau = np.array(base_ramp_data[1]['tau'], dtype=np.float32) - n_reads = np.array(base_ramp_data[1]['n_reads'], dtype=np.int32) + """ + Unpacked metadata for simulating ramps for testing + + Returns + ------- + read_pattern: + The read pattern used for testing + t_bar: + The t_bar values for the read pattern + tau: + The tau values for the read pattern + n_reads: + The number of reads for the read pattern + """ + read_pattern, read_pattern_metadata = base_ramp_data + t_bar = np.array(read_pattern_metadata['t_bar'], dtype=np.float32) + tau = np.array(read_pattern_metadata['tau'], dtype=np.float32) + n_reads = np.array(read_pattern_metadata['n_reads'], dtype=np.int32) - yield base_ramp_data[0], t_bar, tau, n_reads + yield read_pattern, t_bar, tau, n_reads @pytest.mark.parametrize("use_jump", [True, False]) @@ -169,7 +192,19 @@ def test_fixed_values_from_metadata(ramp_data, use_jump): def _generate_resultants(read_pattern, n_pixels=1): - """Generate a set of resultants for a pixel""" + """ + Generate a set of resultants for a pixel + + Parameters: + read_pattern : list[list[int]] + The read pattern to use + n_pixels: + The number of pixels to generate resultants for. Default is 1. + + Returns: + resultants + The resultants generated + """ resultants = np.zeros((len(read_pattern), n_pixels), dtype=np.float32) # Use Poisson process to simulate the accumulation of the ramp @@ -200,7 +235,19 @@ def _generate_resultants(read_pattern, n_pixels=1): @pytest.fixture(scope="module") def pixel_data(ramp_data): - """Create data for a single pixel""" + """ + Create data for a single pixel + + Returns: + resultants + Resultants for a single pixel + t_bar: + The t_bar values for the read pattern used for the resultants + tau: + The tau values for the read pattern used for the resultants + n_reads: + The number of reads for the read pattern used for the resultants + """ read_pattern, t_bar, tau, n_reads = ramp_data resultants = _generate_resultants(read_pattern) @@ -259,20 +306,29 @@ def detector_data(ramp_data): """ Generate a set of with no jumps data as if for a single detector as it would be passed in by the supporting code. 
+ + Returns: + resultants + The resultants for a large number of pixels + read_noise: + The read noise vector for those pixels + read_pattern: + The read pattern used for the resultants + """ read_pattern, *_ = ramp_data read_noise = np.ones(N_PIXELS, dtype=np.float32) * READ_NOISE resultants = _generate_resultants(read_pattern, n_pixels=N_PIXELS) - return resultants, read_noise, read_pattern, N_PIXELS + return resultants, read_noise, read_pattern @pytest.mark.parametrize("use_jump", [True, False]) def test_fit_ramps_array_outputs(detector_data, use_jump): """ Test that the array outputs line up with the dictionary output """ - resultants, read_noise, read_pattern, n_pixels = detector_data + resultants, read_noise, read_pattern = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) fits, parameters, variances = fit_ramps( @@ -297,11 +353,11 @@ def test_fit_ramps_no_dq(detector_data, use_jump): Since no jumps are simulated in the data, jump detection shouldn't pick up any jumps. """ - resultants, read_noise, read_pattern, n_pixels = detector_data + resultants, read_noise, read_pattern = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) fits, _, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) - assert len(fits) == n_pixels # sanity check that a fit is output for each pixel + assert len(fits) == N_PIXELS # sanity check that a fit is output for each pixel # Check that the chi2 for the resulting fit relative to the assumed flux is ~1 chi2 = 0 @@ -311,7 +367,7 @@ def test_fit_ramps_no_dq(detector_data, use_jump): total_var = fit['average']['read_var'] + fit['average']['poisson_var'] chi2 += (fit['average']['slope'] - FLUX)**2 / total_var - chi2 /= n_pixels + chi2 /= N_PIXELS assert np.abs(chi2 - 1) < CHI2_TOL @@ -323,7 +379,7 @@ def test_fit_ramps_dq(detector_data, use_jump): Since no jumps are simulated in the data, jump detection shouldn't pick up any jumps. """ - resultants, read_noise, read_pattern, n_pixels = detector_data + resultants, read_noise, read_pattern = detector_data dq = (RNG.uniform(size=resultants.shape) > 1).astype(np.int32) # only use okay ramps @@ -332,7 +388,7 @@ def test_fit_ramps_dq(detector_data, use_jump): okay = np.sum((dq[1:, :] == 0) & (dq[:-1, :] == 0), axis=0) != 0 fits, _, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) - assert len(fits) == n_pixels # sanity check that a fit is output for each pixel + assert len(fits) == N_PIXELS # sanity check that a fit is output for each pixel chi2 = 0 for fit, use in zip(fits, okay): @@ -351,81 +407,82 @@ def test_fit_ramps_dq(detector_data, use_jump): @pytest.fixture(scope="module") -def jump_data(): +def jump_data(detector_data): """ - Generate a set of data were jumps are simulated in each possible read. - - jumps should occur in read of same index as the pixel index. + Generate resultants with single jumps in them for testing jump detection. + Note this specifically checks that we can detect jumps in any read, meaning + it has an insurance check that a jump has been placed in every single + read position. """ + resultants, read_noise, read_pattern = detector_data + + # Choose read to place a single jump in for each pixel + num_reads = read_pattern[-1][-1] + jump_reads = RNG.integers(num_reads - 1, size=N_PIXELS) + + # This shows that a jump as been placed in every single possible + # read position. 
Technically, this check can fail; however, + # N_PIXELS >> num_reads so it is very unlikely in practice since + # all reads are equally likely to be chosen for a jump. + # It is a good check that we can detect a jump occurring in any read except + # the first read. + assert set(jump_reads) == set(range(num_reads - 1)) + + # Fill out jump reads with jump values + jump_flux = np.zeros((num_reads, N_PIXELS), dtype=np.float32) + for index, jump in enumerate(jump_reads): + jump_flux[jump:, index] = JUMP_VALUE + + # Average the reads into the resultants + jump_resultants = np.zeros(N_PIXELS, dtype=np.int32) + for index, reads in enumerate(read_pattern): + indices = np.array(reads) - 1 + resultants[index, :] += np.mean(jump_flux[indices, :], axis=0) + for read in reads: + jump_resultants[np.where(jump_reads == read)] = index - # Generate a read pattern with 8 reads per resultant - shape = (8, 8) - read_pattern = np.arange(np.prod(shape)).reshape(shape).tolist() - - resultants = np.zeros((len(read_pattern), np.prod(shape)), dtype=np.float32) - jumps = np.zeros((len(read_pattern), np.prod(shape)), dtype=bool) - jump_res = -1 - for jump_index in range(np.prod(shape)): - read_values = np.zeros(np.prod(shape), dtype=np.float32) - for index in range(np.prod(shape)): - if index >= jump_index: - read_values[index] = JUMP_VALUE - - if jump_index % shape[1] == 0: - # Start indicating a new resultant - jump_res += 1 - jumps[jump_res, jump_index] = True - - resultants[:, jump_index] = np.mean(read_values.reshape(shape), axis=1).astype(np.float32) - - n_pixels = np.prod(shape) - read_noise = np.ones(n_pixels, dtype=np.float32) * READ_NOISE - - # Add actual ramp data in addition to the jump data - resultants += _generate_resultants(read_pattern, n_pixels=n_pixels) - - return resultants, read_noise, read_pattern, n_pixels, jumps.transpose() + return resultants, read_noise, read_pattern, jump_reads, jump_resultants def test_find_jumps(jump_data): """ - Check that we can locate all the jumps in a given ramp + Full unit tests to demonstrate that we can detect jumps in any read (except + the first one) and that we correctly remove these reads from the fit to recover + the correct FLUX/slope. 
""" - resultants, read_noise, read_pattern, n_pixels, jumps = jump_data + resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data dq = np.zeros(resultants.shape, dtype=np.int32) fits, _, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) + assert len(fits) == len(jump_reads) # sanity check that a fit/jump is set for every pixel + + chi2 = 0 + for fit, jump_index, resultant_index in zip(fits, jump_reads, jump_resultants): - # Check that all the jumps have been located per the algorithm's constraints - for index, (fit, jump) in enumerate(zip(fits, jumps)): - print(f"{index=}, {fit['jumps']=}, {jump=}") - # sanity check that only one jump should have been added - assert np.where(jump)[0].shape == (1,) - if index == 0: + # Check that the jumps are detected correctly + if jump_index == 0: # There is no way to detect a jump if it is in the very first read # The very first pixel in this case has a jump in the first read assert len(fit['jumps']) == 0 - assert jump[0] # sanity check that the jump is in the first resultant still - assert not np.all(jump[1:]) + assert resultant_index == 0 # sanity check that the jump is indeed in the first resultant - # Test that the correct index was recorded + # Test the correct ramp_index was recorded: assert len(fit['index']) == 1 assert fit['index'][0]['start'] == 0 assert fit['index'][0]['end'] == len(read_pattern) - 1 else: - # Select the single jump and check that it is recorded as a jump - assert np.where(jump)[0][0] in fit['jumps'] - - # In all cases here we have to exclude two resultants + # There should be a single jump detected; however, this results in + # two resultants being excluded. assert len(fit['jumps']) == 2 + assert resultant_index in fit['jumps'] - # Test that all the jumps recorded are +/- 1 of the real jump - # This is due to the need to exclude two resultants - for jump_index in fit['jumps']: - assert jump[jump_index] or jump[jump_index - 1] or jump[jump_index + 1] + # The two resultants excluded should be adjacent + for jump in fit['jumps']: + assert jump == resultant_index or jump == resultant_index - 1 or jump == resultant_index + 1 - # Test that the correct indexes are recorded + # Test the correct ramp indexes are recorded ramp_indices = [] - for ramp_index in fit["index"]: + for ramp_index in fit['index']: # Note start/end of a ramp_index are inclusive meaning that end # is an index included in the ramp_index so the range is to end + 1 new_indices = list(range(ramp_index["start"], ramp_index["end"] + 1)) @@ -438,14 +495,13 @@ def test_find_jumps(jump_data): # check that no ramp_index is a jump assert set(ramp_indices).isdisjoint(fit['jumps']) - # check that all resultant indicies are either in a ramp or listed as a jump + # check that all resultant indices are either in a ramp or listed as a jump assert set(ramp_indices).union(fit['jumps']) == set(range(len(read_pattern))) - # Check that the slopes have been estimated reasonably well - # There are not that many pixels to test this against and many resultants - # have been thrown out due to the jumps. Thus we only check the slope is - # "fairly close" to the expected value. This is purposely a loose check - # because the main purpose of this test is to verify that the jumps are - # being detected correctly, above. 
- for fit in fits: - assert_allclose(fit['average']['slope'], FLUX, rtol=3) + # Compute the chi2 for the fit and add it to a running "total chi2" + total_var = fit['average']['read_var'] + fit['average']['poisson_var'] + chi2 += (fit['average']['slope'] - FLUX)**2 / total_var + + # Check that the average chi2 is ~1. + chi2 /= N_PIXELS + assert np.abs(chi2 - 1) < CHI2_TOL From 39eabb55010763595223c9e4e7b588fc7e5c6b61 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 17:38:32 -0400 Subject: [PATCH 78/90] Remove pure python implementation of cas22 ramp fitting --- src/stcal/ramp_fitting/ols_cas22_fit.py | 96 ------------------------- tests/test_jump_cas22.py | 22 ++++-- 2 files changed, 17 insertions(+), 101 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index afc5b908..bf496356 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -110,99 +110,3 @@ def fit_ramps_casertano( parameters = parameters * resultants_unit return parameters, variances - - -def fit_ramps_casertano_no_dq(resultants, read_noise, ma_table): - """Fit ramps following Casertano+2022, only using full ramps. - - This is a simpler implementation of fit_ramps_casertano, which doesn't - address the case of partial ramps broken by CRs. This case is easier - and can be done reasonably efficiently in pure python; results can be - compared with fit_ramps_casertano in for the case of unbroken ramps. - - Parameters - ---------- - resultants : np.ndarry[nresultants, npixel] - the resultants in electrons - read noise: float - the read noise in electrons - ma_table : list[list[int]] - the ma table prescription - - Returns - ------- - par : np.ndarray[nx, ny, 2] (float) - the best fit pedestal and slope for each pixel - var : np.ndarray[nx, ny, 3, 2, 2] (float) - the covariance matrix of par, for each of three noise terms: - the read noise, Poisson source noise, and total noise. - """ - nadd = len(resultants.shape) - 1 - if np.ndim(read_noise) <= 1: - read_noise = np.array(read_noise).reshape((1,) * nadd) - smax = resultants[-1] - s = smax / np.sqrt(read_noise**2 + smax) # Casertano+2022 Eq. 44 - ptable = np.array([ # Casertano+2022, Table 2 - [-np.inf, 0], [5, 0.4], [10, 1], [20, 3], [50, 6], [100, 10]]) - pp = ptable[np.searchsorted(ptable[:, 0], s) - 1, 1] - nn = np.array([x[1] for x in ma_table]) # number of reads in each resultant - tbar = ma_table_to_tbar(ma_table) - tau = ma_table_to_tau(ma_table) - tbarmid = (tbar[0] + tbar[-1]) / 2 - if nadd > 0: - newshape = ((-1,) + (1,) * nadd) - nn = nn.reshape(*newshape) - tbar = tbar.reshape(*newshape) - tau = tau.reshape(*newshape) - tbarmid = tbarmid.reshape(*newshape) - ww = ( # Casertano+22, Eq. 45 - (1 + pp)[None, ...] * nn - / (1 + pp[None, ...] * nn) - * np.abs(tbar - tbarmid) ** pp[None, ...]) - - # Casertano+22 Eq. 35 - f0 = np.sum(ww, axis=0) - f1 = np.sum(ww * tbar, axis=0) - f2 = np.sum(ww * tbar**2, axis=0) - # Casertano+22 Eq. 36 - dd = f2 * f0 - f1 ** 2 - bad = dd == 0 - dd[bad] = 1 - # Casertano+22 Eq. 37 - kk = (f0[None, ...] * tbar - f1[None, ...]) * ww / ( - dd[None, ...]) - # shape: [n_resultant, ny, nx] - ff = np.sum(kk * resultants, axis=0) # Casertano+22 Eq. 38 - # Casertano+22 Eq. 39 - vr = np.sum(kk**2 / nn, axis=0) * read_noise**2 - # Casertano+22 Eq. 
40 - vs1 = np.sum(kk**2 * tau, axis=0) - vs2inner = np.cumsum(kk * tbar, axis=0) - vs2inner = np.concatenate([0 * vs2inner[0][None, ...], vs2inner[:-1, ...]], axis=0) - vs2 = 2 * np.sum(vs2inner * kk, axis=0) - # sum_{i=1}^{j-1} K_i \bar{t}_i - # this is the inner of the two sums in the 2nd term of Eq. 40 - # Casertano+22 has some discussion of whether it's more efficient to do - # this as an explicit double sum or to construct the inner sum separately. - # We've made a lot of other quantities that are [nr, ny, nx] in size, - # so I don't feel bad about making another. Clearly a memory optimized - # code would work a lot harder to reuse a lot of variables above! - - vs = (vs1 + vs2) * ff - vs = np.clip(vs, 0, np.inf) - # we can estimate negative flux, but we really shouldn't add variance for - # that case! - - # match return values from RampFitInterpolator.fit_ramps - # we haven't explicitly calculated here the pedestal, its - # uncertainty, or covariance terms. We just fill - # with zeros. - - par = np.zeros(ff.shape + (2,), dtype='f4') - var = np.zeros(ff.shape + (3, 2, 2), dtype='f4') - par[..., 1] = ff - var[..., 0, 1, 1] = vr - var[..., 1, 1, 1] = vs - var[..., 2, 1, 1] = vr + vs - - return par, var diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index f5adc45b..a7acf010 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -118,7 +118,7 @@ def ramp_data(base_ramp_data): The read pattern used for testing t_bar: The t_bar values for the read pattern - tau: + tau: The tau values for the read pattern n_reads: The number of reads for the read pattern @@ -243,7 +243,7 @@ def pixel_data(ramp_data): Resultants for a single pixel t_bar: The t_bar values for the read pattern used for the resultants - tau: + tau: The tau values for the read pattern used for the resultants n_reads: The number of reads for the read pattern used for the resultants @@ -277,7 +277,8 @@ def test_make_pixel(pixel_data, use_jump): double_gen = zip(pixel['local_slopes'][Diff.double], pixel['var_read_noise'][Diff.double]) for index, (local_slope_1, var_read_noise_1) in enumerate(single_gen): - assert local_slope_1 == (resultants[index + 1] - resultants[index]) / (t_bar[index + 1] - t_bar[index]) + assert local_slope_1 == ( + (resultants[index + 1] - resultants[index]) / (t_bar[index + 1] - t_bar[index])) assert var_read_noise_1 == READ_NOISE * ( np.float32(1 / n_reads[index + 1]) + np.float32(1 / n_reads[index]) ) @@ -314,7 +315,6 @@ def detector_data(ramp_data): The read noise vector for those pixels read_pattern: The read pattern used for the resultants - """ read_pattern, *_ = ramp_data read_noise = np.ones(N_PIXELS, dtype=np.float32) * READ_NOISE @@ -367,7 +367,7 @@ def test_fit_ramps_no_dq(detector_data, use_jump): total_var = fit['average']['read_var'] + fit['average']['poisson_var'] chi2 += (fit['average']['slope'] - FLUX)**2 / total_var - chi2 /= N_PIXELS + chi2 /= N_PIXELS assert np.abs(chi2 - 1) < CHI2_TOL @@ -413,6 +413,18 @@ def jump_data(detector_data): Note this specifically checks that we can detect jumps in any read, meaning it has an insurance check that a jump has been placed in every single read position. 
+ + Returns: + resultants + The resultants for a large number of pixels + read_noise: + The read noise vector for those pixels + read_pattern: + The read pattern used for the resultants + jump_reads: + Index of read where a jump occurs for each pixel + jump_resultants: + Index of resultant where a jump occurs for each pixel """ resultants, read_noise, read_pattern = detector_data From 8e1b70c65b86731a58dd4d2284b8b00341f7c3f3 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 20:24:07 -0400 Subject: [PATCH 79/90] Add ability to override jump detection parameters from defaults --- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 21 ++++++++++++++---- src/stcal/ramp_fitting/ols_cas22_fit.py | 22 +++++++++++++++++-- tests/test_jump_cas22.py | 15 +++++++++++++ tests/test_ramp_fitting_cas22.py | 11 ++++++++-- 4 files changed, 61 insertions(+), 8 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 3b8b1a38..32031f7f 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -6,12 +6,19 @@ from libcpp.list cimport list as cpp_list from libcpp.deque cimport deque cimport cython -from stcal.ramp_fitting.ols_cas22._core cimport ( - RampFits, RampIndex, Thresh, metadata_from_read_pattern, init_ramps, Parameter, Variance) +from stcal.ramp_fitting.ols_cas22._core cimport (RampFits, RampIndex, Thresh, + metadata_from_read_pattern, init_ramps, + Parameter, Variance) from stcal.ramp_fitting.ols_cas22._fixed cimport fixed_values_from_metadata, FixedValues from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel +# Fix the default Threshold values at compile time these values cannot be overridden +# dynamically at runtime. +DEF DefaultIntercept = 5.5 +DEF DefaultConstant = 1/3.0 + + @cython.boundscheck(False) @cython.wraparound(False) def fit_ramps(np.ndarray[float, ndim=2] resultants, @@ -19,7 +26,9 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, np.ndarray[float, ndim=1] read_noise, float read_time, list[list[int]] read_pattern, - bool use_jump=False): + bool use_jump=False, + float intercept=DefaultIntercept, + float constant=DefaultConstant): """Fit ramps using the Casertano+22 algorithm. This implementation fits all ramp segments between bad pixels @@ -43,6 +52,10 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, use_jump : bool If True, use the jump detection algorithm to identify CRs. If False, use the DQ array to identify CRs. + intercept : float + The intercept value for the threshold function. Default=5.5 + constant : float + The constant value for the threshold function. Default=1/3.0 Returns ------- @@ -58,7 +71,7 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, # Pre-compute data for all pixels cdef FixedValues fixed = fixed_values_from_metadata(metadata_from_read_pattern(read_pattern, read_time), - Thresh(5.5, 1/3.0), + Thresh(intercept, constant), use_jump) # Compute all the initial sets of ramps diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index bf496356..df3a8b4e 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -41,7 +41,10 @@ def fit_ramps_casertano( read_noise, read_time, read_pattern, - use_jump=False + use_jump=False, + *, + threshold_intercept=None, + threshold_constant=None, ): """Fit ramps following Casertano+2022, including averaging partial ramps. 
@@ -66,6 +69,12 @@ def fit_ramps_casertano( use_jump : bool If True, use the jump detection algorithm to identify CRs. If False, use the DQ array to identify CRs. + threshold_intercept : float (optional, keyword-only) + Override the intercept parameter for threshold for the jump detection + algorithm. + theshold_constant : float (optional, keyword-only) + Override the constant parameter for threshold for the jump detection + algorithm. Returns ------- @@ -76,6 +85,14 @@ def fit_ramps_casertano( the read noise, Poisson source noise, and total noise. """ + # Trickery to avoid having to specify the defaults for the threshold + # parameters outside the cython code. + kwargs = {} + if threshold_intercept is not None: + kwargs['intercept'] = threshold_intercept + if threshold_constant is not None: + kwargs['constant'] = threshold_constant + resultants_unit = getattr(resultants, 'unit', None) if resultants_unit is not None: resultants = resultants.to(u.electron).value @@ -100,7 +117,8 @@ def fit_ramps_casertano( read_noise.reshape(-1), read_time, read_pattern, - use_jump) + use_jump, + **kwargs) if resultants.shape != orig_shape: parameters = parameters[0] diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index a7acf010..9c10c4c3 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -517,3 +517,18 @@ def test_find_jumps(jump_data): # Check that the average chi2 is ~1. chi2 /= N_PIXELS assert np.abs(chi2 - 1) < CHI2_TOL + + +def test_override_default_threshold(jump_data): + """This tests that we can override the default jump detection threshold constants""" + resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data + dq = np.zeros(resultants.shape, dtype=np.int32) + + _, standard, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) + _, override, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True, + intercept=0, constant=0) + + # All this is intended to do is show that with all other things being equal passing non-default + # threshold parameters changes the results. + assert (standard != override).any() + diff --git a/tests/test_ramp_fitting_cas22.py b/tests/test_ramp_fitting_cas22.py index 9ffe0992..0ed481bd 100644 --- a/tests/test_ramp_fitting_cas22.py +++ b/tests/test_ramp_fitting_cas22.py @@ -21,7 +21,7 @@ def test_simulated_ramps(): read_noise = np.ones(resultants.shape[1], dtype=np.float32) * read_noise par, var = ramp.fit_ramps_casertano( - resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern=read_pattern) + resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern) chi2dof_slope = np.sum((par[:, 1] - flux)**2 / var[:, 2]) / ntrial assert np.abs(chi2dof_slope - 1) < 0.03 @@ -30,7 +30,14 @@ def test_simulated_ramps(): bad = np.random.uniform(size=resultants.shape) > 0.7 dq |= bad par, var = ramp.fit_ramps_casertano( - resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern=read_pattern) + resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, + threshold_constant=0, threshold_intercept=0) # set the threshold parameters + # to demo the interface. This + # will raise an error if + # the interface changes, but + # does not effect the computation + # since jump detection is off in + # this case. # only use okay ramps # ramps passing the below criterion have at least two adjacent valid reads # i.e., we can make a measurement from them. 
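A minimal usage sketch of the keyword-only threshold overrides added in the patch above, assuming illustrative inputs (the read pattern, flux level, and pixel count are invented for the example; only the fit_ramps_casertano signature and the parameter/variance column layout come from this patch series):

    import numpy as np

    from stcal.ramp_fitting import ols_cas22_fit as ramp

    READ_TIME = 3.04  # Roman FRAME_TIME, as used in the tests
    read_pattern = [[1], [2, 3], [4], [5, 6, 7, 8], [9, 10], [11]]

    # Build a noiseless 100 e-/s ramp for four pixels so the example is
    # self-contained; real callers pass detector resultants instead.
    t_bar = np.array([np.mean(reads) * READ_TIME for reads in read_pattern])
    resultants = np.outer(t_bar, np.full(4, 100.0)).astype(np.float32)
    dq = np.zeros(resultants.shape, dtype=np.int32)
    read_noise = np.full(resultants.shape[1], 5.0, dtype=np.float32)

    # Omitting the two keyword-only arguments keeps the compile-time defaults
    # (intercept=5.5, constant=1/3); passing them forwards the overrides to
    # the Cython fit_ramps threshold.
    parameters, variances = ramp.fit_ramps_casertano(
        resultants, dq, read_noise, READ_TIME, read_pattern,
        use_jump=True,
        threshold_intercept=6.0,
        threshold_constant=0.25,
    )

    slopes = parameters[:, 1]    # Parameter.slope column
    total_var = variances[:, 2]  # Variance.total_var column

Because the overrides default to None in the Python wrapper and are only placed into the forwarded kwargs when given, callers that never touch the thresholds still get the values compiled into the extension.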
From 208fef771be51901b6a80c8d8618a12b7575c09c Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Thu, 5 Oct 2023 20:52:46 -0400 Subject: [PATCH 80/90] Add ability to get output dq array marking jump resultants --- src/stcal/ramp_fitting/ols_cas22/__init__.py | 4 +-- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 4 +++ .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 10 ++++-- src/stcal/ramp_fitting/ols_cas22_fit.py | 2 +- tests/test_jump_cas22.py | 31 ++++++++++++++----- 5 files changed, 39 insertions(+), 12 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/__init__.py b/src/stcal/ramp_fitting/ols_cas22/__init__.py index 4a5480d5..8b2a69ab 100644 --- a/src/stcal/ramp_fitting/ols_cas22/__init__.py +++ b/src/stcal/ramp_fitting/ols_cas22/__init__.py @@ -1,4 +1,4 @@ from ._fit_ramps import fit_ramps -from ._core import Parameter, Variance, Diff +from ._core import Parameter, Variance, Diff, RampJumpDQ -__all__ = ['fit_ramps', 'Parameter', 'Variance', 'Diff'] +__all__ = ['fit_ramps', 'Parameter', 'Variance', 'Diff', 'RampJumpDQ'] diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 8b5494e3..9f14c757 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -48,6 +48,10 @@ cpdef enum Variance: total_var = 2 +cpdef enum RampJumpDQ: + JUMP_DET = 4 + + cdef float threshold(Thresh thresh, float slope) cdef float get_power(float s) cdef deque[stack[RampIndex]] init_ramps(int[:, :] dq) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 32031f7f..65f40d90 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -8,7 +8,7 @@ cimport cython from stcal.ramp_fitting.ols_cas22._core cimport (RampFits, RampIndex, Thresh, metadata_from_read_pattern, init_ramps, - Parameter, Variance) + Parameter, Variance, RampJumpDQ) from stcal.ramp_fitting.ols_cas22._fixed cimport fixed_values_from_metadata, FixedValues from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel @@ -85,6 +85,9 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, cdef np.ndarray[float, ndim=2] parameters = np.zeros((n_pixels, 2), dtype=np.float32) cdef np.ndarray[float, ndim=2] variances = np.zeros((n_pixels, 3), dtype=np.float32) + # Copy the dq array so we can modify it without fear + cdef np.ndarray[int, ndim=2] fit_dq = dq.copy() + # Perform all of the fits cdef RampFits fit cdef int index @@ -99,6 +102,9 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, variances[index, Variance.poisson_var] = fit.average.poisson_var variances[index, Variance.total_var] = fit.average.read_var + fit.average.poisson_var + for jump in fit.jumps: + fit_dq[jump, index] = RampJumpDQ.JUMP_DET + ramp_fits.push_back(fit) - return ramp_fits, parameters, variances + return ramp_fits, parameters, variances, fit_dq diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index df3a8b4e..94d38242 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -111,7 +111,7 @@ def fit_ramps_casertano( dq = dq.reshape(orig_shape + (1,)) read_noise = read_noise.reshape(orig_shape[1:] + (1,)) - _, parameters, variances = ols_cas22.fit_ramps( + _, parameters, variances, _ = ols_cas22.fit_ramps( resultants.reshape(resultants.shape[0], -1), dq.reshape(resultants.shape[0], -1), read_noise.reshape(-1), diff --git a/tests/test_jump_cas22.py 
b/tests/test_jump_cas22.py index 9c10c4c3..cf34a989 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -6,7 +6,7 @@ from stcal.ramp_fitting.ols_cas22._wrappers import init_ramps from stcal.ramp_fitting.ols_cas22._wrappers import run_threshold, fixed_values_from_metadata, make_pixel -from stcal.ramp_fitting.ols_cas22 import fit_ramps, Parameter, Variance, Diff +from stcal.ramp_fitting.ols_cas22 import fit_ramps, Parameter, Variance, Diff, RampJumpDQ RNG = np.random.default_rng(619) @@ -331,7 +331,7 @@ def test_fit_ramps_array_outputs(detector_data, use_jump): resultants, read_noise, read_pattern = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) - fits, parameters, variances = fit_ramps( + fits, parameters, variances, _ = fit_ramps( resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump ) @@ -356,7 +356,7 @@ def test_fit_ramps_no_dq(detector_data, use_jump): resultants, read_noise, read_pattern = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) - fits, _, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) + fits, *_ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) assert len(fits) == N_PIXELS # sanity check that a fit is output for each pixel # Check that the chi2 for the resulting fit relative to the assumed flux is ~1 @@ -387,7 +387,7 @@ def test_fit_ramps_dq(detector_data, use_jump): # i.e., we can make a measurement from them. okay = np.sum((dq[1:, :] == 0) & (dq[:-1, :] == 0), axis=0) != 0 - fits, _, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) + fits, *_ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) assert len(fits) == N_PIXELS # sanity check that a fit is output for each pixel chi2 = 0 @@ -465,7 +465,7 @@ def test_find_jumps(jump_data): resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data dq = np.zeros(resultants.shape, dtype=np.int32) - fits, _, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) + fits, *_ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) assert len(fits) == len(jump_reads) # sanity check that a fit/jump is set for every pixel chi2 = 0 @@ -524,11 +524,28 @@ def test_override_default_threshold(jump_data): resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data dq = np.zeros(resultants.shape, dtype=np.int32) - _, standard, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) - _, override, _ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True, + _, standard, *_ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) + _, override, *_ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True, intercept=0, constant=0) # All this is intended to do is show that with all other things being equal passing non-default # threshold parameters changes the results. 
assert (standard != override).any() + + +def test_jump_dq_set(jump_data): + # Check the DQ flag value to start + assert RampJumpDQ.JUMP_DET == 2**2 + + resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data + dq = np.zeros(resultants.shape, dtype=np.int32) + + fits, *_, fit_dq = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) + + for fit, pixel_dq in zip(fits, fit_dq.transpose()): + # Check that all jumps found get marked + assert (pixel_dq[fit['jumps']] == RampJumpDQ.JUMP_DET).all() + + # Check that dq flags for jumps are only set if the jump is marked + assert set(np.where(pixel_dq == RampJumpDQ.JUMP_DET)[0]) == set(fit['jumps']) From f7d25f535351cdba9ae97fbdc626acd8fe40a4d9 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 6 Oct 2023 10:27:06 -0400 Subject: [PATCH 81/90] Clean up essentially duplicated tests --- tests/test_jump_cas22.py | 92 ++++++++++++++++++---------------------- 1 file changed, 41 insertions(+), 51 deletions(-) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index cf34a989..f372179a 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -323,75 +323,40 @@ def detector_data(ramp_data): return resultants, read_noise, read_pattern -@pytest.mark.parametrize("use_jump", [True, False]) -def test_fit_ramps_array_outputs(detector_data, use_jump): - """ - Test that the array outputs line up with the dictionary output - """ - resultants, read_noise, read_pattern = detector_data - dq = np.zeros(resultants.shape, dtype=np.int32) - - fits, parameters, variances, _ = fit_ramps( - resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump - ) - - for fit, par, var in zip(fits, parameters, variances): - assert par[Parameter.intercept] == 0 - assert par[Parameter.slope] == fit['average']['slope'] - - assert var[Variance.read_var] == fit['average']['read_var'] - assert var[Variance.poisson_var] == fit['average']['poisson_var'] - assert var[Variance.total_var] == np.float32( - fit['average']['read_var'] + fit['average']['poisson_var'] - ) - - -@pytest.mark.parametrize("use_jump", [True, False]) -def test_fit_ramps_no_dq(detector_data, use_jump): - """ - Test fitting ramps with no dq flags set on data which has no jumps - Since no jumps are simulated in the data, jump detection shouldn't pick - up any jumps. - """ - resultants, read_noise, read_pattern = detector_data - dq = np.zeros(resultants.shape, dtype=np.int32) - - fits, *_ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) - assert len(fits) == N_PIXELS # sanity check that a fit is output for each pixel - - # Check that the chi2 for the resulting fit relative to the assumed flux is ~1 - chi2 = 0 - for fit in fits: - assert len(fit['fits']) == 1 # only one fit per pixel since no dq/jump - - total_var = fit['average']['read_var'] + fit['average']['poisson_var'] - chi2 += (fit['average']['slope'] - FLUX)**2 / total_var - - chi2 /= N_PIXELS - - assert np.abs(chi2 - 1) < CHI2_TOL - @pytest.mark.parametrize("use_jump", [True, False]) -def test_fit_ramps_dq(detector_data, use_jump): +@pytest.mark.parametrize("use_dq", [True, False]) +def test_fit_ramps(detector_data, use_jump, use_dq): """ - Test fitting ramps with dq flags set + Test fitting ramps Since no jumps are simulated in the data, jump detection shouldn't pick up any jumps. 
""" resultants, read_noise, read_pattern = detector_data - dq = (RNG.uniform(size=resultants.shape) > 1).astype(np.int32) + dq = ( + (RNG.uniform(size=resultants.shape) > 1).astype(np.int32) if use_dq else + np.zeros(resultants.shape, dtype=np.int32) + ) # only use okay ramps # ramps passing the below criterion have at least two adjacent valid reads # i.e., we can make a measurement from them. okay = np.sum((dq[1:, :] == 0) & (dq[:-1, :] == 0), axis=0) != 0 + assert okay.dtype == bool + + # Note that for use_dq = False, okay == True for all ramps, so we perform + # a sanity check that the above criterion is correct + if not use_dq: + assert okay.all() fits, *_ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) assert len(fits) == N_PIXELS # sanity check that a fit is output for each pixel chi2 = 0 for fit, use in zip(fits, okay): + if not use_dq: + assert len(fit['fits']) == 1 # only one fit per pixel since no dq/jump in this case + if use: # Add okay ramps to chi2 total_var = fit['average']['read_var'] + fit['average']['poisson_var'] @@ -402,10 +367,35 @@ def test_fit_ramps_dq(detector_data, use_jump): assert fit['average']['read_var'] == 0 assert fit['average']['poisson_var'] == 0 + assert use_dq # sanity check that this branch is only encountered when use_dq = True + chi2 /= np.sum(okay) assert np.abs(chi2 - 1) < CHI2_TOL +@pytest.mark.parametrize("use_jump", [True, False]) +def test_fit_ramps_array_outputs(detector_data, use_jump): + """ + Test that the array outputs line up with the dictionary output + """ + resultants, read_noise, read_pattern = detector_data + dq = np.zeros(resultants.shape, dtype=np.int32) + + fits, parameters, variances, _ = fit_ramps( + resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump + ) + + for fit, par, var in zip(fits, parameters, variances): + assert par[Parameter.intercept] == 0 + assert par[Parameter.slope] == fit['average']['slope'] + + assert var[Variance.read_var] == fit['average']['read_var'] + assert var[Variance.poisson_var] == fit['average']['poisson_var'] + assert var[Variance.total_var] == np.float32( + fit['average']['read_var'] + fit['average']['poisson_var'] + ) + + @pytest.fixture(scope="module") def jump_data(detector_data): """ From f9ca47f9d9017982e4f27a98f50c6f95a19f6190 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 6 Oct 2023 10:51:32 -0400 Subject: [PATCH 82/90] Clean up ramp_fit outputs so they are named tuples --- src/stcal/ramp_fitting/ols_cas22/__init__.py | 4 +-- .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 21 +++++++++++- src/stcal/ramp_fitting/ols_cas22_fit.py | 12 ++++--- tests/test_jump_cas22.py | 31 ++++++++--------- tests/test_ramp_fitting_cas22.py | 33 +++++++++++++++---- 5 files changed, 69 insertions(+), 32 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/__init__.py b/src/stcal/ramp_fitting/ols_cas22/__init__.py index 8b2a69ab..d07f4ddf 100644 --- a/src/stcal/ramp_fitting/ols_cas22/__init__.py +++ b/src/stcal/ramp_fitting/ols_cas22/__init__.py @@ -1,4 +1,4 @@ -from ._fit_ramps import fit_ramps +from ._fit_ramps import fit_ramps, RampFitOutputs from ._core import Parameter, Variance, Diff, RampJumpDQ -__all__ = ['fit_ramps', 'Parameter', 'Variance', 'Diff', 'RampJumpDQ'] +__all__ = ['fit_ramps', 'RampFitOutputs', 'Parameter', 'Variance', 'Diff', 'RampJumpDQ'] diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 65f40d90..8f0d132e 100644 --- 
a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -12,12 +12,31 @@ from stcal.ramp_fitting.ols_cas22._core cimport (RampFits, RampIndex, Thresh, from stcal.ramp_fitting.ols_cas22._fixed cimport fixed_values_from_metadata, FixedValues from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel +from typing import NamedTuple + # Fix the default Threshold values at compile time these values cannot be overridden # dynamically at runtime. DEF DefaultIntercept = 5.5 DEF DefaultConstant = 1/3.0 +class RampFitOutputs(NamedTuple): + """ + Simple tuple wrapper for outputs from the ramp fitting algorithm + This clarifies the meaning of the outputs via naming them something + descriptive. + """ + + fits: list + parameters: np.ndarray + variances: np.ndarray + dq: np.ndarray + # def __init__(self, fits, parameters, variances, dq): + # self.fits = fits + # self.parameters = parameters + # self.variances = variances + # self.dq = dq + @cython.boundscheck(False) @cython.wraparound(False) @@ -107,4 +126,4 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, ramp_fits.push_back(fit) - return ramp_fits, parameters, variances, fit_dq + return RampFitOutputs(ramp_fits, parameters, variances, fit_dq) diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index 94d38242..945797e8 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -111,7 +111,7 @@ def fit_ramps_casertano( dq = dq.reshape(orig_shape + (1,)) read_noise = read_noise.reshape(orig_shape[1:] + (1,)) - _, parameters, variances, _ = ols_cas22.fit_ramps( + output = ols_cas22.fit_ramps( resultants.reshape(resultants.shape[0], -1), dq.reshape(resultants.shape[0], -1), read_noise.reshape(-1), @@ -120,11 +120,13 @@ def fit_ramps_casertano( use_jump, **kwargs) + parameters = output.parameters + variances = output.variances if resultants.shape != orig_shape: - parameters = parameters[0] - variances = variances[0] + parameters = output.parameters[0] + variances = output.variances[0] if resultants_unit is not None: - parameters = parameters * resultants_unit + parameters = output.parameters * resultants_unit - return parameters, variances + return ols_cas22.RampFitOutputs(output.fits, parameters, variances, output.dq) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index f372179a..70c699da 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -349,11 +349,11 @@ def test_fit_ramps(detector_data, use_jump, use_dq): if not use_dq: assert okay.all() - fits, *_ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) - assert len(fits) == N_PIXELS # sanity check that a fit is output for each pixel + output = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) + assert len(output.fits) == N_PIXELS # sanity check that a fit is output for each pixel chi2 = 0 - for fit, use in zip(fits, okay): + for fit, use in zip(output.fits, okay): if not use_dq: assert len(fit['fits']) == 1 # only one fit per pixel since no dq/jump in this case @@ -381,11 +381,9 @@ def test_fit_ramps_array_outputs(detector_data, use_jump): resultants, read_noise, read_pattern = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) - fits, parameters, variances, _ = fit_ramps( - resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump - ) + output = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, 
use_jump=use_jump) - for fit, par, var in zip(fits, parameters, variances): + for fit, par, var in zip(output.fits, output.parameters, output.variances): assert par[Parameter.intercept] == 0 assert par[Parameter.slope] == fit['average']['slope'] @@ -455,11 +453,11 @@ def test_find_jumps(jump_data): resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data dq = np.zeros(resultants.shape, dtype=np.int32) - fits, *_ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) - assert len(fits) == len(jump_reads) # sanity check that a fit/jump is set for every pixel + output = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) + assert len(output.fits) == len(jump_reads) # sanity check that a fit/jump is set for every pixel chi2 = 0 - for fit, jump_index, resultant_index in zip(fits, jump_reads, jump_resultants): + for fit, jump_index, resultant_index in zip(output.fits, jump_reads, jump_resultants): # Check that the jumps are detected correctly if jump_index == 0: @@ -514,13 +512,13 @@ def test_override_default_threshold(jump_data): resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data dq = np.zeros(resultants.shape, dtype=np.int32) - _, standard, *_ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) - _, override, *_ = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True, - intercept=0, constant=0) + standard = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) + override = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True, + intercept=0, constant=0) # All this is intended to do is show that with all other things being equal passing non-default # threshold parameters changes the results. - assert (standard != override).any() + assert (standard.parameters != override.parameters).any() def test_jump_dq_set(jump_data): @@ -530,12 +528,11 @@ def test_jump_dq_set(jump_data): resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data dq = np.zeros(resultants.shape, dtype=np.int32) - fits, *_, fit_dq = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) + output = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) - for fit, pixel_dq in zip(fits, fit_dq.transpose()): + for fit, pixel_dq in zip(output.fits, output.dq.transpose()): # Check that all jumps found get marked assert (pixel_dq[fit['jumps']] == RampJumpDQ.JUMP_DET).all() # Check that dq flags for jumps are only set if the jump is marked assert set(np.where(pixel_dq == RampJumpDQ.JUMP_DET)[0]) == set(fit['jumps']) - diff --git a/tests/test_ramp_fitting_cas22.py b/tests/test_ramp_fitting_cas22.py index 0ed481bd..60c6ff0c 100644 --- a/tests/test_ramp_fitting_cas22.py +++ b/tests/test_ramp_fitting_cas22.py @@ -2,7 +2,9 @@ """ Unit tests for ramp-fitting functions. 
""" +import astropy.units as u import numpy as np +import pytest from stcal.ramp_fitting import ols_cas22_fit as ramp @@ -13,23 +15,33 @@ ROMAN_READ_TIME = 3.04 -def test_simulated_ramps(): +@pytest.mark.parametrize("use_unit", [True, False]) +def test_simulated_ramps(use_unit): ntrial = 100000 read_pattern, flux, read_noise, resultants = simulate_many_ramps(ntrial=ntrial) + if use_unit: + resultants = resultants * u.electron + dq = np.zeros(resultants.shape, dtype=np.int32) read_noise = np.ones(resultants.shape[1], dtype=np.float32) * read_noise - par, var = ramp.fit_ramps_casertano( + output = ramp.fit_ramps_casertano( resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern) - chi2dof_slope = np.sum((par[:, 1] - flux)**2 / var[:, 2]) / ntrial + if use_unit: + assert output.parameters.unit == u.electron + parameters = output.parameters.value + else: + parameters = output.parameters + + chi2dof_slope = np.sum((parameters[:, 1] - flux)**2 / output.variances[:, 2]) / ntrial assert np.abs(chi2dof_slope - 1) < 0.03 # now let's mark a bunch of the ramps as compromised. bad = np.random.uniform(size=resultants.shape) > 0.7 dq |= bad - par, var = ramp.fit_ramps_casertano( + output = ramp.fit_ramps_casertano( resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, threshold_constant=0, threshold_intercept=0) # set the threshold parameters # to demo the interface. This @@ -42,10 +54,17 @@ def test_simulated_ramps(): # ramps passing the below criterion have at least two adjacent valid reads # i.e., we can make a measurement from them. m = np.sum((dq[1:, :] == 0) & (dq[:-1, :] == 0), axis=0) != 0 - chi2dof_slope = np.sum((par[m, 1] - flux)**2 / var[m, 2]) / np.sum(m) + + if use_unit: + assert output.parameters.unit == u.electron + parameters = output.parameters.value + else: + parameters = output.parameters + + chi2dof_slope = np.sum((parameters[m, 1] - flux)**2 / output.variances[m, 2]) / np.sum(m) assert np.abs(chi2dof_slope - 1) < 0.03 - assert np.all(par[~m, 1] == 0) - assert np.all(var[~m, 1] == 0) + assert np.all(parameters[~m, 1] == 0) + assert np.all(output.variances[~m, 1] == 0) # ######### From d63b8054e075ffa7a4454b42c6f91d92f20efd14 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 6 Oct 2023 11:15:32 -0400 Subject: [PATCH 83/90] Update docstrings a little --- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 5 +++ .../ramp_fitting/ols_cas22/_fit_ramps.pyx | 35 ++++++++++++------- 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index f8a3adf3..64376be1 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -52,6 +52,11 @@ Enums poisson_var: poisson variance computed total_var: total variance computed (read_var + poisson_var) + RampJumpDQ + This enum is to specify the DQ flags for Ramp/Jump detection + + JUMP_DET: jump detected + Functions --------- get_power diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 8f0d132e..041454c9 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -25,17 +25,27 @@ class RampFitOutputs(NamedTuple): Simple tuple wrapper for outputs from the ramp fitting algorithm This clarifies the meaning of the outputs via naming them something descriptive. 
- """ + Attributes + ---------- + fits: list of RampFits + the raw ramp fit outputs, these are all structs which will get mapped to + python dictionaries. + parameters: np.ndarray[n_pixel, 2] + the slope and intercept for each pixel's ramp fit. see Parameter enum + for indexing indicating slope/intercept in the second dimension. + variances: np.ndarray[n_pixel, 3] + the read, poisson, and total variances for each pixel's ramp fit. + see Variance enum for indexing indicating read/poisson/total in the + second dimension. + dq: np.ndarray[n_resultants, n_pixel] + the dq array, with additional flags set for jumps detected by the + jump detection algorithm. + """ fits: list parameters: np.ndarray variances: np.ndarray dq: np.ndarray - # def __init__(self, fits, parameters, variances, dq): - # self.fits = fits - # self.parameters = parameters - # self.variances = variances - # self.dq = dq @cython.boundscheck(False) @@ -49,12 +59,11 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, float intercept=DefaultIntercept, float constant=DefaultConstant): """Fit ramps using the Casertano+22 algorithm. - - This implementation fits all ramp segments between bad pixels - marked in the dq image with values not equal to zero. So the - number of fit ramps can be larger than the number of pixels. - The derived slopes, corresponding variances, and the locations of - the ramps in each pixel are given in the returned dictionary. + This implementation uses the Cas22 algorithm to fit ramps, where + ramps are fit between bad resultants marked by dq flags for each pixel + which are not equal to zero. If use_jump is True, it additionally uses + jump detection to mark additional resultants for each pixel as bad if + a jump is suspected in them. Parameters ---------- @@ -78,7 +87,7 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, Returns ------- - A list of RampFits objects, one for each pixel. 
+ A RampFitOutputs tuple """ cdef int n_pixels, n_resultants n_resultants = resultants.shape[0] From d364ac83685d9f2375343f5788de77bf66ea7a25 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 6 Oct 2023 11:48:09 -0400 Subject: [PATCH 84/90] Fix dq test to actually produce dq flags --- tests/test_jump_cas22.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 70c699da..e8c56602 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -16,6 +16,7 @@ FLUX = 100 JUMP_VALUE = 10_000 CHI2_TOL = 0.03 +GOOD_PROB = 0.7 @pytest.fixture(scope="module") @@ -334,7 +335,7 @@ def test_fit_ramps(detector_data, use_jump, use_dq): """ resultants, read_noise, read_pattern = detector_data dq = ( - (RNG.uniform(size=resultants.shape) > 1).astype(np.int32) if use_dq else + (RNG.uniform(size=resultants.shape) > GOOD_PROB).astype(np.int32) if use_dq else np.zeros(resultants.shape, dtype=np.int32) ) From 4541e2213da0c817970b4be843ec359972422826 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 6 Oct 2023 15:03:05 -0400 Subject: [PATCH 85/90] Add further documentation --- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 18 +++++---- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 10 +++-- tests/test_jump_cas22.py | 45 +++++++++++++++------ 3 files changed, 49 insertions(+), 24 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index fe0dbdd5..6296bbaf 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -55,24 +55,26 @@ cdef class FixedValues: the jump detection statistics. These are formed from the reciprocal sum of the number of reads. single sum of reciprocal n_reads: - recip[Diff.single, :] = ((1/n_reads[i+1]) + (1/n_reads[i])) + read_recip_coeffs[Diff.single, :] = ((1/n_reads[i+1]) + (1/n_reads[i])) double sum of reciprocal n_reads: - recip[Diff.double, :] = ((1/n_reads[i+2]) + (1/n_reads[i])) + read_recip_coeffs[Diff.double, :] = ((1/n_reads[i+2]) + (1/n_reads[i])) var_slope_coeffs : float[:, :] Coefficients for the slope portion of the variance used to compute the jump detection statistics, which happend to be fixed for any given ramp fit. single of slope variance term: - slope_var[Diff.single, :] = ([tau[i] + tau[i+1] - min(t_bar[i], t_bar[i+1])) + var_slope_coeffs[Diff.single, :] = (tau[i] + tau[i+1] + - min(t_bar[i], t_bar[i+1])) double of slope variance term: - slope_var[Diff.double, :] = ([tau[i] + tau[i+2] - min(t_bar[i], t_bar[i+2])) + var_slope_coeffs[Diff.double, :] = (tau[i] + tau[i+2] + - min(t_bar[i], t_bar[i+2])) Notes ----- - - t_bar_diffs, read_recip_coeffs, var_slope_coeffs are only computed if - use_jump is True. These values represent reused computations for jump - detection which are used by every pixel for jump detection. They are - computed once and stored in the FixedValues for reuse by all pixels. + - t_bar_diffs, t_bar_diff_sqrs, read_recip_coeffs, var_slope_coeffs are only + computed if use_jump is True. These values represent reused computations + for jump detection which are used by every pixel for jump detection. They + are computed once and stored in the FixedValues for reuse by all pixels. - The computations are done using vectorized operations for some performance increases. However, this is marginal compaired with the performance increase from pre-computing the values and reusing them. 
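Note on the coefficient arrays documented above: they are pixel-independent and are pre-computed once per call, then shared by every pixel during jump detection. As a point of reference only, the short NumPy sketch below shows how those quantities could be assembled from the read-pattern metadata; the helper name, the NaN padding of the trailing entries, and the stacked (single, double) row layout are illustrative assumptions, not the actual code in _fixed.pyx.

import numpy as np

def sketch_fixed_values(t_bar, tau, n_reads):
    # t_bar, tau: mean and variance-weighted mean resultant times;
    # n_reads: reads per resultant (see the ReadPatternMetadata struct).
    t_bar = np.asarray(t_bar, dtype=np.float32)
    tau = np.asarray(tau, dtype=np.float32)
    n_reads = np.asarray(n_reads, dtype=np.float32)

    def pad(x, step):
        # keep the "single" (step=1) and "double" (step=2) rows the same
        # length by NaN-padding the impossible trailing differences
        return np.pad(x, (0, step), constant_values=np.nan)

    t_bar_diffs = np.vstack([pad(t_bar[1:] - t_bar[:-1], 1),
                             pad(t_bar[2:] - t_bar[:-2], 2)])
    t_bar_diff_sqrs = t_bar_diffs ** 2
    read_recip_coeffs = np.vstack([pad(1 / n_reads[1:] + 1 / n_reads[:-1], 1),
                                   pad(1 / n_reads[2:] + 1 / n_reads[:-2], 2)])
    var_slope_coeffs = np.vstack([
        pad(tau[1:] + tau[:-1] - np.minimum(t_bar[1:], t_bar[:-1]), 1),
        pad(tau[2:] + tau[:-2] - np.minimum(t_bar[2:], t_bar[:-2]), 2),
    ])
    return t_bar_diffs, t_bar_diff_sqrs, read_recip_coeffs, var_slope_coeffs
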
diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index e376123e..6991811b 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -46,15 +46,17 @@ cdef class Pixel: local_slopes : float [:, :] These are the local slopes between the resultants for the pixel. single difference local slope: - delta[Diff.single, :] = (resultants[i+1] - resultants[i]) / (t_bar[i+1] - t_bar[i]) + local_slopes[Diff.single, :] = (resultants[i+1] - resultants[i]) + / (t_bar[i+1] - t_bar[i]) double difference local slope: - delta[Diff.double, :] = (resultants[i+2] - resultants[i]) / (t_bar[i+2] - t_bar[i]) + local_slopes[Diff.double, :] = (resultants[i+2] - resultants[i]) + / (t_bar[i+2] - t_bar[i]) var_read_noise : float [:, :] The read noise variance term of the jump statistics single difference read noise variance: - sigma[Diff.single, :] = read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) + var_read_noise[Diff.single, :] = read_noise * ((1/n_reads[i+1]) + (1/n_reads[i])) double difference read_noise variance: - sigma[Diff.doule, :] = read_noise * ((1/n_reads[i+2]) + (1/n_reads[i])) + var_read_noise[Diff.doule, :] = read_noise * ((1/n_reads[i+2]) + (1/n_reads[i])) Notes ----- diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index e8c56602..347e253f 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -9,12 +9,27 @@ from stcal.ramp_fitting.ols_cas22 import fit_ramps, Parameter, Variance, Diff, RampJumpDQ +# Purposefully set a fixed seed so that the tests in this module are deterministic RNG = np.random.default_rng(619) -ROMAN_READ_TIME = 3.04 -READ_NOISE = np.float32(5) -N_PIXELS = 100_000 + +# The read time is constant for the given telescope/instrument so we set it here +# to be the one for Roman as it is known to be a reasonable value +READ_TIME = 3.04 + +# Choose small read noise relative to the flux to make it extremely unlikely +# that the random process will "accidentally" generate a set of data, which +# can trigger jump detection. This makes it easier to cleanly test jump +# detection is doing what we expect. FLUX = 100 +READ_NOISE = np.float32(5) + +# Set a value for jumps which makes them obvious relative to the normal flux JUMP_VALUE = 10_000 + +# Choose reasonable values for arbitrary test parameters, these are kept the same +# across all tests to make it easier to isolate the effects of something using +# multiple tests. 
+N_PIXELS = 100_000 CHI2_TOL = 0.03 GOOD_PROB = 0.7 @@ -40,7 +55,7 @@ def base_ramp_data(): [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36] ] - yield read_pattern, metadata_from_read_pattern(read_pattern, ROMAN_READ_TIME) + yield read_pattern, metadata_from_read_pattern(read_pattern, READ_TIME) def test_metadata_from_read_pattern(base_ramp_data): @@ -215,7 +230,7 @@ def _generate_resultants(read_pattern, n_pixels=1): for _ in reads: # Compute the next value of the ramp # Using a Poisson process for the flux - ramp_value += RNG.poisson(FLUX * ROMAN_READ_TIME, size=n_pixels).astype(np.float32) + ramp_value += RNG.poisson(FLUX * READ_TIME, size=n_pixels).astype(np.float32) # Add to running total for the resultant resultant_total += ramp_value @@ -350,13 +365,19 @@ def test_fit_ramps(detector_data, use_jump, use_dq): if not use_dq: assert okay.all() - output = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) + output = fit_ramps(resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=use_jump) assert len(output.fits) == N_PIXELS # sanity check that a fit is output for each pixel chi2 = 0 for fit, use in zip(output.fits, okay): if not use_dq: - assert len(fit['fits']) == 1 # only one fit per pixel since no dq/jump in this case + # Check that the data generated does not generate any false positives + # for jumps as this data is reused for `test_find_jumps` below. + # This guarantees that all jumps detected in that test are the + # purposefully placed ones which we know about. So the `test_find_jumps` + # can focus on checking that the jumps found are the correct ones, + # and that all jumps introduced are detected properly. + assert len(fit['fits']) == 1 if use: # Add okay ramps to chi2 @@ -382,7 +403,7 @@ def test_fit_ramps_array_outputs(detector_data, use_jump): resultants, read_noise, read_pattern = detector_data dq = np.zeros(resultants.shape, dtype=np.int32) - output = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=use_jump) + output = fit_ramps(resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=use_jump) for fit, par, var in zip(output.fits, output.parameters, output.variances): assert par[Parameter.intercept] == 0 @@ -454,7 +475,7 @@ def test_find_jumps(jump_data): resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data dq = np.zeros(resultants.shape, dtype=np.int32) - output = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) + output = fit_ramps(resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=True) assert len(output.fits) == len(jump_reads) # sanity check that a fit/jump is set for every pixel chi2 = 0 @@ -513,8 +534,8 @@ def test_override_default_threshold(jump_data): resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data dq = np.zeros(resultants.shape, dtype=np.int32) - standard = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) - override = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True, + standard = fit_ramps(resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=True) + override = fit_ramps(resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=True, intercept=0, constant=0) # All this is intended to do is show that with all other things being equal passing non-default @@ -529,7 +550,7 @@ def test_jump_dq_set(jump_data): resultants, read_noise, read_pattern, jump_reads, jump_resultants = jump_data dq = 
np.zeros(resultants.shape, dtype=np.int32) - output = fit_ramps(resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, use_jump=True) + output = fit_ramps(resultants, dq, read_noise, READ_TIME, read_pattern, use_jump=True) for fit, pixel_dq in zip(output.fits, output.dq.transpose()): # Check that all jumps found get marked From 3b9b68d4c60cbf9945c9a2d2ce1ab53f9986ccee Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Fri, 6 Oct 2023 15:05:15 -0400 Subject: [PATCH 86/90] Remove a copy --- src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx index 041454c9..ad19fb28 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fit_ramps.pyx @@ -113,9 +113,6 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, cdef np.ndarray[float, ndim=2] parameters = np.zeros((n_pixels, 2), dtype=np.float32) cdef np.ndarray[float, ndim=2] variances = np.zeros((n_pixels, 3), dtype=np.float32) - # Copy the dq array so we can modify it without fear - cdef np.ndarray[int, ndim=2] fit_dq = dq.copy() - # Perform all of the fits cdef RampFits fit cdef int index @@ -131,8 +128,8 @@ def fit_ramps(np.ndarray[float, ndim=2] resultants, variances[index, Variance.total_var] = fit.average.read_var + fit.average.poisson_var for jump in fit.jumps: - fit_dq[jump, index] = RampJumpDQ.JUMP_DET + dq[jump, index] = RampJumpDQ.JUMP_DET ramp_fits.push_back(fit) - return RampFitOutputs(ramp_fits, parameters, variances, fit_dq) + return RampFitOutputs(ramp_fits, parameters, variances, dq) From 46bb0283cf122a924d11febf18242e196ecd4160 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 11 Oct 2023 09:22:41 -0400 Subject: [PATCH 87/90] Clarify variable name --- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index 64376be1..055b07bc 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -87,13 +87,13 @@ cdef float[2][6] PTABLE = [ @cython.boundscheck(False) @cython.wraparound(False) -cdef inline float get_power(float s): +cdef inline float get_power(float signal): """ Return the power from Casertano+22, Table 2 Parameters ---------- - s: float + signal: float signal from the resultants Returns @@ -102,7 +102,7 @@ cdef inline float get_power(float s): """ cdef int i for i in range(6): - if s < PTABLE[0][i]: + if signal < PTABLE[0][i]: return PTABLE[1][i - 1] return PTABLE[1][i] From 987790af14f952836a2d6a6b1dfb1fd18c6570f7 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 11 Oct 2023 09:50:14 -0400 Subject: [PATCH 88/90] Remove all core function wrappers --- src/stcal/ramp_fitting/ols_cas22/_core.pxd | 4 +- src/stcal/ramp_fitting/ols_cas22/_core.pyx | 40 +++++++++++++++++- .../ramp_fitting/ols_cas22/_wrappers.pyx | 36 +--------------- tests/test_jump_cas22.py | 42 ++++++++++++++----- 4 files changed, 73 insertions(+), 49 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pxd b/src/stcal/ramp_fitting/ols_cas22/_core.pxd index 9f14c757..f7fe7877 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pxd @@ -52,7 +52,7 @@ cpdef enum RampJumpDQ: JUMP_DET = 4 -cdef float threshold(Thresh thresh, float slope) +cpdef float threshold(Thresh thresh, 
float slope) cdef float get_power(float s) cdef deque[stack[RampIndex]] init_ramps(int[:, :] dq) -cdef ReadPatternMetadata metadata_from_read_pattern(list[list[int]] read_pattern, float read_time) +cpdef ReadPatternMetadata metadata_from_read_pattern(list[list[int]] read_pattern, float read_time) diff --git a/src/stcal/ramp_fitting/ols_cas22/_core.pyx b/src/stcal/ramp_fitting/ols_cas22/_core.pyx index 055b07bc..5c10cb4b 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_core.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_core.pyx @@ -63,10 +63,16 @@ Functions Return the power from Casertano+22, Table 2 threshold Compute jump threshold + - cpdef gives a python wrapper, but the python version of this method + is considered private, only to be used for testing init_ramps Find initial ramps for each pixel, accounts for DQ flags + - A python wrapper, _init_ramps_list, that adjusts types so they can + be directly inspected in python exists for testing purposes only. metadata_from_read_pattern Read the read pattern and derive the baseline metadata parameters needed + - cpdef gives a python wrapper, but the python version of this method + is considered private, only to be used for testing """ from libcpp.stack cimport stack from libcpp.deque cimport deque @@ -108,7 +114,7 @@ cdef inline float get_power(float signal): return PTABLE[1][i] -cdef inline float threshold(Thresh thresh, float slope): +cpdef inline float threshold(Thresh thresh, float slope): """ Compute jump threshold @@ -198,9 +204,39 @@ cdef inline deque[stack[RampIndex]] init_ramps(int[:, :] dq): return pixel_ramps +def _init_ramps_list(np.ndarray[int, ndim=2] dq): + """ + This is a wrapper for init_ramps so that it can be fully inspected from pure + python. A cpdef cannot be used in that case becase a stack has no direct python + analog. Instead this function turns that stack into a list ordered in the same + order as the stack; meaning that, the first element of the list is the top of + the stack. 
+ Note this function is for testing purposes only and so is marked as private + within this private module + """ + cdef deque[stack[RampIndex]] raw = init_ramps(dq) + + # Have to turn deque and stack into python compatible objects + cdef RampIndex index + cdef stack[RampIndex] ramp + cdef list out = [] + cdef list stack_out + for ramp in raw: + stack_out = [] + while not ramp.empty(): + index = ramp.top() + ramp.pop() + # So top of stack is first item of list + stack_out = [index] + stack_out + + out.append(stack_out) + + return out + + @cython.boundscheck(False) @cython.wraparound(False) -cdef ReadPatternMetadata metadata_from_read_pattern(list[list[int]] read_pattern, float read_time): +cpdef ReadPatternMetadata metadata_from_read_pattern(list[list[int]] read_pattern, float read_time): """ Derive the input data from the the read pattern diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx index f2824d1a..53f1470c 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx @@ -2,12 +2,8 @@ import numpy as np cimport numpy as np from libcpp cimport bool -from libcpp.stack cimport stack -from libcpp.deque cimport deque -from stcal.ramp_fitting.ols_cas22._core cimport RampIndex, ReadPatternMetadata, Thresh, threshold -from stcal.ramp_fitting.ols_cas22._core cimport metadata_from_read_pattern as c_metadata_from_read_pattern -from stcal.ramp_fitting.ols_cas22._core cimport init_ramps as c_init_ramps +from stcal.ramp_fitting.ols_cas22._core cimport ReadPatternMetadata, Thresh from stcal.ramp_fitting.ols_cas22._fixed cimport FixedValues from stcal.ramp_fitting.ols_cas22._fixed cimport fixed_values_from_metadata as c_fixed_values_from_metadata @@ -16,36 +12,6 @@ from stcal.ramp_fitting.ols_cas22._pixel cimport Pixel from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel as c_make_pixel -def metadata_from_read_pattern(list[list[int]] read_pattern, float read_time): - return c_metadata_from_read_pattern(read_pattern, read_time) - - -def init_ramps(np.ndarray[int, ndim=2] dq): - cdef deque[stack[RampIndex]] raw = c_init_ramps(dq) - - # Have to turn deque and stack into python compatible objects - cdef RampIndex index - cdef stack[RampIndex] ramp - cdef list out = [] - cdef list stack_out - for ramp in raw: - stack_out = [] - while not ramp.empty(): - index = ramp.top() - ramp.pop() - # So top of stack is first item of list - stack_out = [index] + stack_out - - out.append(stack_out) - - return out - - -def run_threshold(float intercept, float constant, float slope): - cdef Thresh thresh = Thresh(intercept, constant) - return threshold(thresh, slope) - - def fixed_values_from_metadata(np.ndarray[float, ndim=1] t_bar, np.ndarray[float, ndim=1] tau, np.ndarray[int, ndim=1] n_reads, diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 347e253f..82e54e68 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -2,9 +2,8 @@ import pytest from numpy.testing import assert_allclose -from stcal.ramp_fitting.ols_cas22._wrappers import metadata_from_read_pattern -from stcal.ramp_fitting.ols_cas22._wrappers import init_ramps -from stcal.ramp_fitting.ols_cas22._wrappers import run_threshold, fixed_values_from_metadata, make_pixel +from stcal.ramp_fitting.ols_cas22._core import metadata_from_read_pattern, threshold +from stcal.ramp_fitting.ols_cas22._wrappers import fixed_values_from_metadata, make_pixel from stcal.ramp_fitting.ols_cas22 import fit_ramps, Parameter, 
Variance, Diff, RampJumpDQ @@ -76,13 +75,20 @@ def test_metadata_from_read_pattern(base_ramp_data): def test_init_ramps(): - """Test turning dq flags into initial ramp splits""" + """ + Test turning dq flags into initial ramp splits + Note that because `init_ramps` itself returns a stack, which does not have + a direct python equivalent, we call the wrapper for `init_ramps` which + converts that stack into a list ordered in the same fashion as the stack + """ + from stcal.ramp_fitting.ols_cas22._core import _init_ramps_list + dq = np.array([[0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1], [0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1], [0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1], [0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1]], dtype=np.int32) - ramps = init_ramps(dq) + ramps = _init_ramps_list(dq) assert len(ramps) == dq.shape[1] == 16 # Check that the ramps are correct @@ -115,12 +121,28 @@ def test_init_ramps(): def test_threshold(): - """Test the threshold object/fucnction)""" - intercept = np.float32(5.5) - constant = np.float32(1/3) + """ + Test the threshold object/fucnction + intercept - constant * log10(slope) = threshold + """ - assert intercept == run_threshold(intercept, constant, 1.0) # check intercept - assert np.float32(intercept - constant) == run_threshold(intercept, constant, 10.0) # check constant + # Create the python analog of the threshold struct + # Note that structs get mapped to/from python as dictionary objects with + # the keys being the struct members. + thresh = { + 'intercept': np.float32(5.5), + 'constant': np.float32(1/3) + } + + # Check the 'intercept' is correctly interpreted. + # Since the log of the input slope is taken, log10(1) = 0, meaning that + # we should directly recover the intercept value in that case. + assert thresh['intercept'] == threshold(thresh, 1.0) + + # Check the 'constant' is correctly interpreted. + # Since we know that the intercept is correctly identified and that `log10(10) = 1`, + # we can use that to check that the constant is correctly interpreted. + assert np.float32(thresh['intercept'] - thresh['constant']) == threshold(thresh, 10.0) @pytest.fixture(scope="module") From 366d2b043f2a61177a2364965c30cdfa805df9ff Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 11 Oct 2023 10:45:22 -0400 Subject: [PATCH 89/90] Remove all remaining wrappers. 
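With _wrappers.pyx gone, the tests reach the Cython internals directly: the cpdef constructors accept the ReadPatternMetadata and Thresh structs as plain dicts, and the private _to_dict() helpers expose the computed arrays for inspection. A minimal sketch of that pattern, using made-up placeholder metadata values rather than a real read pattern, looks like:

import numpy as np
from stcal.ramp_fitting.ols_cas22._fixed import fixed_values_from_metadata
from stcal.ramp_fitting.ols_cas22._pixel import make_pixel

# Struct arguments map to/from python as dicts keyed by the struct members;
# the numbers below are arbitrary placeholders.
data = {
    "t_bar": np.array([3.04, 9.12, 21.28], dtype=np.float32),
    "tau": np.array([3.04, 8.11, 19.25], dtype=np.float32),
    "n_reads": np.array([1, 2, 3], dtype=np.int32),
}
thresh = {"intercept": np.float32(5.5), "constant": np.float32(1 / 3)}

fixed = fixed_values_from_metadata(data, thresh, True)  # use_jump=True
pixel = make_pixel(fixed, np.float32(5.0),              # read noise
                   np.array([310.0, 920.0, 2130.0], dtype=np.float32))

# Private helpers, intended for testing only: plain dicts of the computed arrays.
fixed_dict = fixed._to_dict()
pixel_dict = pixel._to_dict()
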
--- setup.py | 6 - src/stcal/ramp_fitting/ols_cas22/_fixed.pxd | 2 +- src/stcal/ramp_fitting/ols_cas22/_fixed.pyx | 66 +++++++++- src/stcal/ramp_fitting/ols_cas22/_pixel.pxd | 2 +- src/stcal/ramp_fitting/ols_cas22/_pixel.pyx | 48 ++++++- .../ramp_fitting/ols_cas22/_wrappers.pyx | 121 ------------------ tests/test_jump_cas22.py | 51 ++++++-- 7 files changed, 150 insertions(+), 146 deletions(-) delete mode 100644 src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx diff --git a/setup.py b/setup.py index 03eedc6f..b34e7dfa 100644 --- a/setup.py +++ b/setup.py @@ -31,12 +31,6 @@ include_dirs=[np.get_include()], language='c++' ), - Extension( - 'stcal.ramp_fitting.ols_cas22._wrappers', - ['src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx'], - include_dirs=[np.get_include()], - language='c++' - ), ] setup(ext_modules=cythonize(extensions)) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd index ed99c7de..72087051 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pxd @@ -18,4 +18,4 @@ cdef class FixedValues: cdef float[:, :] var_slope_vals(FixedValues self) -cdef FixedValues fixed_values_from_metadata(ReadPatternMetadata data, Thresh threshold, bool use_jump) +cpdef FixedValues fixed_values_from_metadata(ReadPatternMetadata data, Thresh threshold, bool use_jump) diff --git a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx index 6296bbaf..6bd72b07 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_fixed.pyx @@ -10,8 +10,10 @@ FixedValues : class Functions --------- -fixed_values_from_metadata : function - Fast constructor for FixedValues from the read pattern metadata + fixed_values_from_metadata : function + Fast constructor for FixedValues from the read pattern metadata + - cpdef gives a python wrapper, but the python version of this method + is considered private, only to be used for testing """ import numpy as np cimport numpy as np @@ -175,8 +177,64 @@ cdef class FixedValues: return var_slope_vals - -cdef inline FixedValues fixed_values_from_metadata(ReadPatternMetadata data, Thresh threshold, bool use_jump): + def _to_dict(FixedValues self): + """ + This is a private method to convert the FixedValues object to a dictionary, + so that attributes can be directly accessed in python. Note that this + is needed because class attributes cannot be accessed on cython classes + directly in python. Instead they need to be accessed or set using a + python compatible method. This method is a pure puthon method bound + to to the cython class and should not be used by any cython code, and + only exists for testing purposes. 
+ """ + cdef np.ndarray[float, ndim=2] t_bar_diffs + cdef np.ndarray[float, ndim=2] t_bar_diff_sqrs + cdef np.ndarray[float, ndim=2] read_recip_coeffs + cdef np.ndarray[float, ndim=2] var_slope_coeffs + + if self.use_jump: + t_bar_diffs = np.array(self.t_bar_diffs, dtype=np.float32) + t_bar_diff_sqrs = np.array(self.t_bar_diff_sqrs, dtype=np.float32) + read_recip_coeffs = np.array(self.read_recip_coeffs, dtype=np.float32) + var_slope_coeffs = np.array(self.var_slope_coeffs, dtype=np.float32) + else: + try: + self.t_bar_diffs + except AttributeError: + t_bar_diffs = np.array([[np.nan],[np.nan]], dtype=np.float32) + else: + raise AttributeError("t_bar_diffs should not exist") + + try: + self.t_bar_diff_sqrs + except AttributeError: + t_bar_diff_sqrs = np.array([[np.nan],[np.nan]], dtype=np.float32) + else: + raise AttributeError("t_bar_diff_sqrs should not exist") + + try: + self.read_recip_coeffs + except AttributeError: + read_recip_coeffs = np.array([[np.nan],[np.nan]], dtype=np.float32) + else: + raise AttributeError("read_recip_coeffs should not exist") + + try: + self.var_slope_coeffs + except AttributeError: + var_slope_coeffs = np.array([[np.nan],[np.nan]], dtype=np.float32) + else: + raise AttributeError("var_slope_coeffs should not exist") + + return dict(data=self.data, + threshold=self.threshold, + t_bar_diffs=t_bar_diffs, + t_bar_diff_sqrs=t_bar_diff_sqrs, + read_recip_coeffs=read_recip_coeffs, + var_slope_coeffs=var_slope_coeffs) + + +cpdef inline FixedValues fixed_values_from_metadata(ReadPatternMetadata data, Thresh threshold, bool use_jump): """ Fast constructor for FixedValues class Use this instead of an __init__ because it does not incure the overhead diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd index 326c1d1f..bf390419 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pxd @@ -20,4 +20,4 @@ cdef class Pixel: cdef RampFits fit_ramps(Pixel self, stack[RampIndex] ramps) -cdef Pixel make_pixel(FixedValues fixed, float read_noise, float [:] resultants) +cpdef Pixel make_pixel(FixedValues fixed, float read_noise, float [:] resultants) diff --git a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx index 6991811b..88544243 100644 --- a/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx +++ b/src/stcal/ramp_fitting/ols_cas22/_pixel.pyx @@ -9,8 +9,10 @@ Pixel : class Functions --------- -make_pixel : function - Fast constructor for a Pixel class from input data. + make_pixel : function + Fast constructor for a Pixel class from input data. + - cpdef gives a python wrapper, but the python version of this method + is considered private, only to be used for testing """ from libc.math cimport sqrt, fabs from libcpp.vector cimport vector @@ -473,10 +475,50 @@ cdef class Pixel: return ramp_fits + def _to_dict(Pixel self): + """ + This is a private method to convert the Pixel object to a dictionary, so + that attributes can be directly accessed in python. Note that this is + needed because class attributes cannot be accessed on cython classes + directly in python. Instead they need to be accessed or set using a + python compatible method. This method is a pure puthon method bound + to to the cython class and should not be used by any cython code, and + only exists for testing purposes. 
+ """ + + cdef np.ndarray[float, ndim=1] resultants_ = np.array(self.resultants, dtype=np.float32) + + cdef np.ndarray[float, ndim=2] local_slopes + cdef np.ndarray[float, ndim=2] var_read_noise + + if self.fixed.use_jump: + local_slopes = np.array(self.local_slopes, dtype=np.float32) + var_read_noise = np.array(self.var_read_noise, dtype=np.float32) + else: + try: + self.local_slopes + except AttributeError: + local_slopes = np.array([[np.nan],[np.nan]], dtype=np.float32) + else: + raise AttributeError("local_slopes should not exist") + + try: + self.var_read_noise + except AttributeError: + var_read_noise = np.array([[np.nan],[np.nan]], dtype=np.float32) + else: + raise AttributeError("var_read_noise should not exist") + + return dict(fixed=self.fixed._to_dict(), + resultants=resultants_, + read_noise=self.read_noise, + local_slopes=local_slopes, + var_read_noise=var_read_noise) + @cython.boundscheck(False) @cython.wraparound(False) -cdef inline Pixel make_pixel(FixedValues fixed, float read_noise, float [:] resultants): +cpdef inline Pixel make_pixel(FixedValues fixed, float read_noise, float [:] resultants): """ Fast constructor for the Pixel C class. This creates a Pixel object for a single pixel from the input data. diff --git a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx b/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx deleted file mode 100644 index 53f1470c..00000000 --- a/src/stcal/ramp_fitting/ols_cas22/_wrappers.pyx +++ /dev/null @@ -1,121 +0,0 @@ -import numpy as np -cimport numpy as np - -from libcpp cimport bool - -from stcal.ramp_fitting.ols_cas22._core cimport ReadPatternMetadata, Thresh - -from stcal.ramp_fitting.ols_cas22._fixed cimport FixedValues -from stcal.ramp_fitting.ols_cas22._fixed cimport fixed_values_from_metadata as c_fixed_values_from_metadata - -from stcal.ramp_fitting.ols_cas22._pixel cimport Pixel -from stcal.ramp_fitting.ols_cas22._pixel cimport make_pixel as c_make_pixel - - -def fixed_values_from_metadata(np.ndarray[float, ndim=1] t_bar, - np.ndarray[float, ndim=1] tau, - np.ndarray[int, ndim=1] n_reads, - float intercept, - float constant, - bool use_jump): - - cdef ReadPatternMetadata data = ReadPatternMetadata(t_bar, tau, n_reads) - cdef Thresh threshold = Thresh(intercept, constant) - - cdef FixedValues fixed = c_fixed_values_from_metadata(data, threshold, use_jump) - - cdef float intercept_ = fixed.threshold.intercept - cdef float constant_ = fixed.threshold.constant - - cdef np.ndarray[float, ndim=2] t_bar_diffs - cdef np.ndarray[float, ndim=2] t_bar_diff_sqrs - cdef np.ndarray[float, ndim=2] read_recip_coeffs - cdef np.ndarray[float, ndim=2] var_slope_coeffs - - if use_jump: - t_bar_diffs = np.array(fixed.t_bar_diffs, dtype=np.float32) - t_bar_diff_sqrs = np.array(fixed.t_bar_diff_sqrs, dtype=np.float32) - read_recip_coeffs = np.array(fixed.read_recip_coeffs, dtype=np.float32) - var_slope_coeffs = np.array(fixed.var_slope_coeffs, dtype=np.float32) - else: - try: - fixed.t_bar_diffs - except AttributeError: - t_bar_diffs = np.array([[np.nan],[np.nan]], dtype=np.float32) - else: - raise AttributeError("t_bar_diffs should not exist") - - try: - fixed.t_bar_diff_sqrs - except AttributeError: - t_bar_diff_sqrs = np.array([[np.nan],[np.nan]], dtype=np.float32) - else: - raise AttributeError("t_bar_diff_sqrs should not exist") - - try: - fixed.read_recip_coeffs - except AttributeError: - read_recip_coeffs = np.array([[np.nan],[np.nan]], dtype=np.float32) - else: - raise AttributeError("read_recip_coeffs should not exist") - - try: - 
fixed.var_slope_coeffs - except AttributeError: - var_slope_coeffs = np.array([[np.nan],[np.nan]], dtype=np.float32) - else: - raise AttributeError("var_slope_coeffs should not exist") - - return dict(data=fixed.data, - intercept=intercept_, - constant=constant_, - t_bar_diffs=t_bar_diffs, - t_bar_diff_sqrs=t_bar_diff_sqrs, - read_recip_coeffs=read_recip_coeffs, - var_slope_coeffs=var_slope_coeffs) - - -def make_pixel(np.ndarray[float, ndim=1] resultants, - np.ndarray[float, ndim=1] t_bar, - np.ndarray[float, ndim=1] tau, - np.ndarray[int, ndim=1] n_reads, - float read_noise, - float intercept, - float constant, - bool use_jump): - - cdef ReadPatternMetadata data = ReadPatternMetadata(t_bar, tau, n_reads) - cdef Thresh threshold = Thresh(intercept, constant) - - cdef FixedValues fixed = c_fixed_values_from_metadata(data, threshold, use_jump) - - cdef Pixel pixel = c_make_pixel(fixed, read_noise, resultants) - - cdef np.ndarray[float, ndim=1] resultants_ = np.array(pixel.resultants, dtype=np.float32) - - cdef np.ndarray[float, ndim=2] local_slopes - cdef np.ndarray[float, ndim=2] var_read_noise - - if use_jump: - local_slopes = np.array(pixel.local_slopes, dtype=np.float32) - var_read_noise = np.array(pixel.var_read_noise, dtype=np.float32) - else: - try: - pixel.local_slopes - except AttributeError: - local_slopes = np.array([[np.nan],[np.nan]], dtype=np.float32) - else: - raise AttributeError("local_slopes should not exist") - - try: - pixel.var_read_noise - except AttributeError: - var_read_noise = np.array([[np.nan],[np.nan]], dtype=np.float32) - else: - raise AttributeError("var_read_noise should not exist") - - # only return computed values (assume fixed is correct) - return dict(resultants=resultants_, - read_noise=pixel.read_noise, - local_slopes=local_slopes, - var_read_noise=var_read_noise) diff --git a/tests/test_jump_cas22.py b/tests/test_jump_cas22.py index 82e54e68..5d49597f 100644 --- a/tests/test_jump_cas22.py +++ b/tests/test_jump_cas22.py @@ -3,7 +3,8 @@ from numpy.testing import assert_allclose from stcal.ramp_fitting.ols_cas22._core import metadata_from_read_pattern, threshold -from stcal.ramp_fitting.ols_cas22._wrappers import fixed_values_from_metadata, make_pixel +from stcal.ramp_fitting.ols_cas22._fixed import fixed_values_from_metadata +from stcal.ramp_fitting.ols_cas22._pixel import make_pixel from stcal.ramp_fitting.ols_cas22 import fit_ramps, Parameter, Variance, Diff, RampJumpDQ @@ -126,7 +127,7 @@ def test_threshold(): intercept - constant * log10(slope) = threshold """ - # Create the python analog of the threshold struct + # Create the python analog of the Threshold struct # Note that structs get mapped to/from python as dictionary objects with # the keys being the struct members. thresh = { @@ -174,17 +175,33 @@ def test_fixed_values_from_metadata(ramp_data, use_jump): """Test computing the fixed data for all pixels""" _, t_bar, tau, n_reads = ramp_data - intercept = np.float32(5.5) - constant = np.float32(1/3) + # Create the python analog of the ReadPatternMetadata struct + # Note that structs get mapped to/from python as dictionary objects with + # the keys being the struct members. + data = { + "t_bar": t_bar, + "tau": tau, + "n_reads": n_reads, + } + + # Create the python analog of the Threshold struct + # Note that structs get mapped to/from python as dictionary objects with + # the keys being the struct members. 
+ thresh = { + 'intercept': np.float32(5.5), + 'constant': np.float32(1/3) + } - fixed = fixed_values_from_metadata(t_bar, tau, n_reads, intercept, constant, use_jump) + # Note this is converted to a dictionary so we can directly interrogate the + # variables in question + fixed = fixed_values_from_metadata(data, thresh, use_jump)._to_dict() # Basic sanity checks that data passed in survives assert (fixed['data']['t_bar'] == t_bar).all() assert (fixed['data']['tau'] == tau).all() assert (fixed['data']['n_reads'] == n_reads).all() - assert fixed["intercept"] == intercept - assert fixed["constant"] == constant + assert fixed['threshold']["intercept"] == thresh['intercept'] + assert fixed['threshold']["constant"] == thresh['constant'] # Check the computed data # These are computed via vectorized operations in the main code, here we @@ -298,15 +315,29 @@ def test_make_pixel(pixel_data, use_jump): """Test computing the initial pixel data""" resultants, t_bar, tau, n_reads = pixel_data - intercept = np.float32(5.5) - constant = np.float32(1/3) + # Create a fixed object to pass into the constructor + # This requires setting up some structs as dictionaries + data = { + "t_bar": t_bar, + "tau": tau, + "n_reads": n_reads, + } + thresh = { + 'intercept': np.float32(5.5), + 'constant': np.float32(1/3) + } + fixed = fixed_values_from_metadata(data, thresh, use_jump) - pixel = make_pixel(resultants, t_bar, tau, n_reads, READ_NOISE, intercept, constant, use_jump) + # Note this is converted to a dictionary so we can directly interrogate the + # variables in question + pixel = make_pixel(fixed, READ_NOISE, resultants)._to_dict() # Basic sanity checks that data passed in survives assert (pixel['resultants'] == resultants).all() assert READ_NOISE == pixel['read_noise'] + # the "fixed" data is not checked as this is already done above + # Check the computed data # These are computed via vectorized operations in the main code, here we # check using item-by-item operations From 40303d20745ee2717cb515e0b5eb8fc52f7663f8 Mon Sep 17 00:00:00 2001 From: William Jamieson Date: Wed, 11 Oct 2023 13:59:34 -0400 Subject: [PATCH 90/90] Fix reshaping of data in/out of fitting routine --- src/stcal/ramp_fitting/ols_cas22_fit.py | 14 +++--- tests/test_ramp_fitting_cas22.py | 61 +++++++++++++++---------- 2 files changed, 45 insertions(+), 30 deletions(-) diff --git a/src/stcal/ramp_fitting/ols_cas22_fit.py b/src/stcal/ramp_fitting/ols_cas22_fit.py index 945797e8..9584970e 100644 --- a/src/stcal/ramp_fitting/ols_cas22_fit.py +++ b/src/stcal/ramp_fitting/ols_cas22_fit.py @@ -120,13 +120,15 @@ def fit_ramps_casertano( use_jump, **kwargs) - parameters = output.parameters - variances = output.variances + parameters = output.parameters.reshape(orig_shape[1:] + (2,)) + variances = output.variances.reshape(orig_shape[1:] + (3,)) + dq = output.dq.reshape(orig_shape) + if resultants.shape != orig_shape: - parameters = output.parameters[0] - variances = output.variances[0] + parameters = parameters[0] + variances = variances[0] if resultants_unit is not None: - parameters = output.parameters * resultants_unit + parameters = parameters * resultants_unit - return ols_cas22.RampFitOutputs(output.fits, parameters, variances, output.dq) + return ols_cas22.RampFitOutputs(output.fits, parameters, variances, dq) diff --git a/tests/test_ramp_fitting_cas22.py b/tests/test_ramp_fitting_cas22.py index 60c6ff0c..72a7b6d1 100644 --- a/tests/test_ramp_fitting_cas22.py +++ b/tests/test_ramp_fitting_cas22.py @@ -16,31 +16,28 @@ 
@pytest.mark.parametrize("use_unit", [True, False]) -def test_simulated_ramps(use_unit): - ntrial = 100000 +@pytest.mark.parametrize("use_dq", [True, False]) +def test_simulated_ramps(use_unit, use_dq): + # Perfect square like the detector, this is so we can test that the code + # reshapes the data correctly for the computation and then reshapes it back + # to the original shape. + ntrial = 320 * 320 read_pattern, flux, read_noise, resultants = simulate_many_ramps(ntrial=ntrial) + # So we get a detector-like input shape + resultants = resultants.reshape((len(read_pattern), 320, 320)) + if use_unit: resultants = resultants * u.electron dq = np.zeros(resultants.shape, dtype=np.int32) - read_noise = np.ones(resultants.shape[1], dtype=np.float32) * read_noise - - output = ramp.fit_ramps_casertano( - resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern) - - if use_unit: - assert output.parameters.unit == u.electron - parameters = output.parameters.value - else: - parameters = output.parameters + read_noise = np.ones(resultants.shape[1:], dtype=np.float32) * read_noise - chi2dof_slope = np.sum((parameters[:, 1] - flux)**2 / output.variances[:, 2]) / ntrial - assert np.abs(chi2dof_slope - 1) < 0.03 + # now let's mark a bunch of the ramps as compromised. When using dq flags + if use_dq: + bad = np.random.uniform(size=resultants.shape) > 0.7 + dq |= bad - # now let's mark a bunch of the ramps as compromised. - bad = np.random.uniform(size=resultants.shape) > 0.7 - dq |= bad output = ramp.fit_ramps_casertano( resultants, dq, read_noise, ROMAN_READ_TIME, read_pattern, threshold_constant=0, threshold_intercept=0) # set the threshold parameters @@ -50,21 +47,37 @@ def test_simulated_ramps(use_unit): # does not effect the computation # since jump detection is off in # this case. - # only use okay ramps - # ramps passing the below criterion have at least two adjacent valid reads - # i.e., we can make a measurement from them. - m = np.sum((dq[1:, :] == 0) & (dq[:-1, :] == 0), axis=0) != 0 + # Check that the output shapes are correct + assert output.parameters.shape == (320, 320, 2) == resultants.shape[1:] + (2,) + assert output.variances.shape == (320, 320, 3) == resultants.shape[1:] + (3,) + assert output.dq.shape == dq.shape + + # check the unit if use_unit: assert output.parameters.unit == u.electron parameters = output.parameters.value else: parameters = output.parameters - chi2dof_slope = np.sum((parameters[m, 1] - flux)**2 / output.variances[m, 2]) / np.sum(m) + # Turn into single dimension arrays to make the indexing for the math easier + parameters = parameters.reshape((320 * 320, 2)) + variances = output.variances.reshape((320 * 320, 3)) + + # only use okay ramps + # ramps passing the below criterion have at least two adjacent valid reads + # i.e., we can make a measurement from them. + okay = np.sum((dq[1:, :] == 0) & (dq[:-1, :] == 0), axis=0) != 0 + okay = okay.reshape((320 * 320)) + + # Sanity check that when no dq is used, all ramps are used + if not use_dq: + assert np.all(okay) + + chi2dof_slope = np.sum((parameters[okay, 1] - flux)**2 / variances[okay, 2]) / np.sum(okay) assert np.abs(chi2dof_slope - 1) < 0.03 - assert np.all(parameters[~m, 1] == 0) - assert np.all(output.variances[~m, 1] == 0) + assert np.all(parameters[~okay, 1] == 0) + assert np.all(variances[~okay, 1] == 0) # #########