add unit test for varying pixel scale ratio
mcara committed Dec 12, 2024
1 parent 6cf0b3a commit 3c70cc7
Showing 4 changed files with 134 additions and 61 deletions.
2 changes: 1 addition & 1 deletion docs/jwst/resample/arguments.rst
@@ -95,7 +95,7 @@ image.
If `weight_type=ivm` (the default), the scaling value
will be determined per-pixel using the inverse of the read noise
(VAR_RNOISE) array stored in each input image. If the VAR_RNOISE array does
- not exist, the variance is set to 1 for all pixels (equal weighting).
+ not exist, the weight is set to 1 for all pixels (equal weighting).
If `weight_type=exptime`, the scaling value will be set equal to the
measurement time (TMEASURE) found in the image header if available;
if unavailable, the scaling will be set equal to the exposure time (EFFEXPTM).
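To make the weighting rule above concrete, here is a minimal sketch of how a per-pixel weight map could be built from an input model. The ``build_weight_map`` helper, its attribute access, and the fallback handling are assumptions for illustration, not the step's actual implementation:

```python
import numpy as np

def build_weight_map(model, weight_type="ivm"):
    """Sketch of the weighting rule described above (illustration only)."""
    if weight_type == "ivm":
        var = getattr(model, "var_rnoise", None)
        if var is None:
            # No VAR_RNOISE array: fall back to equal weighting.
            return np.ones_like(model.data)
        weight = np.zeros_like(model.data)
        good = np.isfinite(var) & (var > 0)
        weight[good] = 1.0 / var[good]
        return weight
    if weight_type == "exptime":
        # Prefer the measurement time (TMEASURE); fall back to the
        # exposure time (EFFEXPTM) when it is unavailable.
        t = model.meta.exposure.measurement_time or model.meta.exposure.exposure_time
        return np.full(model.data.shape, t, dtype=np.float32)
    raise ValueError(f"unsupported weight_type: {weight_type!r}")
```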
48 changes: 5 additions & 43 deletions docs/jwst/resample/main.rst
@@ -36,56 +36,18 @@ drizzling to create the output product.
Error Propagation
-----------------

- The error associated with each resampled pixel can in principle be derived
+ The error associated with each resampled pixel is derived
from the variance components associated with each input pixel, weighted by
- the square of the input user weights and the square of the overlap between
- the input and output pixels. In practice, the cdriz routine does not currently
- support propagating variance data alongside science images, so the output
- error cannot be precisely calculated.
-
- To approximate the error on a resampled pixel, the variance arrays associated
- with each input image are resampled individually, then combined with a weighted
- sum. The process is:
-
- #. For each input image, take the square root of each of the read noise variance
-    array to make an error image.
-
- #. Drizzle the read noise error image onto the output WCS, with drizzle
-    parameters matching those used for the science data.
-
- #. Square the resampled read noise to make a variance array.
-
-    a. If the resampling `weight_type` is an inverse variance map (`ivm`), weight
-       the resampled variance by the square of its own inverse.
-
-    #. If the `weight_type` is the exposure time (`exptime`), weight the
-       resampled variance by the square of the exposure time for the image.
-
- #. Add the weighted, resampled read noise variance to a running sum across all
-    images. Add the weights (unsquared) to a separate running sum across
-    all images.
-
- #. Perform the same steps for the Poisson noise variance and the flat variance.
-    For these components, the weight for the sum is either the resampled read
-    noise variance or else the exposure time.
-
- #. For each variance component (read noise, Poisson, and flat), divide the
-    weighted variance sum by the total weight, squared.
+ the square of the input user weights, pixel scale ratio, and the square of
+ the overlap between the input and output pixels. Essentially the code performs
+ `propagation of uncertainty <https://en.wikipedia.org/wiki/Propagation_of_uncertainty>`_
+ for each of the variance arrays in the input models.

After each variance component is resampled and summed, the final error
array is computed as the square root of the sum of the three independent
variance components. This error image is stored in the ``err`` attribute
in the output data model or the ``'ERR'`` extension of the FITS file.

- It is expected that the output errors computed in this way will
- generally overestimate the true error on the resampled data. The magnitude
- of the overestimation depends on the details of the pixel weights
- and error images. Note, however, that drizzling error images produces
- a much better estimate of the output error than directly drizzling
- the variance images, since the kernel overlap weights do not need to be
- squared for combining error values.
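
As an editorial illustration of the propagation described above, here is a minimal numpy sketch for a single output pixel. The variable names and the exact placement of the pixel-scale-ratio factor are assumptions, not the pipeline's authoritative formula:

```python
import numpy as np

def propagate_variance(var, w, a, scale=1.0):
    """Propagation of uncertainty for one output pixel.

    var   : variances of the contributing input pixels
    w     : user weights of those pixels (e.g. 1/VAR_RNOISE or exposure time)
    a     : overlap fractions between each input pixel and the output pixel
    scale : pixel scale ratio (its exact placement here is an assumption)
    """
    eff = w * a * scale
    return np.sum(eff**2 * var) / np.sum(w * a) ** 2

# three input pixels contributing to one output pixel
print(propagate_variance(np.array([0.04, 0.05, 0.03]),
                         np.array([1.0, 1.0, 2.0]),
                         np.array([0.25, 0.5, 0.25])))
```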


Context Image
-------------

28 changes: 17 additions & 11 deletions jwst/resample/resample.py
@@ -296,11 +296,16 @@ def resample_group(self, input_models, indices, compute_error=False):
img.data.shape,
)

+ if compute_error and img.err is not None:
+     data2 = np.square(img.err)
+ else:
+     data2 = None

driz.add_image(
data=data,
exptime=img.meta.exposure.exposure_time, # GWCSDrizzle.add_image param default was 1.0
pixmap=pixmap,
- data2=np.square(img.err) if compute_error else None,
+ data2=data2,
scale=iscale,
weight_map=inwht,
wht_scale=1.0, # hard-coded for JWST count-rate data
@@ -319,7 +324,7 @@
output_model.data = driz.out_img
output_model.wht = driz.out_wht
# copy the drizzled error into the output model
- if compute_error:
+ if compute_error and driz.out_img2 is not None:
output_model.err = np.sqrt(driz.out_img2[0])
del driz
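
The two hunks above pass ``err**2`` (a variance) through the separate ``data2`` channel and take the square root of ``out_img2`` afterwards, rather than co-adding the error image like science data. A small self-contained check of why the distinction matters: variances, not errors, combine linearly under weighting (how ``data2``/``out_img2`` accumulates internally is not shown in this diff; the sketch below assumes the standard rule):

```python
import numpy as np

# Two input pixels contribute to one output pixel with weights w.
w = np.array([0.7, 0.3])
err = np.array([0.10, 0.40])

# Weighted mean of the errors (what plain drizzling of an error image gives):
naive = np.sum(w * err) / np.sum(w)

# Propagation of uncertainty: variances add with squared weights.
propagated = np.sqrt(np.sum(w**2 * err**2)) / np.sum(w)

print(naive, propagated)  # 0.19 vs ~0.1389, so the two are not the same
```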

@@ -388,8 +393,8 @@ def resample_many_to_one(self, input_models):

log.info("Resampling science and variance data")

- invalid_var = 4 * [False]
- var_list = ["var_rnoise", "var_flat", "var_poisson", "err"]
+ var_list = ["var_rnoise", "var_flat", "var_poisson"]
+ invalid_var = len(var_list) * [False]

leading_group_idx = [v[0] for v in input_models.group_indices.values()]
with input_models:
@@ -452,10 +457,7 @@
f"{repr(img.meta.filename)}. Skipping ..."
)
else:
if name == "err":
data2.append(np.square(var))
else:
data2.append(var)
data2.append(var)
del var

driz.add_image(
@@ -484,15 +486,19 @@
output_model.wht = driz.out_wht
if driz.out_ctx is not None:
output_model.con = driz.out_ctx

+ valid_var = []
for k, name in enumerate(var_list):
if invalid_var[k]:
var = np.full_like(output_model.data, np.nan)
elif name == "err":
var = np.sqrt(driz.out_img2[k])
else:
var = driz.out_img2[k]
+ valid_var.append(var)
setattr(output_model, name, var)
- del driz

+ err = np.sqrt(np.sum(valid_var, axis=0)).astype(np.float32)
+ output_model.err = err
+ del driz, err, valid_var, var

if self.blendheaders:
blender.finalize_model(output_model)
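A standalone sketch of the combination step in the hunk above: each resampled variance plane feeds a running list, missing planes are filled with NaN, and the total error is the square root of their sum. The ``resampled`` dict is a stand-in for the planes read from ``driz.out_img2``; it is an assumption for illustration, not the pipeline's interface:

```python
import numpy as np

def combine_variances(resampled, shape):
    """Combine resampled variance planes into a total error (illustration only)."""
    var_list = ["var_rnoise", "var_flat", "var_poisson"]
    planes = []
    for name in var_list:
        plane = resampled.get(name)
        if plane is None:
            # A missing component makes the total error undefined (NaN) there.
            plane = np.full(shape, np.nan, dtype=np.float32)
        planes.append(plane)
    return np.sqrt(np.sum(planes, axis=0)).astype(np.float32)

err = combine_variances(
    {"var_rnoise": np.full((2, 2), 0.01),
     "var_poisson": np.full((2, 2), 0.04),
     "var_flat": np.zeros((2, 2))},
    (2, 2),
)
print(err)  # sqrt(0.05) ~= 0.2236 everywhere
```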
117 changes: 111 additions & 6 deletions jwst/resample/tests/test_resample_step.py
@@ -1,4 +1,5 @@
import pytest
+ from itertools import product

from gwcs.wcstools import grid_from_bounding_box
from numpy.testing import assert_allclose
@@ -803,22 +804,31 @@ def test_resample_variance(nircam_rate, n_images, weight_type):
# Verify that the combined uncertainty goes as 1 / sqrt(N)
mask = np.isfinite(result.err)

+ twht = np.sum(result.wht[mask])
+ twht1 = np.sum(result1.wht[mask])

assert np.all((result1.err[mask] / err) <= 1.0)
assert_allclose(
-     result.err[mask].mean(),
-     result1.err[mask].mean() / np.sqrt(n_images), atol=1e-5
+     np.sum(result.err[mask]**2) * twht,
+     np.sum(result1.err[mask]**2) * twht1,
+     rtol=1e-6,
+     atol=0.0,
)

assert np.all((result1.var_rnoise[mask] / var_rnoise) <= 1.0)
assert_allclose(
-     result.var_rnoise[mask].mean(),
-     result1.var_rnoise[mask].mean() / n_images, atol=1e-5
+     np.sum(result.var_rnoise[mask]) * twht,
+     np.sum(result1.var_rnoise[mask]) * twht1,
+     rtol=1e-6,
+     atol=0.0,
)

assert np.all((result1.var_poisson[mask] / var_poisson) <= 1.0)
assert_allclose(
-     result.var_poisson[mask].mean(),
-     result1.var_poisson[mask].mean() / n_images, atol=1e-5
+     np.sum(result.var_poisson[mask]) * twht,
+     np.sum(result1.var_poisson[mask]) * twht1,
+     rtol=1e-6,
+     atol=0.0,
)

im.close()
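
A brief note on the quantities compared above: for N equally weighted copies of the same image, resampling divides the per-pixel variance by N and multiplies the accumulated weight by N, so the product of the summed variance and the summed weight over the same pixels is unchanged. A tiny numeric check (the values are illustrative only):

```python
import numpy as np

N = 4
var1 = np.full((3, 3), 0.2)   # per-pixel variance of a single resampled input
wht1 = np.full((3, 3), 5.0)   # its accumulated resample weight

varN = var1 / N               # variance after combining N equally weighted copies
whtN = wht1 * N               # weight after combining N equally weighted copies

assert np.isclose(varN.sum() * whtN.sum(), var1.sum() * wht1.sum())
```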
@@ -1454,3 +1464,98 @@ def test_nirspec_lamp_pixscale(nirspec_lamp, tmp_path):
result2.close()
result3.close()
result4.close()


@pytest.mark.filterwarnings("ignore:Kernel '")
@pytest.mark.parametrize(
'kernel_fc, ps_ratio, weights',
(
x for x in product(
[
('square', True),
('point', True),
('gaussian', False),
],
[0.25, 0.5, 1, 1.2],
[(0.99, 0.01), (0.9, 1.5), (467, 733)],
)
)
)
def test_variance_arrays(kernel_fc, ps_ratio, weights, nircam_rate):

    # check that total flux and variance are propagated correctly for a range
    # of kernels, pixel scale ratios, and exposure-time weights
im1 = AssignWcsStep.call(nircam_rate, sip_approx=False)
_set_photom_kwd(im1)
im2 = AssignWcsStep.call(nircam_rate, sip_approx=False)
_set_photom_kwd(im2)

shape = im1.data.shape
xc = shape[1] // 2
yc = shape[0] // 2

# unpack parameters:
kernel, fc = kernel_fc

# pixel values in input data:
dataval = [1.0, 7.0]

# pixel values in input variance:
varval = [0.5, 50.0]

sl = np.s_[yc - 4: yc + 5, xc - 4: xc + 5]
im1.data[sl] = dataval[0]
im2.data[sl] = dataval[1]

im1.var_poisson[:, :] = 0.0
im1.var_poisson[sl] = varval[0]
im2.var_poisson[:, :] = 0.0
im2.var_poisson[sl] = varval[1]

im1.meta.exposure.exposure_time = weights[0]
im1.meta.exposure.measurement_time = weights[0]
im2.meta.exposure.exposure_time = weights[1]
im2.meta.exposure.measurement_time = weights[1]

library = ModelLibrary([im1, im2])

    # resample both images onto a common output grid with the requested pixel scale ratio
res = ResampleStep.call(
library,
pixel_scale_ratio=ps_ratio,
kernel=kernel,
weight_type="exptime",
blendheaders=False,
save_results=True,
)

assert np.any(np.isfinite(res.var_poisson))

mask = res.con[0] > 0
n_nonzero = np.sum(im1.data > 0.0)
res.var_poisson[np.logical_not(mask)] = 0.0

rtol = 1.0e-6 if fc else 0.15

ideal_output = np.dot(dataval, weights) * n_nonzero
ideal_output2 = np.dot(varval, np.square(weights)) / np.sum(weights)**2

tflux = np.sum(res.data[mask] * res.wht[mask])
tflux2 = np.max(res.var_poisson)

# check output flux:
assert np.allclose(
tflux,
ideal_output,
rtol=rtol,
atol=0.0
)

# check output variance:
# less restrictive (to account for pixel overlap variations):
assert (tflux2 <= ideal_output2 * (1 + rtol) and
        tflux2 >= 0.25 * ideal_output2 * (1 - rtol))

im1.close()
im2.close()
res.close()
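
For one parameter combination, the expected values checked above work out as follows (assuming the only nonzero input pixels are the 9x9 block set by the test, so ``n_nonzero`` is 81):

```python
import numpy as np

weights = np.array([0.99, 0.01])   # exposure / measurement times
dataval = np.array([1.0, 7.0])     # pixel values in the two inputs
varval = np.array([0.5, 50.0])     # var_poisson values in the two inputs
n_nonzero = 81                     # 9x9 block of nonzero pixels (assumed)

# total weighted flux recovered from the resampled data
ideal_output = np.dot(dataval, weights) * n_nonzero  # 1.06 * 81 = 85.86

# per-pixel variance from propagation of uncertainty with exptime weights
ideal_output2 = np.dot(varval, np.square(weights)) / np.sum(weights) ** 2  # ~0.495

print(ideal_output, ideal_output2)
```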
