From ebdabfb31f1372a508462cc1acb215beba9b4df5 Mon Sep 17 00:00:00 2001 From: Chris Lowder Date: Tue, 3 Dec 2024 12:44:39 -0700 Subject: [PATCH 1/3] satisifies ruff --- punchbowl/data/wcs.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/punchbowl/data/wcs.py b/punchbowl/data/wcs.py index db805c95..8e8e698d 100644 --- a/punchbowl/data/wcs.py +++ b/punchbowl/data/wcs.py @@ -28,7 +28,7 @@ def extract_crota_from_wcs(wcs: WCS) -> u.deg: def calculate_helio_wcs_from_celestial(wcs_celestial: WCS, date_obs: datetime, - data_shape: tuple[int, int]) -> (WCS, float): + data_shape: tuple[int, int]) -> tuple[WCS, float]: """Calculate the helio WCS from a celestial WCS.""" is_3d = len(data_shape) == 3 @@ -80,13 +80,13 @@ def calculate_helio_wcs_from_celestial(wcs_celestial: WCS, return wcs_helio, p_angle -def get_sun_ra_dec(dt: datetime) -> (float, float): +def get_sun_ra_dec(dt: datetime) -> tuple[float, float]: """Get the position of the Sun in right ascension and declination.""" position = get_sun(Time(str(dt), scale="utc")) return position.ra.value, position.dec.value -def calculate_pc_matrix(crota: float, cdelt: (float, float)) -> np.ndarray: +def calculate_pc_matrix(crota: float, cdelt: tuple[float, float]) -> np.ndarray: """ Calculate a PC matrix given CROTA and CDELT. @@ -311,7 +311,7 @@ def calculate_celestial_wcs_from_helio(wcs_helio: WCS, date_obs: datetime, data_ return wcs_celestial -def load_trefoil_wcs() -> (astropy.wcs.WCS, (int, int)): +def load_trefoil_wcs() -> tuple[astropy.wcs.WCS, tuple[int, int]]: """Load Level 2 trefoil world coordinate system and shape.""" trefoil_wcs = WCS(os.path.join(_ROOT, "data", "trefoil_wcs.fits")) trefoil_wcs.wcs.ctype = "HPLN-ARC", "HPLT-ARC" # TODO: figure out why this is necessary, seems like a bug @@ -319,7 +319,7 @@ def load_trefoil_wcs() -> (astropy.wcs.WCS, (int, int)): return trefoil_wcs, trefoil_shape -def load_quickpunch_mosaic_wcs() -> (astropy.wcs.WCS, (int, int)): +def load_quickpunch_mosaic_wcs() -> tuple[astropy.wcs.WCS, tuple[int, int]]: """Load Level quickPUNCH mosaic world coordinate system and shape.""" quickpunch_mosaic_shape = (1024, 1024) quickpunch_mosaic_wcs = WCS(naxis=2) @@ -332,7 +332,7 @@ def load_quickpunch_mosaic_wcs() -> (astropy.wcs.WCS, (int, int)): return quickpunch_mosaic_wcs, quickpunch_mosaic_shape -def load_quickpunch_nfi_wcs() -> (astropy.wcs.WCS, (int, int)): +def load_quickpunch_nfi_wcs() -> tuple[astropy.wcs.WCS, tuple[int, int]]: """Load Level quickPUNCH NFI world coordinate system and shape.""" quickpunch_nfi_shape = (1024, 1024) quickpunch_nfi_wcs = WCS(naxis=2) From 5372fd8b68fc6c4fe6e2e5d27c1dad673551a269 Mon Sep 17 00:00:00 2001 From: Chris Lowder Date: Tue, 3 Dec 2024 12:47:58 -0700 Subject: [PATCH 2/3] adds wcs array shape to pre-built wcs functions --- punchbowl/data/wcs.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/punchbowl/data/wcs.py b/punchbowl/data/wcs.py index 8e8e698d..c05119b9 100644 --- a/punchbowl/data/wcs.py +++ b/punchbowl/data/wcs.py @@ -316,6 +316,9 @@ def load_trefoil_wcs() -> tuple[astropy.wcs.WCS, tuple[int, int]]: trefoil_wcs = WCS(os.path.join(_ROOT, "data", "trefoil_wcs.fits")) trefoil_wcs.wcs.ctype = "HPLN-ARC", "HPLT-ARC" # TODO: figure out why this is necessary, seems like a bug trefoil_shape = (4096, 4096) + + trefoil_wcs.array_shape = trefoil_shape + return trefoil_wcs, trefoil_shape @@ -329,6 +332,8 @@ def load_quickpunch_mosaic_wcs() -> tuple[astropy.wcs.WCS, tuple[int, int]]: quickpunch_mosaic_wcs.wcs.cdelt = 0.045, 0.045 
quickpunch_mosaic_wcs.wcs.ctype = "HPLN-ARC", "HPLT-ARC" + quickpunch_mosaic_wcs.array_shape = quickpunch_mosaic_shape + return quickpunch_mosaic_wcs, quickpunch_mosaic_shape @@ -342,4 +347,6 @@ def load_quickpunch_nfi_wcs() -> tuple[astropy.wcs.WCS, tuple[int, int]]: quickpunch_nfi_wcs.wcs.cdelt = 30 / 3600 * 2, 30 / 3600 * 2 quickpunch_nfi_wcs.wcs.ctype = "HPLN-TAN", "HPLT-TAN" + quickpunch_nfi_wcs.array_shape = quickpunch_nfi_shape + return quickpunch_nfi_wcs, quickpunch_nfi_shape From ce8f44421fd374864c6ad9c84444f8df20c7827a Mon Sep 17 00:00:00 2001 From: Chris Lowder Date: Tue, 3 Dec 2024 13:59:03 -0700 Subject: [PATCH 3/3] satisfies ruff and adds wcs array shape --- punchbowl/data/tests/test_history.py | 1 - punchbowl/data/tests/test_io.py | 2 +- punchbowl/data/tests/test_meta.py | 32 +++---- punchbowl/data/tests/test_units.py | 2 + .../level1/tests/test_deficient_pixel.py | 9 +- punchbowl/level1/tests/test_sqrt.py | 1 + .../level2/tests/test_bright_structure.py | 86 +++++++++---------- punchbowl/level2/tests/test_merge.py | 2 + punchbowl/level2/tests/test_resample.py | 2 +- punchbowl/level3/tests/test_f_corona_model.py | 4 + punchbowl/level3/tests/test_stellar.py | 1 + punchbowl/level3/tests/test_velocity.py | 4 +- punchbowl/level3/velocity.py | 1 + scripts/generate_templates.py | 1 + 14 files changed, 75 insertions(+), 73 deletions(-) diff --git a/punchbowl/data/tests/test_history.py b/punchbowl/data/tests/test_history.py index f0eb19e9..6f0f655a 100644 --- a/punchbowl/data/tests/test_history.py +++ b/punchbowl/data/tests/test_history.py @@ -1,7 +1,6 @@ from datetime import datetime import pytest -from astropy.wcs import WCS from pytest import fixture from punchbowl.data.history import History, HistoryEntry diff --git a/punchbowl/data/tests/test_io.py b/punchbowl/data/tests/test_io.py index 7b0afcd6..e671baa6 100644 --- a/punchbowl/data/tests/test_io.py +++ b/punchbowl/data/tests/test_io.py @@ -204,7 +204,7 @@ def test_load_punchdata_with_history(tmpdir): os.remove(file_path) -def make_empty_distortion_model(num_bins: int, image: np.ndarray) -> (DistortionLookupTable, DistortionLookupTable): +def make_empty_distortion_model(num_bins: int, image: np.ndarray) -> tuple: """ Create an empty distortion table Parameters diff --git a/punchbowl/data/tests/test_meta.py b/punchbowl/data/tests/test_meta.py index a329c9a9..a965af20 100644 --- a/punchbowl/data/tests/test_meta.py +++ b/punchbowl/data/tests/test_meta.py @@ -1,20 +1,10 @@ import os from datetime import datetime -import astropy -import astropy.units as u -import numpy as np import pytest -from astropy.coordinates import GCRS, ICRS, EarthLocation, SkyCoord, get_sun from astropy.io import fits -from astropy.nddata import StdDevUncertainty -from astropy.time import Time -from astropy.wcs import WCS -from ndcube import NDCube -from pytest import fixture -from sunpy.coordinates import frames - -from punchbowl.data.history import History, HistoryEntry + +from punchbowl.data.history import HistoryEntry from punchbowl.data.meta import MetaField, NormalizedMetadata, load_spacecraft_def TESTDATA_DIR = os.path.dirname(__file__) @@ -29,19 +19,19 @@ def test_metafield_creation_keyword_too_long(): """ cannot create an invalid metafield""" with pytest.raises(ValueError): - mf = MetaField("TOO LONG KEYWORD", - "What's up?", 3, int, False, True, -99) + MetaField("TOO LONG KEYWORD", + "What's up?", 3, int, False, True, -99) def test_metafield_creation_kinds_do_not_match(): """the value, default, and kind must all agree""" with 
pytest.raises(TypeError): - mf = MetaField("HI", - "What's up?", 3, str, False, True, "hi there") + MetaField("HI", + "What's up?", 3, str, False, True, "hi there") with pytest.raises(TypeError): - mf = MetaField("TOO LONG KEYWORD", - "What's up?", "hi there", str, False, True, -99) + MetaField("TOO LONG KEYWORD", + "What's up?", "hi there", str, False, True, -99) def test_metafield_update(): @@ -50,7 +40,7 @@ def test_metafield_update(): assert mf.keyword == "HI" assert mf.comment == "What's up?" assert mf.value == 3 - assert mf._datatype == int + assert mf._datatype is int assert not mf.nullable assert mf._mutable assert mf.default == -99 @@ -68,7 +58,7 @@ def test_metafield_not_mutable(): assert mf.keyword == "HI" assert mf.comment == "What's up?" assert mf.value == 3 - assert mf._datatype == int + assert mf._datatype is int assert not mf.nullable assert not mf._mutable assert mf.default == -99 @@ -83,7 +73,7 @@ def test_metafield_wrong_kind_for_update(): assert mf.keyword == "HI" assert mf.comment == "What's up?" assert mf.value == 3 - assert mf._datatype == int + assert mf._datatype is int assert not mf.nullable assert mf._mutable assert mf.default == -99 diff --git a/punchbowl/data/tests/test_units.py b/punchbowl/data/tests/test_units.py index ae3b582b..d1b990b0 100644 --- a/punchbowl/data/tests/test_units.py +++ b/punchbowl/data/tests/test_units.py @@ -17,6 +17,7 @@ def test_calculate_image_pixel_area_rotated_input(): wcs.wcs.crval = 0, 0 wcs.wcs.crpix = 256, 256 wcs.wcs.cdelt = 0.05, 0.05 + wcs.array_shape = shape area_maps.append(units.calculate_image_pixel_area(wcs, shape)) @@ -33,6 +34,7 @@ def test_calculate_image_pixel_area_output_shape(): wcs.wcs.crval = 0, 0 wcs.wcs.crpix = 512, 480 wcs.wcs.cdelt = 0.05, 0.05 + wcs.array_shape = shape area_map = units.calculate_image_pixel_area(wcs, shape) assert area_map.shape == shape diff --git a/punchbowl/level1/tests/test_deficient_pixel.py b/punchbowl/level1/tests/test_deficient_pixel.py index c3a34226..daaca542 100644 --- a/punchbowl/level1/tests/test_deficient_pixel.py +++ b/punchbowl/level1/tests/test_deficient_pixel.py @@ -5,10 +5,9 @@ from astropy.nddata import StdDevUncertainty from astropy.wcs import WCS from ndcube import NDCube -from prefect.logging import disable_run_logger from punchbowl.data import NormalizedMetadata -from punchbowl.level1.deficient_pixel import remove_deficient_pixels, remove_deficient_pixels_task +from punchbowl.level1.deficient_pixel import remove_deficient_pixels THIS_DIRECTORY = pathlib.Path(__file__).parent.resolve() @@ -34,6 +33,7 @@ def sample_bad_pixel_map(shape: tuple = (2048, 2048), n_bad_pixels: int = 20) -> wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape meta = NormalizedMetadata({"TYPECODE": "CL", "LEVEL": "1", "OBSRVTRY": "0", "DATE-OBS": "2008-01-03 08:57:00"}) return NDCube(data=bad_pixel_map, uncertainty=uncertainty, wcs=wcs, meta=meta) @@ -56,6 +56,7 @@ def perfect_pixel_map(shape: tuple = (2048, 2048)) -> NDCube: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape meta = NormalizedMetadata({"TYPECODE": "CL", "LEVEL": "1", "OBSRVTRY": "0", "DATE-OBS": "2008-01-03 08:57:00"}) return NDCube(data=bad_pixel_map, uncertainty=uncertainty, wcs=wcs, meta=meta) @@ -80,6 +81,7 @@ def one_bad_pixel_map(shape: tuple = (2048, 2048)) -> NDCube: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape meta = NormalizedMetadata({"TYPECODE": "CL", 
"LEVEL": "1", "OBSRVTRY": "0", "DATE-OBS": "2008-01-03 08:57:00"}) return NDCube(data=bad_pixel_map, uncertainty=uncertainty, wcs=wcs, meta=meta) @@ -112,6 +114,7 @@ def nine_bad_pixel_map(shape: tuple = (2048, 2048)) -> NDCube: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape meta = NormalizedMetadata({"TYPECODE": "CL", "LEVEL": "1", "OBSRVTRY": "0", "DATE-OBS": "2008-01-03 08:57:00"}) return NDCube(data=bad_pixel_map, uncertainty=uncertainty, wcs=wcs, meta=meta) @@ -134,6 +137,7 @@ def increasing_pixel_data(shape: tuple = (2048, 2048)) -> NDCube: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape meta = NormalizedMetadata({"TYPECODE": "CL", "LEVEL": "1", "OBSRVTRY": "0", "DATE-OBS": "2008-01-03 08:57:00"}) return NDCube(data=data, uncertainty=uncertainty, wcs=wcs, meta=meta) @@ -154,6 +158,7 @@ def sample_punchdata(shape: tuple = (2048, 2048)) -> NDCube: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape meta = NormalizedMetadata({"TYPECODE": "CL", "LEVEL": "1", "OBSRVTRY": "0", "DATE-OBS": "2008-01-03 08:57:00"}) return NDCube(data=data, uncertainty=uncertainty, wcs=wcs, meta=meta) diff --git a/punchbowl/level1/tests/test_sqrt.py b/punchbowl/level1/tests/test_sqrt.py index 4902f46c..d0024658 100644 --- a/punchbowl/level1/tests/test_sqrt.py +++ b/punchbowl/level1/tests/test_sqrt.py @@ -28,6 +28,7 @@ def sample_punchdata(): wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 0 wcs.wcs.cname = "HPC lon", "HPC lat" + wcs.array_shape = data.shape meta = NormalizedMetadata.load_template("PM1", "0") meta['DATE-OBS'] = str(datetime(2023, 1, 1, 0, 0, 1)) diff --git a/punchbowl/level2/tests/test_bright_structure.py b/punchbowl/level2/tests/test_bright_structure.py index 02313400..b0ff8d9c 100644 --- a/punchbowl/level2/tests/test_bright_structure.py +++ b/punchbowl/level2/tests/test_bright_structure.py @@ -1,18 +1,13 @@ -# Core Python imports -import pathlib - # Third party imports import numpy as np import pytest from astropy.nddata import StdDevUncertainty from astropy.wcs import WCS from ndcube import NDCube -from prefect.logging import disable_run_logger # punchbowl imports from punchbowl.data import NormalizedMetadata -from punchbowl.level1.deficient_pixel import remove_deficient_pixels_task -from punchbowl.level2.bright_structure import identify_bright_structures_task, run_zspike +from punchbowl.level2.bright_structure import run_zspike @pytest.fixture() @@ -37,6 +32,7 @@ def sample_bad_pixel_map(shape: tuple = (2048, 2048), n_bad_pixels: int = 20) -> wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape[-2:] meta = NormalizedMetadata({"TYPECODE": "CL", "LEVEL": "1", "OBSRVTRY": "0", "DATE-OBS": "2008-01-03 08:57:00"}) return NDCube(data=bad_pixel_map, uncertainty=uncertainty, wcs=wcs, meta=meta) @@ -56,6 +52,7 @@ def sample_punchdata(shape: tuple = (5, 2048, 2048)) -> NDCube: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape[-2:] meta = NormalizedMetadata({"TYPECODE": "CL", "LEVEL": "1", "OBSRVTRY": "0", "DATE-OBS": "2008-01-03 08:57:00"}) return NDCube(data=data, uncertainty=uncertainty, wcs=wcs, meta=meta) @@ -76,6 +73,7 @@ def even_sample_punchdata(shape: tuple = (6, 2048, 2048)) -> NDCube: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape[-2:] meta = 
NormalizedMetadata({"TYPECODE": "CL", "LEVEL": "1", "OBSRVTRY": "0", "DATE-OBS": "2008-01-03 08:57:00"}) return NDCube(data=data, uncertainty=uncertainty, wcs=wcs, meta=meta) @@ -96,6 +94,7 @@ def sample_zero_punchdata(shape: tuple = (5, 2048, 2048)) -> NDCube: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape[-2:] meta = NormalizedMetadata({"TYPECODE": "CL", "LEVEL": "1", "OBSRVTRY": "0", "DATE-OBS": "2008-01-03 08:57:00"}) return NDCube(data=data, uncertainty=uncertainty, wcs=wcs, meta=meta) @@ -121,6 +120,7 @@ def one_bright_point_sample_punchdata(shape: tuple = (7, 2048, 2048)) -> NDCube: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape[-2:] meta = NormalizedMetadata({"TYPECODE": "CL", "LEVEL": "1", "OBSRVTRY": "0", "DATE-OBS": "2008-01-03 08:57:00"}) return NDCube(data=data, uncertainty=uncertainty, wcs=wcs, meta=meta) @@ -146,6 +146,7 @@ def two_bright_point_sample_punchdata(shape: tuple = (7, 2048, 2048)) -> NDCube: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape[-2:] meta = NormalizedMetadata({"TYPECODE": "CL", "LEVEL": "1", "OBSRVTRY": "0", "DATE-OBS": "2008-01-03 08:57:00"}) return NDCube(data=data, uncertainty=uncertainty, wcs=wcs, meta=meta) @@ -169,12 +170,7 @@ def test_zero_threshold(sample_punchdata: NDCube): diff_method='abs') assert np.sum(result) == 0 - result2 = run_zspike(sample_punchdata.data, - sample_punchdata.uncertainty.array, - threshold=threshold, - diff_method='sigma') assert np.sum(result) == 0 - #assert np.array_equal(result_abs, result_sigma) def test_diff_methods(sample_zero_punchdata: NDCube): @@ -244,10 +240,10 @@ def test_single_bright_point_2(one_bright_point_sample_punchdata: NDCube): veto_limit=1) # test cell of interest is set to 'True' - assert result_2[x_interest, y_interest] == True + assert bool(result_2[x_interest, y_interest]) is True # test other cells are set to 'False' - assert result_2[x_interest+1, y_interest] == False + assert bool(result_2[x_interest+1, y_interest]) is False def test_veto(two_bright_point_sample_punchdata: NDCube): @@ -263,7 +259,7 @@ def test_veto(two_bright_point_sample_punchdata: NDCube): veto_limit=1) # test cell of interest is set to 'False' with veto - assert result_2[x_interest, y_interest] == True + assert bool(result_2[x_interest, y_interest]) is True two_bright_point_sample_punchdata.uncertainty.array[:, :, :] = 0 @@ -277,7 +273,7 @@ def test_veto(two_bright_point_sample_punchdata: NDCube): index_of_interest=-1) # test cell of interest is set to 'False' with veto - assert result_3[x_interest, y_interest] == False + assert bool(result_3[x_interest, y_interest]) is False def test_uncertainty(sample_punchdata: NDCube): @@ -299,7 +295,7 @@ def test_uncertainty(sample_punchdata: NDCube): index_of_interest=index_of_interest) # test cell of interest is set to 'True' - assert result_0[y_test_px, x_test_px] == False + assert bool(result_0[y_test_px, x_test_px]) is False # set pixel of interest to high value sample_punchdata.data[index_of_interest, y_test_px, x_test_px] = 1000 @@ -313,7 +309,7 @@ def test_uncertainty(sample_punchdata: NDCube): index_of_interest=index_of_interest) # test cell of interest is set to 'True' - assert result_1[y_test_px, x_test_px] == True + assert bool(result_1[y_test_px, x_test_px]) is True # make bad pixels adjacent to cell of interest also high # set pixel of interest to high value @@ -328,7 +324,7 @@ def 
test_uncertainty(sample_punchdata: NDCube): index_of_interest=index_of_interest) # test cell of interest is set to 'False' - assert result_2[y_test_px, x_test_px] == False + assert bool(result_2[y_test_px, x_test_px]) is False # set surrounding values to uncertain @@ -341,7 +337,7 @@ def test_uncertainty(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) # test cell of interest is set to 'True' due to uncertainty flag set on the surrounding pixels - assert result_3[y_test_px, x_test_px] == True + assert bool(result_3[y_test_px, x_test_px]) is True @@ -365,7 +361,7 @@ def test_threshold_abs(sample_punchdata: NDCube): index_of_interest=index_of_interest) - assert result_0[y_test_px, x_test_px] == False + assert bool(result_0[y_test_px, x_test_px]) is False # set pixel of interest to high value sample_punchdata.data[index_of_interest, y_test_px, x_test_px]=100 @@ -378,7 +374,7 @@ def test_threshold_abs(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result_1[y_test_px, x_test_px] == True + assert bool(result_1[y_test_px, x_test_px]) is True # make bad pixel threshold high @@ -391,7 +387,7 @@ def test_threshold_abs(sample_punchdata: NDCube): index_of_interest=index_of_interest) # test cell of interest is set to 'False' - assert result_2[y_test_px, x_test_px] == False + assert bool(result_2[y_test_px, x_test_px]) is False def test_threshold_sigma(sample_punchdata: NDCube): @@ -414,7 +410,7 @@ def test_threshold_sigma(sample_punchdata: NDCube): index_of_interest=index_of_interest) - assert result_0[y_test_px, x_test_px] == False + assert bool(result_0[y_test_px, x_test_px]) is False # set pixel of interest to high value sample_punchdata.data[index_of_interest, y_test_px, x_test_px]=100 @@ -428,7 +424,7 @@ def test_threshold_sigma(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result_1[y_test_px, x_test_px] == True + assert bool(result_1[y_test_px, x_test_px]) is True # make bad pixel threshold high @@ -441,7 +437,7 @@ def test_threshold_sigma(sample_punchdata: NDCube): index_of_interest=index_of_interest) # test cell of interest is set to 'False' - assert result_2[y_test_px, x_test_px] == False + assert bool(result_2[y_test_px, x_test_px]) is False @@ -465,7 +461,7 @@ def test_required_yes_abs(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result[y_test_px, x_test_px] == False + assert bool(result[y_test_px, x_test_px]) is False #sample_punchdata.data[0:3, y_test_px, x_test_px]=100 sample_punchdata.data[index_of_interest, y_test_px, x_test_px]=100 @@ -480,7 +476,7 @@ def test_required_yes_abs(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result_1[y_test_px, x_test_px] == True + assert bool(result_1[y_test_px, x_test_px]) is True # change the number adjacent elements that have high values, this # reduces the number of available yes voters, making the cell of @@ -498,7 +494,7 @@ def test_required_yes_abs(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result_2[y_test_px, x_test_px] == True + assert bool(result_2[y_test_px, x_test_px]) is True # change the number adjacent elements that have high values sample_punchdata.data[0:1, y_test_px, x_test_px]=100 @@ -514,7 +510,7 @@ def test_required_yes_abs(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result_3[y_test_px, x_test_px] == False + assert bool(result_3[y_test_px, x_test_px]) is 
False def test_required_yes_sigma(sample_punchdata: NDCube): @@ -535,7 +531,7 @@ def test_required_yes_sigma(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result[y_test_px, x_test_px] == False + assert bool(result[y_test_px, x_test_px]) is False #sample_punchdata.data[0:3, y_test_px, x_test_px]=100 sample_punchdata.data[index_of_interest, y_test_px, x_test_px]=10 @@ -550,7 +546,7 @@ def test_required_yes_sigma(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result_1[y_test_px, x_test_px] == True + assert bool(result_1[y_test_px, x_test_px]) is True # change the number adjacent elements that have high values, this # reduces the number of available yes voters, making the cell of @@ -568,7 +564,7 @@ def test_required_yes_sigma(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result_2[y_test_px, x_test_px] == True + assert bool(result_2[y_test_px, x_test_px]) is True # change the number adjacent elements that have high values sample_punchdata.data[0:1, y_test_px, x_test_px]=10 @@ -584,7 +580,7 @@ def test_required_yes_sigma(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result_3[y_test_px, x_test_px] == False + assert bool(result_3[y_test_px, x_test_px]) is False def test_dilation_abs(sample_punchdata: NDCube): # create an uncertainty and data array of 0's @@ -604,7 +600,7 @@ def test_dilation_abs(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result[y_test_px, x_test_px] == False + assert bool(result[y_test_px, x_test_px]) is False #sample_punchdata.data[0:3, y_test_px, x_test_px]=100 sample_punchdata.data[index_of_interest, y_test_px, x_test_px]=10 @@ -619,7 +615,7 @@ def test_dilation_abs(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result_1[y_test_px, x_test_px] == True + assert bool(result_1[y_test_px, x_test_px]) is True # change the number adjacent elements that have high values, this # reduces the number of available yes voters, making the cell of @@ -638,9 +634,9 @@ def test_dilation_abs(sample_punchdata: NDCube): index_of_interest=index_of_interest) - assert result_2[y_test_px, x_test_px] == True + assert bool(result_2[y_test_px, x_test_px]) is True # with no dilation a an adjacent pixel is false - assert result_2[y_test_px+1, x_test_px] == False + assert bool(result_2[y_test_px+1, x_test_px]) is False result_3 = run_zspike(sample_punchdata.data, sample_punchdata.uncertainty.array, @@ -652,9 +648,9 @@ def test_dilation_abs(sample_punchdata: NDCube): dilation=10) - assert result_3[y_test_px, x_test_px] == True + assert bool(result_3[y_test_px, x_test_px]) is True # with dilation an adjacent pixel is true - assert result_3[y_test_px+1, x_test_px] == True + assert bool(result_3[y_test_px+1, x_test_px]) is True @@ -682,7 +678,7 @@ def test_dilation_sigma(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result[y_test_px, x_test_px] == False + assert bool(result[y_test_px, x_test_px]) is False #sample_punchdata.data[0:3, y_test_px, x_test_px]=100 sample_punchdata.data[index_of_interest, y_test_px, x_test_px]=10 @@ -697,7 +693,7 @@ def test_dilation_sigma(sample_punchdata: NDCube): veto_limit=1, index_of_interest=index_of_interest) - assert result_1[y_test_px, x_test_px] == True + assert bool(result_1[y_test_px, x_test_px]) is True # change the number adjacent elements that have high values, this # reduces the number 
of available yes voters, making the cell of @@ -716,9 +712,9 @@ def test_dilation_sigma(sample_punchdata: NDCube): index_of_interest=index_of_interest) - assert result_2[y_test_px, x_test_px] == True + assert bool(result_2[y_test_px, x_test_px]) is True # with no dilation a an adjacent pixel is false - assert result_2[y_test_px+1, x_test_px] == False + assert bool(result_2[y_test_px+1, x_test_px]) is False result_3 = run_zspike(sample_punchdata.data, sample_punchdata.uncertainty.array, @@ -730,9 +726,9 @@ def test_dilation_sigma(sample_punchdata: NDCube): dilation=10) - assert result_3[y_test_px, x_test_px] == True + assert bool(result_3[y_test_px, x_test_px]) is True # with dilation an adjacent pixel is true - assert result_3[y_test_px+1, x_test_px] == True + assert bool(result_3[y_test_px+1, x_test_px]) is True diff --git a/punchbowl/level2/tests/test_merge.py b/punchbowl/level2/tests/test_merge.py index 85ff11ec..d46b1294 100644 --- a/punchbowl/level2/tests/test_merge.py +++ b/punchbowl/level2/tests/test_merge.py @@ -33,6 +33,8 @@ def test_merge_many_task(sample_data_list): """ trefoil_wcs = WCS("level2/data/trefoil_hdr.fits") trefoil_wcs.wcs.ctype = "HPLN-ARC", "HPLT-ARC" + trefoil_shape = (4096, 4096) + trefoil_wcs.array_shape = trefoil_shape output_punchdata = merge_many_polarized_task.fn(sample_data_list, trefoil_wcs) assert isinstance(output_punchdata, NDCube) diff --git a/punchbowl/level2/tests/test_resample.py b/punchbowl/level2/tests/test_resample.py index 70bbf742..55430735 100644 --- a/punchbowl/level2/tests/test_resample.py +++ b/punchbowl/level2/tests/test_resample.py @@ -21,8 +21,8 @@ def sample_punchdata_list(sample_ndcube): def test_reproject_many_flow(sample_punchdata_list): trefoil_wcs = WCS("level2/data/trefoil_hdr.fits") trefoil_wcs.wcs.ctype = "HPLN-ARC", "HPLT-ARC" # TODO: figure out why this is necessary, seems like a bug - trefoil_shape = (128, 128) + trefoil_wcs.array_shape = trefoil_shape with prefect_test_harness(): output = reproject_many_flow(sample_punchdata_list, trefoil_wcs, trefoil_shape) for result in output: diff --git a/punchbowl/level3/tests/test_f_corona_model.py b/punchbowl/level3/tests/test_f_corona_model.py index 1553a290..2c12540d 100644 --- a/punchbowl/level3/tests/test_f_corona_model.py +++ b/punchbowl/level3/tests/test_f_corona_model.py @@ -51,6 +51,7 @@ def one_data(shape: tuple = (2048, 2048)) -> np.ndarray: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape meta = NormalizedMetadata({"SECTION": { "TYPECODE": MetaField("TYPECODE", "", "CL", str, True, True, ""), @@ -74,6 +75,7 @@ def observation_data(shape: tuple = (2048, 2048)) -> np.ndarray: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape meta = NormalizedMetadata({"SECTION": { "TYPECODE": MetaField("TYPECODE", "", "CL", str, True, True, ""), @@ -98,6 +100,7 @@ def zero_data(shape: tuple = (2048, 2048)) -> np.ndarray: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 1024, 1024 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape meta = NormalizedMetadata({"SECTION": { "TYPECODE": MetaField("TYPECODE", "", "CL", str, True, True, ""), @@ -122,6 +125,7 @@ def incorrect_shape_data(shape: tuple = (512, 512)) -> np.ndarray: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 256, 256 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape meta = NormalizedMetadata({"SECTION": { "TYPECODE": MetaField("TYPECODE", "", "CL", str, True, True, ""), diff --git a/punchbowl/level3/tests/test_stellar.py 
b/punchbowl/level3/tests/test_stellar.py index 7fa9bea8..6361933d 100644 --- a/punchbowl/level3/tests/test_stellar.py +++ b/punchbowl/level3/tests/test_stellar.py @@ -41,6 +41,7 @@ def zero_starfield_data(shape: tuple = (256, 256)) -> Starfield: wcs.wcs.cdelt = 0.02, 0.02 wcs.wcs.crpix = 128, 128 wcs.wcs.crval = 0, 24.75 + wcs.array_shape = shape return Starfield(starfield=starfield, wcs=wcs) diff --git a/punchbowl/level3/tests/test_velocity.py b/punchbowl/level3/tests/test_velocity.py index 7f25e2bd..e86dcd63 100644 --- a/punchbowl/level3/tests/test_velocity.py +++ b/punchbowl/level3/tests/test_velocity.py @@ -3,7 +3,6 @@ import numpy as np import pytest -from astropy.io import fits from astropy.nddata import StdDevUncertainty from astropy.wcs import WCS from ndcube import NDCube @@ -44,6 +43,7 @@ def synthetic_data(tmpdir): wcs.wcs.cdelt = (0.02, 0.02) wcs.wcs.crpix = (64, 64) wcs.wcs.crval = (0, 24.75) + wcs.array_shape = data.shape # Define metadata for the NDCube meta = NormalizedMetadata.load_template('PTM', '3') @@ -118,7 +118,7 @@ def test_with_bad_data(tmpdir): cube = NDCube(data=data, wcs=wcs, meta=meta, uncertainty=uncertainty) # Write NDCube to a compressed FITS file using your custom function - file_path = os.path.join(str(tmpdir), f"bad_file.fits") + file_path = os.path.join(str(tmpdir), "bad_file.fits") write_ndcube_to_fits(cube, file_path) with pytest.raises(ValueError): diff --git a/punchbowl/level3/velocity.py b/punchbowl/level3/velocity.py index b0883067..408299fd 100644 --- a/punchbowl/level3/velocity.py +++ b/punchbowl/level3/velocity.py @@ -546,6 +546,7 @@ def track_velocity(files: list[str], wcs.wcs.crpix = 0, 0 wcs.wcs.crval = 0, 0 wcs.wcs.cname = "solar radii", "azimuth" + wcs.array_shape = avg_speeds.shape return NDCube(data = avg_speeds, uncertainty=StdDevUncertainty(sigmas), diff --git a/scripts/generate_templates.py b/scripts/generate_templates.py index 5ade81e5..df3693a2 100644 --- a/scripts/generate_templates.py +++ b/scripts/generate_templates.py @@ -25,6 +25,7 @@ def sample_ndcube(shape, code="PM1", level="0"): if level in ["2", "3"] and code[0] == "P": wcs = add_stokes_axis_to_wcs(wcs, 2) + wcs.array_shape = shape meta = NormalizedMetadata.load_template(code, level) meta['DATE-OBS'] = str(datetime(2024, 1, 1, 0, 0, 0))
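
As a quick illustration of what this series changes, a minimal sketch (assuming punchbowl and astropy are installed; the values below come from load_trefoil_wcs() in punchbowl/data/wcs.py as patched above and are not part of the patches themselves):

    from punchbowl.data.wcs import load_trefoil_wcs

    # The loader still returns (wcs, shape), now annotated as
    # tuple[astropy.wcs.WCS, tuple[int, int]] to satisfy ruff.
    trefoil_wcs, trefoil_shape = load_trefoil_wcs()

    # After the second patch the pre-built WCS also carries its own array
    # shape, so downstream code can read it directly from the WCS instead
    # of threading the shape through separately.
    assert trefoil_wcs.array_shape == trefoil_shape == (4096, 4096)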