Merge branch 'dev' of github.com:IMAP-Science-Operations-Center/imap_processing into codice-lo-sw-angular-validation
bourque committed Jan 8, 2025
2 parents 0365396 + a5dc94a commit 5ec109f
Showing 10 changed files with 87 additions and 82 deletions.
2 changes: 2 additions & 0 deletions .github/workflows/test.yml
@@ -29,6 +29,8 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
- uses: Gr1N/setup-poetry@v8
with:
poetry-version: "1.8.0"


- name: Install dependencies and app
84 changes: 54 additions & 30 deletions imap_processing/hi/l1c/hi_l1c.py
@@ -7,7 +7,7 @@

from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
from imap_processing.cdf.utils import parse_filename_like
from imap_processing.hi.utils import full_dataarray
from imap_processing.hi.utils import create_dataset_variables, full_dataarray

logger = logging.getLogger(__name__)

@@ -53,7 +53,7 @@ def hi_l1c(dependencies: list, data_version: str) -> xr.Dataset:

def generate_pset_dataset(de_dataset: xr.Dataset) -> xr.Dataset:
"""
Will process IMAP-Hi l1b product into a l1c pset xarray dataset.
Generate IMAP-Hi l1c pset xarray dataset from l1b product.

Parameters
----------
@@ -66,19 +66,38 @@ def generate_pset_dataset(de_dataset: xr.Dataset) -> xr.Dataset:
Ready to be written to CDF.
"""
logical_source_parts = parse_filename_like(de_dataset.attrs["Logical_source"])
n_esa_step = de_dataset.esa_step.data.size
pset_dataset = allocate_pset_dataset(n_esa_step, logical_source_parts["sensor"])
# TODO: Stored epoch value needs to be consistent across ENA instruments.
# SPDF says this should be the center of the time bin, but instrument
# teams may disagree.
pset_dataset.epoch.data[0] = de_dataset.epoch.data[0]
n_esa_step = len(np.unique(de_dataset.esa_step.data))
pset_dataset = empty_pset_dataset(n_esa_step, logical_source_parts["sensor"])
# For ISTP, epoch should be the center of the time bin.
pset_dataset.epoch.data[0] = np.mean(de_dataset.epoch.data[[0, -1]]).astype(
np.int64
)

pset_dataset.update(pset_geometry())

# TODO: The following section will go away as PSET algorithms to populate
# these variables are written.
attr_mgr = ImapCdfAttributes()
attr_mgr.add_instrument_global_attrs("hi")
attr_mgr.add_instrument_variable_attrs(instrument="hi", level=None)
for var_name in [
"counts",
"exposure_times",
"background_rates",
"background_rates_uncertainty",
]:
pset_dataset[var_name] = full_dataarray(
var_name,
attr_mgr.get_variable_attributes(f"hi_pset_{var_name}", check_schema=False),
pset_dataset.coords,
)

return pset_dataset


def allocate_pset_dataset(n_esa_steps: int, sensor_str: str) -> xr.Dataset:
def empty_pset_dataset(n_esa_steps: int, sensor_str: str) -> xr.Dataset:
"""
Allocate an empty xarray.Dataset.
Allocate an empty xarray.Dataset with appropriate pset coordinates.

Parameters
----------
@@ -141,27 +160,8 @@ def allocate_pset_dataset(n_esa_steps: int, sensor_str: str) -> xr.Dataset:
attrs=attrs,
)

# Allocate the variables
# Allocate the coordinate label variables
data_vars = dict()
# despun_z is a 1x3 unit vector that does not have a DEPEND_1.
# Define this dict to override the shape produced in full_dataarray
var_shapes = {"despun_z": (1, 3)}
for var_name in [
"despun_z",
"hae_latitude",
"hae_longitude",
"counts",
"exposure_times",
"background_rates",
"background_rates_uncertainty",
]:
data_vars[var_name] = full_dataarray(
var_name,
attr_mgr.get_variable_attributes(f"hi_pset_{var_name}", check_schema=False),
coords,
shape=var_shapes.get(var_name, None),
)

# Generate label variables
data_vars["esa_energy_step_label"] = xr.DataArray(
coords["esa_energy_step"].values.astype(str),
@@ -202,3 +202,27 @@ def allocate_pset_dataset(n_esa_steps: int, sensor_str: str) -> xr.Dataset:
)
dataset = xr.Dataset(data_vars=data_vars, coords=coords, attrs=pset_global_attrs)
return dataset


def pset_geometry() -> dict[str, xr.DataArray]:
"""
Calculate PSET geometry variables.

Returns
-------
geometry_vars : dict[str, xarray.DataArray]
Keys are variable names and values are data arrays.
"""
geometry_vars = create_dataset_variables(
["despun_z"], (1, 3), att_manager_lookup_str="hi_pset_{0}"
)
# TODO: Calculate despun_z
geometry_vars.update(
create_dataset_variables(
["hae_latitude", "hae_longitude"],
(1, 3600),
att_manager_lookup_str="hi_pset_{0}",
)
)
# TODO: Calculate HAE Lat/Lon
return geometry_vars
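
Two behavior changes in generate_pset_dataset above are worth unpacking: the ESA-step count now counts distinct ESA steps rather than taking the raw array length, and the stored epoch is the midpoint of the first and last input epochs instead of a copy of the first. A minimal standalone sketch of both, using made-up nanosecond epoch values rather than real L1B data:

import numpy as np

# Distinct ESA steps, not raw length: repeated steps collapse to one count.
esa_step = np.array([1, 1, 2, 2, 2, 3])
n_esa_step = len(np.unique(esa_step))  # 3, whereas esa_step.size would be 6

# Bin-center epoch: mean of the first and last epochs, cast back to int64
# because np.mean promotes integer input to float64.
epochs = np.array([798_424_869_184_000_000, 798_425_769_184_000_000], dtype=np.int64)
pset_epoch = np.mean(epochs[[0, -1]]).astype(np.int64)  # center of the time bin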
4 changes: 2 additions & 2 deletions imap_processing/tests/hi/conftest.py
@@ -14,8 +14,8 @@ def hi_l0_test_data_path(hi_test_data_path):


@pytest.fixture(scope="session")
def hi_l1a_test_data_path(hi_test_data_path):
return hi_test_data_path / "l1a"
def hi_l1_test_data_path(hi_test_data_path):
return hi_test_data_path / "l1"


def create_metaevent(esa_step, met_subseconds, met_seconds):
Binary file not shown.
Binary file not shown.
Binary file not shown.
4 changes: 2 additions & 2 deletions imap_processing/tests/hi/test_hi_l1b.py
@@ -35,14 +35,14 @@ def test_hi_l1b_hk(hi_l0_test_data_path):
@pytest.mark.external_kernel()
@pytest.mark.use_test_metakernel("imap_ena_sim_metakernel.template")
def test_hi_l1b_de(
hi_l1a_test_data_path, spice_test_data_path, use_fake_spin_data_for_time
hi_l1_test_data_path, spice_test_data_path, use_fake_spin_data_for_time
):
"""Test coverage for imap_processing.hi.hi_l1b.hi_l1b() with
direct events L1A as input"""
# Start MET time of spin for simulated input data is 482372988
use_fake_spin_data_for_time(482372988)
l1a_test_file_path = (
hi_l1a_test_data_path / "imap_hi_l1a_45sensor-de_20250415_v000.cdf"
hi_l1_test_data_path / "imap_hi_l1a_45sensor-de_20250415_v999.cdf"
)
# Process using test data
data_version = "001"
62 changes: 26 additions & 36 deletions imap_processing/tests/hi/test_hi_l1c.py
@@ -1,56 +1,46 @@
"""Test coverage for imap_processing.hi.l1c.hi_l1c.py"""

import numpy as np
import pytest

from imap_processing.cdf.utils import write_cdf
from imap_processing.hi.l1a.hi_l1a import hi_l1a
from imap_processing.hi.l1b.hi_l1b import hi_l1b
from imap_processing.cdf.utils import load_cdf, write_cdf
from imap_processing.hi.l1c import hi_l1c
from imap_processing.hi.utils import HIAPID


@pytest.mark.skip(
reason="See TODO in test comments. Need to convert this test"
"to use a test L1B file rather than running L1B on fake"
"data."
)
def test_generate_pset_dataset(create_de_data):
def test_generate_pset_dataset(hi_l1_test_data_path):
"""Test coverage for generate_pset_dataset function"""
# TODO: once things are more stable, check in an L1B DE file as test data?
# For now, test using false de data run through l1a and l1b processing
bin_data_path = create_de_data(HIAPID.H45_SCI_DE.value)
processed_data = hi_l1a(bin_data_path, "002")
l1b_dataset = hi_l1b(processed_data[0], "002")

l1b_de_path = hi_l1_test_data_path / "imap_hi_l1b_45sensor-de_20250415_v999.cdf"
l1b_dataset = load_cdf(l1b_de_path)
l1c_dataset = hi_l1c.generate_pset_dataset(l1b_dataset)

assert l1c_dataset.epoch.data[0] == l1b_dataset.epoch.data[0]
assert l1c_dataset.epoch.data[0] == np.mean(l1b_dataset.epoch.data[[0, -1]]).astype(
np.int64
)

np.testing.assert_array_equal(l1c_dataset.despun_z.data.shape, (1, 3))
np.testing.assert_array_equal(l1c_dataset.hae_latitude.data.shape, (1, 3600))
np.testing.assert_array_equal(l1c_dataset.hae_longitude.data.shape, (1, 3600))
for var in [
"counts",
"exposure_times",
"background_rates",
"background_rates_uncertainty",
]:
np.testing.assert_array_equal(l1c_dataset[var].data.shape, (1, 9, 5, 3600))

# Test ISTP compliance by writing CDF
l1c_dataset.attrs["Data_version"] = 1
write_cdf(l1c_dataset)

def test_allocate_pset_dataset():
"""Test coverage for allocate_pset_dataset function"""
n_esa_steps = 10

def test_empty_pset_dataset():
"""Test coverage for empty_pset_dataset function"""
n_esa_steps = 9
n_calibration_prods = 5
sensor_str = HIAPID.H90_SCI_DE.sensor
dataset = hi_l1c.allocate_pset_dataset(n_esa_steps, sensor_str)
dataset = hi_l1c.empty_pset_dataset(n_esa_steps, sensor_str)

assert dataset.epoch.size == 1
assert dataset.spin_angle_bin.size == 3600
assert dataset.esa_energy_step.size == n_esa_steps
assert dataset.calibration_prod.size == n_calibration_prods
np.testing.assert_array_equal(dataset.despun_z.data.shape, (1, 3))
np.testing.assert_array_equal(dataset.hae_latitude.data.shape, (1, 3600))
np.testing.assert_array_equal(dataset.hae_longitude.data.shape, (1, 3600))
for var in [
"counts",
"exposure_times",
"background_rates",
"background_rates_uncertainty",
]:
np.testing.assert_array_equal(
dataset[var].data.shape, (1, n_esa_steps, n_calibration_prods, 3600)
)
# Verify resulting CDF is ISTP compliant by writing to disk
dataset.attrs["Data_version"] = 1
write_cdf(dataset)
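
The shape assertions above all come from the attribute-driven allocation used in hi_l1c: variable metadata (dimensions, fill value, dtype) is looked up through ImapCdfAttributes, and the array is pre-filled at the dimensioned shape. A rough, hypothetical sketch of that pattern, assuming a simplified attrs dict in place of what get_variable_attributes actually returns (full_dataarray_sketch is an illustrative stand-in, not the repository's implementation):

import numpy as np
import xarray as xr

def full_dataarray_sketch(name, attrs, coords, shape=None):
    # Size the array from its coordinate dimensions unless an explicit
    # shape override is given (as the old code did for despun_z).
    if shape is None:
        shape = tuple(coords[dim].size for dim in attrs["depends"])
    data = np.full(shape, attrs["fillval"], dtype=attrs["dtype"])
    return xr.DataArray(data, dims=attrs["depends"], name=name)

coords = {
    "epoch": xr.DataArray(np.zeros(1, dtype=np.int64), dims="epoch"),
    "spin_angle_bin": xr.DataArray(np.arange(3600), dims="spin_angle_bin"),
}
attrs = {"depends": ("epoch", "spin_angle_bin"), "fillval": -1.0e31, "dtype": np.float32}
da = full_dataarray_sketch("hae_latitude", attrs, coords)
print(da.shape)  # (1, 3600), the shape asserted for hae_latitude above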
11 changes: 0 additions & 11 deletions poetry.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,5 +1,5 @@
[build-system]
requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning>=1.0.0,<2.0.0"]
requires = ["poetry-core>=1.0.0,<2.0", "poetry-dynamic-versioning>=1.0.0,<2.0.0"]
build-backend = "poetry_dynamic_versioning.backend"

[tool.poetry]
