Skip to content

Commit

Permalink
Merge branch 'openradar:main' into auto-read
Browse files Browse the repository at this point in the history
  • Loading branch information
syedhamidali authored Nov 4, 2024
2 parents 162828e + ccc1c9d commit 749fc5e
Show file tree
Hide file tree
Showing 22 changed files with 696 additions and 289 deletions.
34 changes: 26 additions & 8 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -29,12 +29,30 @@ Xradar is considered stable for the implemented readers and writers which have b

## Features

* Import/Export CfRadial1 data
* Import/Export CfRadial2 data
* Import/Export ODIM_H5 data
* Import GAMIC HDF5
* Import Rainbow5
* Import Iris/Sigmet
* Import Furuno SCN/SCNX
* Georeferencing (AEQD)
### Import/Export Capabilities
* CfRadial1 and CfRadial2
* ODIM_H5 format

### Import-Only Capabilities
* DataMet
* Furuno
* Gamic
* HPL
* Iris
* MRR
* NexradLevel2
* Rainbow

### Data Transformation and Alignment
* Georeferencing (AEQD projection)
* Angle Reindexing
* Format Transformation support to CfRadial1 and CfRadial2

> ***Note**: All formats load into CfRadial2, so converting to CfRadial1 is seamless.*
## Contributors

Thanks to our many contributors!

[![Contributors](https://contrib.rocks/image?repo=openradar/xradar)](https://github.com/openradar/xradar/graphs/contributors)

5 changes: 5 additions & 0 deletions docs/history.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,11 @@

This is the first version which uses datatree directly from xarray. Thus, xarray is pinned to version >= 2024.10.0.

* FIX: Convert volumes to_cfradial1 containing sweeps with different range and azimuth shapes, raise for different range bin sizes ({issue}`233`) by [@syedhamidali](https://github.com/syedhamidali), ({pull}`234`) by [@kmuehlbauer](https://github.com/kmuehlbauer).
* FIX: Correctly handle 8bit/16bit, big-endian/little-endian in nexrad reader (PHI and ZDR) ({issue}`230`) by [@syedhamidali](https://github.com/syedhamidali), ({pull}`231`) by [@kmuehlbauer](https://github.com/kmuehlbauer).
* ENH: Refactoring all xradar backends to use `from_dict` datatree constructor. Tests for `_get_required_root`, `_get_subgroup`, and `_get_radar_calibration` were also added ({pull}`221`) by [@aladinor](https://github.com/aladinor).
* ENH: Added pytests to the missing functions in the `test_xradar` and `test_iris` in order to increase codecov in ({pull}`228`) by [@syedhamidali](https://github.com/syedhamidali).
* ENH: Updated Readme ({pull}`226`) by [@syedhamidali](https://github.com/syedhamidali).
* ADD: Added new module `transform` for transforming CF1 data to CF2 and vice versa ({pull}`224`) by [@syedhamidali](https://github.com/syedhamidali).
* Use DataTree from xarray and add xarray nightly run ({pull}`213`, {pull}`214`, {pull}`215`, {pull}`218`) by [@kmuehlbauer](https://github.com/kmuehlbauer).
* ADD: Added new accessor `map_over_sweeps` for volume operations on DataTrees and a matching decorator ({pull}`203`) by [@syedhamidali](https://github.com/syedhamidali).
Expand Down
111 changes: 55 additions & 56 deletions tests/io/test_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ def test_open_odim_datatree_sweep(odim_file, sweep):
lswp = len([sweep])
else:
lswp = len(sweep)
assert len(dtree.groups[1:]) == lswp
assert len(dtree.match("sweep_*")) == lswp


def test_open_odim_datatree(odim_file):
Expand Down Expand Up @@ -164,7 +164,7 @@ def test_open_odim_datatree(odim_file):
200,
200,
]
for i, grp in enumerate(dtree.groups[1:]):
for i, grp in enumerate(dtree.match("sweep_*")):
ds = dtree[grp].ds
assert dict(ds.sizes) == {"azimuth": azimuths[i], "range": ranges[i]}
assert set(ds.data_vars) & (
Expand All @@ -183,7 +183,7 @@ def test_open_odim_datatree(odim_file):
"range",
}
assert np.round(ds.elevation.mean().values.item(), 1) == elevations[i]
assert ds.sweep_number.values == int(grp[7:])
assert ds.sweep_number.values == int(grp[6:])


@pytest.mark.parametrize("first_dim", ["auto", "time"])
Expand Down Expand Up @@ -258,7 +258,7 @@ def test_open_gamic_datatree_sweep(gamic_file, sweep):
lswp = len([sweep])
else:
lswp = len(sweep)
assert len(dtree.groups[1:]) == lswp
assert len(dtree.match("sweep_*")) == lswp


def test_open_gamic_datatree(gamic_file):
Expand Down Expand Up @@ -319,7 +319,7 @@ def test_open_gamic_datatree(gamic_file):
1000,
1000,
]
for i, grp in enumerate(dtree.groups[1:]):
for i, grp in enumerate(dtree.match("sweep_*")):
ds = dtree[grp].ds
assert dict(ds.sizes) == {"azimuth": azimuths[i], "range": ranges[i]}
assert set(ds.data_vars) & (
Expand Down Expand Up @@ -545,7 +545,7 @@ def test_open_rainbow_datatree(rainbow_file):
]
azimuths = [361] * 14
ranges = [400] * 14
for i, grp in enumerate(dtree.groups[1:]):
for i, grp in enumerate(dtree.match("sweep_*")):
ds = dtree[grp].ds
assert dict(ds.sizes) == {"azimuth": azimuths[i], "range": ranges[i]}
assert set(ds.data_vars) & (
Expand Down Expand Up @@ -641,28 +641,27 @@ def test_open_iris_datatree(iris0_file):
azimuths = [360] * 10
ranges = [664] * 10
i = 0
for grp in dtree.groups:
if grp.startswith("/sweep_"):
ds = dtree[grp].ds
assert dict(ds.sizes) == {"azimuth": azimuths[i], "range": ranges[i]}
assert set(ds.data_vars) & (
sweep_dataset_vars | non_standard_sweep_dataset_vars
) == set(moments)
assert set(ds.data_vars) & (required_sweep_metadata_vars) == set(
required_sweep_metadata_vars ^ {"azimuth", "elevation"}
)
assert set(ds.coords) == {
"azimuth",
"elevation",
"time",
"latitude",
"longitude",
"altitude",
"range",
}
assert np.round(ds.elevation.mean().values.item(), 1) == elevations[i]
assert ds.sweep_number == i
i += 1
for grp in dtree.match("sweep_*"):
ds = dtree[grp].ds
assert dict(ds.sizes) == {"azimuth": azimuths[i], "range": ranges[i]}
assert set(ds.data_vars) & (
sweep_dataset_vars | non_standard_sweep_dataset_vars
) == set(moments)
assert set(ds.data_vars) & (required_sweep_metadata_vars) == set(
required_sweep_metadata_vars ^ {"azimuth", "elevation"}
)
assert set(ds.coords) == {
"azimuth",
"elevation",
"time",
"latitude",
"longitude",
"altitude",
"range",
}
assert np.round(ds.elevation.mean().values.item(), 1) == elevations[i]
assert ds.sweep_number == i
i += 1


def test_open_iris0_dataset(iris0_file):
Expand Down Expand Up @@ -879,36 +878,35 @@ def test_open_datamet_datatree(datamet_file):
azimuths = [360] * 11
ranges = [493, 493, 493, 664, 832, 832, 1000, 1000, 1332, 1332, 1332]
i = 0
for grp in dtree.groups:
if grp.startswith("/sweep_"):
ds = dtree[grp].ds
assert dict(ds.sizes) == {"azimuth": azimuths[i], "range": ranges[i]}
assert set(ds.data_vars) & (
sweep_dataset_vars | non_standard_sweep_dataset_vars
) == set(moments)
assert set(ds.data_vars) & (required_sweep_metadata_vars) == set(
required_sweep_metadata_vars ^ {"azimuth", "elevation"}
)
assert set(ds.coords) == {
"azimuth",
"elevation",
"time",
"latitude",
"longitude",
"altitude",
"range",
}
assert np.round(ds.elevation.mean().values.item(), 1) == elevations[i]
assert ds.sweep_number == i
i += 1
for grp in dtree.match("sweep_*"):
ds = dtree[grp].ds
assert dict(ds.sizes) == {"azimuth": azimuths[i], "range": ranges[i]}
assert set(ds.data_vars) & (
sweep_dataset_vars | non_standard_sweep_dataset_vars
) == set(moments)
assert set(ds.data_vars) & (required_sweep_metadata_vars) == set(
required_sweep_metadata_vars ^ {"azimuth", "elevation"}
)
assert set(ds.coords) == {
"azimuth",
"elevation",
"time",
"latitude",
"longitude",
"altitude",
"range",
}
assert np.round(ds.elevation.mean().values.item(), 1) == elevations[i]
assert ds.sweep_number == i
i += 1

    # Try to read single sweep
dtree = open_datamet_datatree(datamet_file, sweep=1)
assert len(dtree.groups) == 2
assert len(dtree.groups) == 5

# Try to read list of sweeps
dtree = open_datamet_datatree(datamet_file, sweep=[1, 2])
assert len(dtree.groups) == 3
assert len(dtree.groups) == 6


@pytest.mark.parametrize("first_dim", ["time", "auto"])
Expand Down Expand Up @@ -993,6 +991,7 @@ def test_cfradial_n_points_file(cfradial1n_file):
assert ds.sweep_mode == "azimuth_surveillance"


@pytest.mark.run(order=1)
@pytest.mark.parametrize("sweep", ["sweep_0", 0, [0, 1], ["sweep_0", "sweep_1"]])
@pytest.mark.parametrize(
"nexradlevel2_files", ["nexradlevel2_gzfile", "nexradlevel2_bzfile"], indirect=True
Expand All @@ -1003,7 +1002,7 @@ def test_open_nexradlevel2_datatree_sweep(nexradlevel2_files, sweep):
lswp = len([sweep])
else:
lswp = len(sweep)
assert len(dtree.groups[1:]) == lswp
assert len(dtree.match("sweep*")) == lswp


@pytest.mark.parametrize(
Expand Down Expand Up @@ -1080,8 +1079,8 @@ def test_open_nexradlevel2_datatree(nexradlevel2_files):
308,
232,
]
assert len(dtree.groups[1:]) == 11
for i, grp in enumerate(dtree.groups[1:]):
assert len(dtree.groups[1:]) == 14
for i, grp in enumerate(dtree.match("sweep_*")):
print(i)
ds = dtree[grp].ds
assert dict(ds.sizes) == {"azimuth": azimuths[i], "range": ranges[i]}
Expand All @@ -1101,4 +1100,4 @@ def test_open_nexradlevel2_datatree(nexradlevel2_files):
"range",
}
assert np.round(ds.elevation.mean().values.item(), 1) == elevations[i]
assert ds.sweep_number.values == int(grp[7:])
assert ds.sweep_number.values == int(grp[6:])
92 changes: 92 additions & 0 deletions tests/io/test_iris.py
Original file line number Diff line number Diff line change
Expand Up @@ -197,3 +197,95 @@ def test_decode_string():
def test__get_fmt_string():
fmt = "<12sHHi12s12s12s6s12s12sHiiiiiiiiii2sH12sHB1shhiihh80s16s12s48s"
assert iris._get_fmt_string(iris.PRODUCT_CONFIGURATION) == fmt


def test_read_from_record(iris0_file, file_or_filelike):
"""Test reading a specified number of words from a record."""
with _get_data_file(iris0_file, file_or_filelike) as sigmetfile:
data = iris.IrisRecordFile(sigmetfile, loaddata=True)
data.init_record(0) # Start from the first record
record_data = data.read_from_record(10, dtype="int16")
assert len(record_data) == 10
assert isinstance(record_data, np.ndarray)


def test_decode_data(iris0_file, file_or_filelike):
"""Test decoding of data with provided product function."""

# Sample data to decode
data = np.array([0, 2, 3, 128, 255], dtype="int16")
# Sample product dict with decoding function and parameters
prod = {
"func": iris.decode_vel,
"dtype": "int16",
"fkw": {"scale": 0.5, "offset": -1},
}

# Open the file as per the testing framework
with _get_data_file(iris0_file, file_or_filelike) as sigmetfile:
iris_file = iris.IrisRawFile(sigmetfile, loaddata=False)

# Decode data using the provided product function
decoded_data = iris_file.decode_data(data, prod)

# Check that the decoded data is as expected
assert isinstance(decoded_data, np.ndarray), "Decoded data should be a numpy array"
assert decoded_data.dtype in [
np.float32,
np.float64,
], "Decoded data should have float32 or float64 type"

# Expected decoded values
expected_data = [-13.325, 13.325, 26.65, 1692.275, 3384.55]
np.testing.assert_array_almost_equal(decoded_data, expected_data, decimal=2)


def test_get_sweep(iris0_file, file_or_filelike):
"""Test retrieval of sweep data for specified moments."""

# Select the sweep number and moments to retrieve
sweep_number = 1
moments = ["DB_DBZ", "DB_VEL"]

# Open the file and load data
with _get_data_file(iris0_file, file_or_filelike) as sigmetfile:
iris_file = iris.IrisRawFile(sigmetfile, loaddata=True)

# Use get_sweep to retrieve data for the selected sweep and moments
iris_file.get_sweep(sweep_number, moments)
sweep_data = iris_file.data[sweep_number]["sweep_data"]

# Verify that sweep_data structure is populated with the selected moments
for moment in moments:
assert moment in sweep_data, f"{moment} should be in sweep_data"
moment_data = sweep_data[moment]
assert moment_data.shape == (360, 664), f"{moment} data shape mismatch"

# Check data types for moments, including masked arrays for velocity
if moment == "DB_VEL":
assert isinstance(
moment_data, np.ma.MaskedArray
), "DB_VEL should be a masked array"
else:
assert isinstance(
moment_data, np.ndarray
), f"{moment} should be a numpy array"

# Optional: check for expected placeholder/masked values
if moment == "DB_DBZ":
assert (
moment_data == -32
).sum() > 0, "DB_DBZ should contain placeholder values (-32)"
if moment == "DB_VEL":
assert moment_data.mask.sum() > 0, "DB_VEL should have masked values"


def test_array_from_file(iris0_file, file_or_filelike):
"""Test retrieving an array from a file."""
with _get_data_file(iris0_file, file_or_filelike) as sigmetfile:
data = iris.IrisRawFile(sigmetfile, loaddata=True)
array_data = data.read_from_file(5) # Adjusted to read_from_file

# Assertions for the read array
assert len(array_data) == 5
assert isinstance(array_data, np.ndarray)
Loading

0 comments on commit 749fc5e

Please sign in to comment.