From dcb80338c5794490a99db58bbcdc7c25a9dd0439 Mon Sep 17 00:00:00 2001
From: Paul Romano
Date: Tue, 11 Jun 2024 21:56:31 -0500
Subject: [PATCH] Eliminate deprecation warnings from scipy and pandas (#2951)

---
 openmc/data/photon.py                | 12 +++++++-----
 openmc/mgxs_library.py               | 18 ++++++++++++------
 tests/unit_tests/test_data_photon.py |  7 ++++---
 3 files changed, 23 insertions(+), 14 deletions(-)

diff --git a/openmc/data/photon.py b/openmc/data/photon.py
index c9d51f06238..0d0419f0724 100644
--- a/openmc/data/photon.py
+++ b/openmc/data/photon.py
@@ -124,7 +124,7 @@ class AtomicRelaxation(EqualityMixin):
         Dictionary indicating the number of electrons in a subshell when
         neutral (values) for given subshells (keys). The subshells should be
         given as strings, e.g., 'K', 'L1', 'L2', etc.
-    transitions : pandas.DataFrame
+    transitions : dict of str to pandas.DataFrame
         Dictionary indicating allowed transitions and their probabilities
         (values) for given subshells (keys). The subshells should be given as
         strings, e.g., 'K', 'L1', 'L2', etc. The transitions are represented as
@@ -363,8 +363,9 @@ def from_hdf5(cls, group):
             df = pd.DataFrame(sub_group['transitions'][()], columns=columns)
 
             # Replace float indexes back to subshell strings
-            df[columns[:2]] = df[columns[:2]].replace(
-                np.arange(float(len(_SUBSHELLS))), _SUBSHELLS)
+            with pd.option_context('future.no_silent_downcasting', True):
+                df[columns[:2]] = df[columns[:2]].replace(
+                    np.arange(float(len(_SUBSHELLS))), _SUBSHELLS)
             transitions[shell] = df
 
         return cls(binding_energy, num_electrons, transitions)
@@ -387,8 +388,9 @@ def to_hdf5(self, group, shell):
 
         # Write transition data with replacements
         if shell in self.transitions:
-            df = self.transitions[shell].replace(
-                _SUBSHELLS, range(len(_SUBSHELLS)))
+            with pd.option_context('future.no_silent_downcasting', True):
+                df = self.transitions[shell].replace(
+                    _SUBSHELLS, range(len(_SUBSHELLS)))
             group.create_dataset('transitions', data=df.values.astype(float))
 
diff --git a/openmc/mgxs_library.py b/openmc/mgxs_library.py
index 642da83d170..d1b96289081 100644
--- a/openmc/mgxs_library.py
+++ b/openmc/mgxs_library.py
@@ -3,7 +3,7 @@
 
 import h5py
 import numpy as np
-from scipy.integrate import simps
+import scipy.integrate
 from scipy.interpolate import interp1d
 from scipy.special import eval_legendre
 
@@ -1823,6 +1823,12 @@ def convert_scatter_format(self, target_format, target_order=None):
         # Reset and re-generate XSdata.xs_shapes with the new scattering format
         xsdata._xs_shapes = None
 
+        # Use 'simpson' if available; 'simps' is deprecated/removed in newer scipy
+        if hasattr(scipy.integrate, 'simpson'):
+            integrate = scipy.integrate.simpson
+        else:
+            integrate = scipy.integrate.simps
+
         for i, temp in enumerate(xsdata.temperatures):
             orig_data = self._scatter_matrix[i]
             new_shape = orig_data.shape[:-1] + (xsdata.num_orders,)
@@ -1860,7 +1866,7 @@ def convert_scatter_format(self, target_format, target_order=None):
                                 table_fine[..., imu] += ((l + 0.5) *
                                      eval_legendre(l, mu_fine[imu]) *
                                      orig_data[..., l])
-                        new_data[..., h_bin] = simps(table_fine, mu_fine)
+                        new_data[..., h_bin] = integrate(table_fine, x=mu_fine)
 
             elif self.scatter_format == SCATTER_TABULAR:
                 # Calculate the mu points of the current data
@@ -1874,7 +1880,7 @@ def convert_scatter_format(self, target_format, target_order=None):
                     for l in range(xsdata.num_orders):
                         y = (interp1d(mu_self, orig_data)(mu_fine) *
                              eval_legendre(l, mu_fine))
-                        new_data[..., l] = simps(y, mu_fine)
+                        new_data[..., l] = integrate(y, x=mu_fine)
 
                 elif target_format == SCATTER_TABULAR:
                     # Simply use an interpolating function to get the new data
@@ -1893,7 +1899,7 @@ def convert_scatter_format(self, target_format, target_order=None):
                     interp = interp1d(mu_self, orig_data)
                     for h_bin in range(xsdata.num_orders):
                         mu_fine = np.linspace(mu[h_bin], mu[h_bin + 1], _NMU)
-                        new_data[..., h_bin] = simps(interp(mu_fine), mu_fine)
+                        new_data[..., h_bin] = integrate(interp(mu_fine), x=mu_fine)
 
             elif self.scatter_format == SCATTER_HISTOGRAM:
                 # The histogram format does not have enough information to
@@ -1919,7 +1925,7 @@ def convert_scatter_format(self, target_format, target_order=None):
                     mu_fine = np.linspace(-1, 1, _NMU)
                     for l in range(xsdata.num_orders):
                         y = interp(mu_fine) * norm * eval_legendre(l, mu_fine)
-                        new_data[..., l] = simps(y, mu_fine)
+                        new_data[..., l] = integrate(y, x=mu_fine)
 
                 elif target_format == SCATTER_TABULAR:
                     # Simply use an interpolating function to get the new data
@@ -1938,7 +1944,7 @@ def convert_scatter_format(self, target_format, target_order=None):
                     for h_bin in range(xsdata.num_orders):
                         mu_fine = np.linspace(mu[h_bin], mu[h_bin + 1], _NMU)
                         new_data[..., h_bin] = \
-                            norm * simps(interp(mu_fine), mu_fine)
+                            norm * integrate(interp(mu_fine), x=mu_fine)
 
             # Remove small values resulting from numerical precision issues
             new_data[..., np.abs(new_data) < 1.E-10] = 0.
diff --git a/tests/unit_tests/test_data_photon.py b/tests/unit_tests/test_data_photon.py
index f7274e9c195..171dd52f3a2 100644
--- a/tests/unit_tests/test_data_photon.py
+++ b/tests/unit_tests/test_data_photon.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 from collections.abc import Mapping, Callable
 import os
 from pathlib import Path
@@ -123,6 +121,8 @@ def test_reactions(element, reaction):
         reactions[18]
 
 
+# TODO: Remove skip once the minimum pandas version provides the 'future' options
+@pytest.mark.skipif(not hasattr(pd.options, 'future'), reason='pandas version too old')
 @pytest.mark.parametrize('element', ['Pu'], indirect=True)
 def test_export_to_hdf5(tmpdir, element):
     filename = str(tmpdir.join('tmp.h5'))
@@ -146,8 +146,9 @@ def test_export_to_hdf5(tmpdir, element):
     # Export to hdf5 again
     element2.export_to_hdf5(filename, 'w')
 
+
 def test_photodat_only(run_in_tmpdir):
     endf_dir = Path(os.environ['OPENMC_ENDF_DATA'])
     photoatomic_file = endf_dir / 'photoat' / 'photoat-001_H_000.endf'
     data = openmc.data.IncidentPhoton.from_endf(photoatomic_file)
-    data.export_to_hdf5('tmp.h5', 'w')
\ No newline at end of file
+    data.export_to_hdf5('tmp.h5', 'w')
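
The fallback introduced in convert_scatter_format() picks scipy.integrate.simpson when it exists and otherwise falls back to the older simps alias; both accept the same y and x arguments. A minimal standalone sketch of that pattern (the integrand below is invented purely for illustration and is not part of the patch):

import numpy as np
import scipy.integrate

# Prefer simpson(); fall back to the simps() alias on older scipy releases,
# since newer releases deprecate and eventually remove simps().
if hasattr(scipy.integrate, 'simpson'):
    integrate = scipy.integrate.simpson
else:
    integrate = scipy.integrate.simps

# Illustrative integrand: the integral of x**2 over [-1, 1] is 2/3.
x = np.linspace(-1.0, 1.0, 101)
y = x**2
print(integrate(y, x=x))  # ~0.6667 with either implementation

The pd.option_context('future.no_silent_downcasting', True) blocks in photon.py opt in to the newer pandas behavior so that DataFrame.replace() no longer emits a FutureWarning about silently downcasting an object-dtype result. A rough sketch of the case being avoided, using a made-up frame that mirrors the transitions table (requires a pandas version that defines the 'future' options, which is exactly what the new skipif in the test checks):

import pandas as pd

# Hypothetical data: subshell labels mapped back to integer codes before
# being written to HDF5, as in AtomicRelaxation.to_hdf5().
df = pd.DataFrame({'secondary': ['K', 'L1', 'L2']})
with pd.option_context('future.no_silent_downcasting', True):
    codes = df['secondary'].replace(['K', 'L1', 'L2'], [0, 1, 2])
# The result stays object dtype instead of being silently downcast, so any
# conversion to a numeric type is explicit.
print(codes.astype(float).values)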