diff --git a/docs/source/writing-interactive-tools.rst b/docs/source/writing-interactive-tools.rst
index aca81d5b..15dd7179 100644
--- a/docs/source/writing-interactive-tools.rst
+++ b/docs/source/writing-interactive-tools.rst
@@ -182,7 +182,7 @@ A next step is to display the original data.
         self.add_controls()
         self.update_data()
 
-    def set_data(self, data: DataType):
+    def set_data(self, data: XrTypes):
         self.data = normalize_to_spectrum(data)
 
 To display the data, we added logic in the ``before_show`` `lifecycle
diff --git a/src/arpes/_typing.py b/src/arpes/_typing.py
index 8c111bdd..413fc3f1 100644
--- a/src/arpes/_typing.py
+++ b/src/arpes/_typing.py
@@ -31,7 +31,6 @@
     import numpy as np
     from _typeshed import Incomplete
     from matplotlib.artist import Artist
-    from matplotlib.axes import Axes
    from matplotlib.backend_bases import Event
     from matplotlib.colors import Colormap, Normalize
     from matplotlib.figure import Figure
@@ -66,13 +65,13 @@
     "DataType",
     "NormalizableDataType",
     "XrTypes",
-    "SPECTROMETER",
+    "Spectrometer",
     "MOMENTUM",
     "EMISSION_ANGLE",
     "ANGLE",
-    "CONFIGTYPE",
-    "WORKSPACETYPE",
-    "ANALYZERINFO",
+    "WorkSpaceType",
+    "ConfigType",
+    "AnalyzerInfo",
 ]
@@ -130,14 +129,14 @@ class ConfigSettings(TypedDict, total=False):
     use_tex: bool
 
 
-class WORKSPACETYPE(TypedDict, total=False):
+class WorkSpaceType(TypedDict, total=False):
     """TypedDict for arpes.config.CONFIG["WORKSPACE"]."""
 
     path: str | Path
     name: str
 
 
-class CURRENTCONTEXT(TypedDict, total=False):
+class CurrentContext(TypedDict, total=False):
     selected_components: list[float]  # in widget.py, selected_components is [0, 1] is default
     selected_indices: list[int]
     sum_data: Incomplete
@@ -155,11 +154,11 @@ class CURRENTCONTEXT(TypedDict, total=False):
     axis_Y_input: mpl.widgets.TextBox
 
 
-class CONFIGTYPE(TypedDict, total=False):
+class ConfigType(TypedDict, total=False):
     """TypedDict for arpes.config.CONFIG."""
 
-    WORKSPACE: Required[WORKSPACETYPE]
-    CURRENT_CONTEXT: CURRENTCONTEXT | None  # see widgets.py
+    WORKSPACE: Required[WorkSpaceType]
+    CURRENT_CONTEXT: CurrentContext | None  # see widgets.py
     ENABLE_LOGGING: Required[bool]
     LOGGING_STARTED: Required[bool]
     LOGGING_FILE: Required[str | Path | None]
@@ -168,7 +167,7 @@
 #
 # TypedDict for ARPES.attrs
 #
-class COORDINATES(TypedDict, total=False):
+class Coordinates(TypedDict, total=False):
     """TypedDict for attrs."""
 
     x: NDArray[np.float_] | float
@@ -182,7 +181,7 @@ class COORDINATES(TypedDict, total=False):
     phi: NDArray[np.float_] | float
 
 
-class ANALYZERINFO(TypedDict, total=False):
+class AnalyzerInfo(TypedDict, total=False):
     """TypedDict for attrs.
 
     see analyzer_info in xarray_extensions.py
@@ -209,7 +208,7 @@ class ANALYZERINFO(TypedDict, total=False):
     is_slit_vertical: bool
 
 
-class _PUMPINFO(TypedDict, total=False):
+class _PumpInfo(TypedDict, total=False):
     """TypedDict for attrs.
 
     see pump_info in xarray_extensions.py
@@ -230,7 +229,7 @@ class _PUMPINFO(TypedDict, total=False):
     pump_polarization_alpha: float
 
 
-class _PROBEINFO(TypedDict, total=False):
+class _ProbeInfo(TypedDict, total=False):
     """TypedDict for attrs.
 
     see probe_info in xarray_extensions.py
@@ -251,7 +250,7 @@ class _PROBEINFO(TypedDict, total=False):
     probe_polarization_alpha: float
 
 
-class _BEAMLINEINFO(TypedDict, total=False):
+class _BeamLineInfo(TypedDict, total=False):
     """TypedDict for attrs.
 
     see beamline_info in xarray_extensions.py
@@ -275,7 +274,7 @@ class BeamLineSettings(TypedDict, total=False):
     grating: str | None
 
 
-class LIGHTSOURCEINFO(_PROBEINFO, _PUMPINFO, _BEAMLINEINFO, total=False):
+class LightSourceInfo(_ProbeInfo, _PumpInfo, _BeamLineInfo, total=False):
     polarization: float | tuple[float, float] | str
     photon_flux: float
     photocurrent: float
@@ -283,7 +282,7 @@ class LIGHTSOURCEINFO(_PROBEINFO, _PUMPINFO, _BEAMLINEINFO, total=False):
     probe_detail: Incomplete
 
 
-class SAMPLEINFO(TypedDict, total=False):
+class SampleInfo(TypedDict, total=False):
     """TypedDict for attrs.
 
     see sample_info in xarray_extensions
@@ -295,7 +294,7 @@ class SAMPLEINFO(TypedDict, total=False):
     reflectivity: float
 
 
-class SCANINFO(TypedDict, total=False):
+class ScanInfo(TypedDict, total=False):
     time: str
     date: str
     spectrum_type: Literal["cut", "map", "hv_map", "ucut", "spem", "xps"]
@@ -307,7 +306,7 @@ class SCANINFO(TypedDict, total=False):
     temperature_cryotip: float
 
 
-class DAQINFO(TypedDict, total=False):
+class DAQInfo(TypedDict, total=False):
     """TypedDict for attrs.
 
     see daq_info in xarray_extensions.py
@@ -325,7 +324,7 @@ class DAQINFO(TypedDict, total=False):
     frame_duration: float
 
 
-class SPECTROMETER(ANALYZERINFO, COORDINATES, DAQINFO, total=False):
+class Spectrometer(AnalyzerInfo, Coordinates, DAQInfo, total=False):
     name: str
     type: str
     rad_per_pixel: float
@@ -336,16 +335,16 @@ class SPECTROMETER(ANALYZERINFO, COORDINATES, DAQINFO, total=False):
     length: float
 
 
-class EXPERIMENTINFO(
-    SCANINFO,
-    LIGHTSOURCEINFO,
-    ANALYZERINFO,
+class ExperimentInfo(
+    ScanInfo,
+    LightSourceInfo,
+    AnalyzerInfo,
     total=False,
 ):
     pass
 
 
-class ARPESAttrs(SPECTROMETER, LIGHTSOURCEINFO, SAMPLEINFO, total=False):
+class ARPESAttrs(Spectrometer, LightSourceInfo, SampleInfo, total=False):
     angle_unit: Literal["Degrees", "Radians", "deg", "rad"]
     energy_notation: Literal[
         "Binding",
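Note for readers of the `_typing.py` changes: only the spelling of these classes changed, not their semantics. A minimal sketch of how a `total=False` TypedDict with `Required` keys behaves (class bodies mirror the diff above; `Required` needs Python 3.11 or `typing_extensions`):

```python
from __future__ import annotations

from pathlib import Path
from typing import Required, TypedDict  # typing_extensions on Python < 3.11


class WorkSpaceType(TypedDict, total=False):
    path: str | Path
    name: str


class ConfigType(TypedDict, total=False):
    WORKSPACE: Required[WorkSpaceType]  # still mandatory despite total=False
    ENABLE_LOGGING: Required[bool]


config: ConfigType = {"WORKSPACE": {}, "ENABLE_LOGGING": True}  # OK: other keys optional
# {"ENABLE_LOGGING": True} alone would be flagged: missing required key "WORKSPACE"
```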
diff --git a/src/arpes/analysis/decomposition.py b/src/arpes/analysis/decomposition.py
index 53153743..d0067420 100644
--- a/src/arpes/analysis/decomposition.py
+++ b/src/arpes/analysis/decomposition.py
@@ -9,7 +9,7 @@
 from sklearn.decomposition import FactorAnalysis, FastICA
 
 from arpes.constants import TWO_DIMENSION
-from arpes.provenance import PROVENANCE, provenance
+from arpes.provenance import Provenance, provenance
 from arpes.utilities import normalize_to_spectrum
 
 if TYPE_CHECKING:
@@ -166,7 +166,7 @@ def decomposition_along(
     if stacked:
         into = into.unstack("fit_axis")
 
-    provenance_context: PROVENANCE = {
+    provenance_context: Provenance = {
         "what": "sklearn decomposition",
         "by": "decomposition_along",
         "axes": axes,
diff --git a/src/arpes/analysis/deconvolution.py b/src/arpes/analysis/deconvolution.py
index 66438dd8..093d4615 100644
--- a/src/arpes/analysis/deconvolution.py
+++ b/src/arpes/analysis/deconvolution.py
@@ -56,7 +56,7 @@ def deconvolve_ice(
     The PSF is the impulse response of a focused optical imaging system.
 
     Args:
-        data (DataType): input data
+        data (xr.DataArray): input data
         psf (NDArray[np.float_]): array as point spread function
         n_iterations: the number of convolutions to use for the fit
         deg: the degree of the fitting polynomial
diff --git a/src/arpes/analysis/derivative.py b/src/arpes/analysis/derivative.py
index 620a0770..fe591015 100644
--- a/src/arpes/analysis/derivative.py
+++ b/src/arpes/analysis/derivative.py
@@ -8,7 +8,7 @@
 import numpy as np
 import xarray as xr
 
-from arpes.provenance import PROVENANCE, provenance, update_provenance
+from arpes.provenance import Provenance, provenance, update_provenance
 from arpes.utilities import normalize_to_spectrum
 
 if TYPE_CHECKING:
@@ -175,7 +175,7 @@ def warpped_filter(arr: xr.DataArray):
     if "id" in arr.attrs:
         filterd_arr.attrs["id"] = arr.attrs["id"] + "_CV"
 
-    provenance_context: PROVENANCE = {"what": "Maximum Curvature", "by": "1D", "alpha": alpha}
+    provenance_context: Provenance = {"what": "Maximum Curvature", "by": "1D", "alpha": alpha}
 
     provenance(filterd_arr, arr, provenance_context)
     return filterd_arr
@@ -239,7 +239,7 @@ def warpped_filter(arr: xr.DataArray):
     if "id" in curv.attrs:
         del curv.attrs["id"]
 
-    provenance_context: PROVENANCE = {
+    provenance_context: Provenance = {
         "what": "Curvature",
         "by": "2D_with_weight",
         "directions": directions,
@@ -288,7 +288,7 @@ def dn_along_axis(
     if "id" in dn_arr.attrs:
         dn_arr.attrs["id"] = dn_arr.attrs["id"] + f"_dy{order}"
 
-    provenance_context: PROVENANCE = {
+    provenance_context: Provenance = {
         "what": f"{order}th derivative",
         "by": "dn_along_axis",
         "axis": dim,
diff --git a/src/arpes/analysis/filters.py b/src/arpes/analysis/filters.py
index 8498e293..5764f2c3 100644
--- a/src/arpes/analysis/filters.py
+++ b/src/arpes/analysis/filters.py
@@ -8,7 +8,7 @@
 import xarray as xr
 from scipy import ndimage
 
-from arpes.provenance import PROVENANCE, provenance
+from arpes.provenance import Provenance, provenance
 
 if TYPE_CHECKING:
     from collections.abc import Callable, Hashable
@@ -61,7 +61,7 @@ def gaussian_filter_arr(
     filtered_arr = xr.DataArray(values, arr.coords, arr.dims, attrs=arr.attrs)
     if "id" in filtered_arr.attrs:
         del filtered_arr.attrs["id"]
-    provenance_context: PROVENANCE = {
+    provenance_context: Provenance = {
         "what": "Gaussian filtered data",
         "by": "gaussian_filter_arr",
         "sigma": sigma,
@@ -117,7 +117,7 @@ def boxcar_filter_arr(
     filtered_arr = xr.DataArray(array_values, arr.coords, arr.dims, attrs=arr.attrs)
     if "id" in arr.attrs:
         del filtered_arr.attrs["id"]
-    provenance_context: PROVENANCE = {
+    provenance_context: Provenance = {
         "what": "Boxcar filtered data",
         "by": "boxcar_filter_arr",
         "size": size,
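All of the analysis modules follow the same recording pattern that this PR re-types. A minimal sketch of that pattern for a custom step (`my_filter` is hypothetical; the `provenance(child, parent, record)` call and the `"what"`/`"by"` keys follow the hunks above):

```python
import xarray as xr

from arpes.provenance import Provenance, provenance


def my_filter(arr: xr.DataArray) -> xr.DataArray:
    """Hypothetical analysis step following the pattern above."""
    filtered_arr = arr.copy(deep=True)  # stand-in for a real filtering operation
    if "id" in filtered_arr.attrs:
        del filtered_arr.attrs["id"]  # force a fresh id, as the filters above do
    provenance_context: Provenance = {
        "what": "Example filtered data",
        "by": "my_filter",
    }
    provenance(filtered_arr, arr, provenance_context)  # child, parent, record
    return filtered_arr
```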
diff --git a/src/arpes/config.py b/src/arpes/config.py
index 5cf3623f..80824186 100644
--- a/src/arpes/config.py
+++ b/src/arpes/config.py
@@ -26,7 +26,7 @@
 import pint
 
 if TYPE_CHECKING:
-    from ._typing import CONFIGTYPE, WORKSPACETYPE, ConfigSettings
+    from ._typing import ConfigSettings, ConfigType, WorkSpaceType
 
 # pylint: disable=global-statement
@@ -63,7 +63,7 @@
 FIGURE_PATH: str | Path | None = None
 DATASET_PATH: str | Path | None = None
 
-CONFIG: CONFIGTYPE = {
+CONFIG: ConfigType = {
     "WORKSPACE": {},
     "CURRENT_CONTEXT": None,
     "ENABLE_LOGGING": True,
@@ -127,7 +127,7 @@ def __init__(self, workspace_name: str = "") -> None:
 
         ToDo: TEST
         """
-        self._cached_workspace: WORKSPACETYPE = {}
+        self._cached_workspace: WorkSpaceType = {}
         self._workspace_name: str = workspace_name
 
     def __enter__(self) -> None:
diff --git a/src/arpes/constants.py b/src/arpes/constants.py
index 8eb4da9d..a043c06e 100644
--- a/src/arpes/constants.py
+++ b/src/arpes/constants.py
@@ -13,7 +13,7 @@
 from numpy import pi
 
 if TYPE_CHECKING:
-    from ._typing import SPECTROMETER
+    from ._typing import Spectrometer
 
 # eV, A reasonablish value if you aren't sure for the particular sample
 WORK_FUNCTION = 4.3
@@ -52,21 +52,21 @@
 #
 #
-SPECTROMETER_MC: SPECTROMETER = {
+SPECTROMETER_MC: Spectrometer = {
     "name": "MC",
     "rad_per_pixel": (1 / 10) * (pi / 180),
     "type": "hemisphere",
     "is_slit_vertical": False,
 }
 
-SPECTROMETER_MC_OLD: SPECTROMETER = {
+SPECTROMETER_MC_OLD: Spectrometer = {
     "name": "MC_OLD",
     "type": "hemisphere",
     "rad_per_pixel": 0.125 * (pi / 180),
     "is_slit_vertical": False,
 }
 
-SPECTROMETER_STRAIGHT_TOF: SPECTROMETER = {
+SPECTROMETER_STRAIGHT_TOF: Spectrometer = {
     "name": "STRAIGHT_ToF",
     "length": STRAIGHT_TOF_LENGTH,
     "mstar": 1.0,
@@ -75,7 +75,7 @@
     "scan_dof": ["theta"],
 }
 
-SPECTROMETER_SPIN_TOF: SPECTROMETER = {
+SPECTROMETER_SPIN_TOF: Spectrometer = {
     "name": "SPIN_ToF",
     "length": SPIN_TOF_LENGTH,
     "mstar": 0.5,
@@ -84,7 +84,7 @@
     "scan_dof": ["theta", "beta"],
 }
 
-SPECTROMETER_DLD: SPECTROMETER = {
+SPECTROMETER_DLD: Spectrometer = {
     "name": "DLD",
     "length": DLD_LENGTH,
     "type": "tof",
@@ -95,28 +95,28 @@
     "scan_dof": ["theta"],
 }
 
-SPECTROMETER_BL4: SPECTROMETER = {
+SPECTROMETER_BL4: Spectrometer = {
     "name": "BL4",
     "is_slit_vertical": True,
     "type": "hemisphere",
     "dof": ["theta", "sample_phi"],
 }
 
-SPECTROMETER_BL7: SPECTROMETER = {
+SPECTROMETER_BL7: Spectrometer = {
     "name": "BL7",
     "is_slit_vertical": True,
     "type": "hemisphere",
     "dof": ["theta", "sample_phi"],
 }
 
-SPECTROMETER_ANTARES: SPECTROMETER = {
+SPECTROMETER_ANTARES: Spectrometer = {
     "name": "ANTARES",
     "is_slit_vertical": True,
     "type": "hemisphere",
     "dof": ["theta", "sample_phi"],
 }
 
-SPECTROMETER_KAINDL: SPECTROMETER = {
+SPECTROMETER_KAINDL: Spectrometer = {
     "name": "Kaindl",
     "is_slit_vertical": True,
     "type": "hemisphere",
diff --git a/src/arpes/corrections/fermi_edge_corrections.py b/src/arpes/corrections/fermi_edge_corrections.py
index 59202104..4f166fa4 100644
--- a/src/arpes/corrections/fermi_edge_corrections.py
+++ b/src/arpes/corrections/fermi_edge_corrections.py
@@ -10,7 +10,7 @@
 from matplotlib.axes import Axes
 
 from arpes.fits import GStepBModel, LinearModel, QuadraticModel, broadcast_model
-from arpes.provenance import PROVENANCE, provenance, update_provenance
+from arpes.provenance import Provenance, provenance, update_provenance
 from arpes.utilities.math import shift_by
 
 if TYPE_CHECKING:
@@ -125,7 +125,7 @@ def apply_direct_fermi_edge_correction(
     if "id" in corrected_arr.attrs:
         del corrected_arr.attrs["id"]
 
-    provenance_context: PROVENANCE = {
+    provenance_context: Provenance = {
         "what": "Shifted Fermi edge to align at 0 along hv axis",
         "by": "apply_photon_energy_fermi_edge_correction",
         "correction": correction,  # TODO: NEED check
@@ -273,7 +273,7 @@ def apply_photon_energy_fermi_edge_correction(
     if "id" in corrected_arr.attrs:
         del corrected_arr.attrs["id"]
 
-    provenance_context: PROVENANCE = {
+    provenance_context: Provenance = {
         "what": "Shifted Fermi edge to align at 0 along hv axis",
         "by": "apply_photon_energy_fermi_edge_correction",
         "correction": list(correction_values.values),
@@ -326,7 +326,7 @@ def apply_quadratic_fermi_edge_correction(
     if "id" in corrected_arr.attrs:
         del corrected_arr.attrs["id"]
 
-    provenance_context: PROVENANCE = {
+    provenance_context: Provenance = {
         "what": "Shifted Fermi edge to align at 0",
         "by": "apply_quadratic_fermi_edge_correction",
         "correction": correction.best_values,
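A side benefit of the `Spectrometer` annotation on the `constants.py` entries above: a TypedDict annotation lets a type checker reject misspelled keys in these literal dicts. A sketch (`SPECTROMETER_EXAMPLE` is illustrative, not part of the codebase; the keys shown appear in the diff):

```python
from arpes._typing import Spectrometer

SPECTROMETER_EXAMPLE: Spectrometer = {
    "name": "EXAMPLE",
    "type": "hemisphere",
    "is_slit_vertical": True,
    # "radd_per_pixel": 0.1,  # would now be flagged: not a key of Spectrometer
}
```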
diff --git a/src/arpes/endstations/__init__.py b/src/arpes/endstations/__init__.py
index 1f758092..e17baa73 100644
--- a/src/arpes/endstations/__init__.py
+++ b/src/arpes/endstations/__init__.py
@@ -18,7 +18,7 @@
 import arpes.config
 import arpes.constants
 from arpes.load_pxt import find_ses_files_associated, read_single_pxt
-from arpes.provenance import PROVENANCE, provenance_from_file
+from arpes.provenance import Provenance, provenance_from_file
 from arpes.repair import negate_energy
 from arpes.utilities.dict import rename_dataarray_attrs
@@ -30,7 +30,7 @@
     from _typeshed import Incomplete
 
-    from arpes._typing import SPECTROMETER, DataType
+    from arpes._typing import DataType, Spectrometer
 
 __all__ = [
     "endstation_name_from_alias",
@@ -43,6 +43,7 @@
     "SynchrotronEndstation",
     "SingleFileEndstation",
     "resolve_endstation",
+    "ScanDesc",
 ]
 
 LOGLEVELS = (DEBUG, INFO)
@@ -61,7 +62,9 @@
 _ENDSTATION_ALIASES: dict[str, type[EndstationBase]] = {}
 
 
-class SCANDESC(TypedDict, total=False):
+class ScanDesc(TypedDict, total=False):
+    """TypedDict based class for scan_desc."""
+
     file: str | Path
     location: str
     path: str | Path
@@ -97,7 +100,7 @@ class EndstationBase:
     ALIASES: ClassVar[list[str]] = []
     PRINCIPAL_NAME = ""
     ATTR_TRANSFORMS: ClassVar[dict[str, Callable[..., dict[str, float | list[str] | str]]]] = {}
-    MERGE_ATTRS: ClassVar[SPECTROMETER] = {}
+    MERGE_ATTRS: ClassVar[Spectrometer] = {}
 
     _SEARCH_DIRECTORIES: tuple[str, ...] = (
         "",
@@ -238,7 +241,7 @@ def find_first_file(
     def concatenate_frames(
         self,
         frames: list[xr.Dataset],
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
     ) -> xr.Dataset:
         """Performs concatenation of frames in multi-frame scans.
@@ -275,7 +278,7 @@ def concatenate_frames(
         frames.sort(key=lambda x: x.coords[scan_coord])
         return xr.concat(frames, scan_coord)
 
-    def resolve_frame_locations(self, scan_desc: SCANDESC | None = None) -> list[Path]:
+    def resolve_frame_locations(self, scan_desc: ScanDesc | None = None) -> list[Path]:
         """Determine all files and frames associated to this piece of data.
 
         This always needs to be overridden in subclasses to handle data appropriately.
@@ -288,7 +291,7 @@ def resolve_frame_locations(self, scan_desc: SCANDESC | None = None) -> list[Pat
     def load_single_frame(
         self,
         frame_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: Incomplete,
     ) -> xr.Dataset:
         """Hook for loading a single frame of data.
@@ -335,7 +338,7 @@ def postprocess(self, frame: xr.Dataset) -> xr.Dataset:
     def postprocess_final(
         self,
         data: xr.Dataset,
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
     ) -> xr.Dataset:
         """Perform final normalization of scan data.
@@ -404,7 +407,7 @@ def load_from_path(self, path: str | Path) -> xr.Dataset:
             },
         )
 
-    def load(self, scan_desc: SCANDESC | None = None, **kwargs: Incomplete) -> xr.Dataset:
+    def load(self, scan_desc: ScanDesc | None = None, **kwargs: Incomplete) -> xr.Dataset:
         """Loads a scan from a single file or a sequence of files.
 
         This defines the contract and structure for standard data loading plugins:
@@ -419,7 +422,7 @@ def load(self, scan_desc: SCANDESC | None = None, **kwargs: Incomplete) -> xr.Da
         as appropriate for a beamline.
 
         Args:
-            scan_desc(SCANDESC): scan description
+            scan_desc(ScanDesc): scan description
             kwargs: pass to load_single_frame
 
         Returns:
@@ -496,7 +499,7 @@ class SingleFileEndstation(EndstationBase):
     file given to you in the spreadsheet or direct load calls is all there is.
     """
 
-    def resolve_frame_locations(self, scan_desc: SCANDESC | None = None) -> list[Path]:
+    def resolve_frame_locations(self, scan_desc: ScanDesc | None = None) -> list[Path]:
         """Single file endstations just use the referenced file from the scan description."""
         if scan_desc is None:
             msg = "Must pass dictionary as file scan_desc to all endstation loading code."
             raise ValueError(
@@ -523,7 +526,7 @@ class SESEndstation(EndstationBase):
     These files have special frame names, at least at the beamlines Conrad has encountered.
     """
 
-    def resolve_frame_locations(self, scan_desc: SCANDESC | None = None) -> list[Path]:
+    def resolve_frame_locations(self, scan_desc: ScanDesc | None = None) -> list[Path]:
         if scan_desc is None:
             msg = "Must pass dictionary as file scan_desc to all endstation loading code."
             raise ValueError(
@@ -547,7 +550,7 @@ def resolve_frame_locations(self, scan_desc: SCANDESC | None = None) -> list[Pat
     def load_single_frame(
         self,
         frame_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: bool,
     ) -> xr.Dataset:
         """Load the single frame from the file.
@@ -556,7 +559,7 @@ def load_single_frame(
         Args:
             frame_path: [TODO:description]
-            scan_desc (SCANDESC): [TODO:description]
+            scan_desc (ScanDesc): [TODO:description]
             kwargs: pass to load_SES_nc, thus only "robust_dimension_labels" can be accepted.
 
         Returns:
@@ -582,7 +585,7 @@ def postprocess(self, frame: xr.Dataset) -> xr.Dataset:
     def load_SES_nc(
         self,
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         *,
         robust_dimension_labels: bool = False,
     ) -> xr.Dataset:
@@ -671,7 +674,7 @@ def load_SES_nc(
         dims=dimension_labels,
         attrs=attrs,
     )
-    provenance_context: PROVENANCE = {"what": "Loaded SES dataset from HDF5.", "by": "load_SES"}
+    provenance_context: Provenance = {"what": "Loaded SES dataset from HDF5.", "by": "load_SES"}
     provenance_from_file(dataset_contents["spectrum"], str(data_loc), provenance_context)
     return xr.Dataset(
         dataset_contents,
@@ -737,7 +740,7 @@ class FITSEndstation(EndstationBase):
         "LMOTOR6": "alpha",
     }
 
-    def resolve_frame_locations(self, scan_desc: SCANDESC | None = None) -> list[Path]:
+    def resolve_frame_locations(self, scan_desc: ScanDesc | None = None) -> list[Path]:
         """Determines all files associated with a given scan.
 
         [TODO:description]
@@ -771,7 +774,7 @@ def resolve_frame_locations(self, scan_desc: SCANDESC | None = None) -> list[Pat
     def load_single_frame(
         self,
         frame_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: Incomplete,
     ) -> xr.Dataset:
         """Loads a scan from a single .fits file.
@@ -968,7 +971,7 @@ def prep_spectrum(data: xr.DataArray) -> xr.DataArray:
         data = data.assign_coords(phi=phi_axis)
 
         # Always attach provenance
-        provenance_context: PROVENANCE = {
+        provenance_context: Provenance = {
             "what": "Loaded MC dataset from FITS.",
             "by": "load_MC",
         }
@@ -1084,7 +1087,7 @@ def resolve_endstation(*, retry: bool = True, **kwargs: Incomplete) -> type[Ends
 def load_scan(
-    scan_desc: SCANDESC,
+    scan_desc: ScanDesc,
     *,
     retry: bool = True,
     **kwargs: Incomplete,
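Since `ScanDesc` is now exported via `__all__`, call sites construct it as an ordinary dict. A sketch of the loading contract defined above (the file path and location alias are illustrative, not real data):

```python
from arpes.endstations import ScanDesc, load_scan

desc: ScanDesc = {
    "file": "data/scan_001.fits",  # hypothetical path
    "location": "ALG-MC",          # hypothetical endstation alias
}
dataset = load_scan(desc)  # resolves the endstation and dispatches to its load()
```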
diff --git a/src/arpes/endstations/plugin/ALG_main.py b/src/arpes/endstations/plugin/ALG_main.py
index 4607fd16..1018c679 100644
--- a/src/arpes/endstations/plugin/ALG_main.py
+++ b/src/arpes/endstations/plugin/ALG_main.py
@@ -12,7 +12,7 @@
 import arpes.xarray_extensions  # pylint: disable=unused-import, redefined-outer-name  # noqa: F401
 from arpes.config import ureg
-from arpes.endstations import SCANDESC, FITSEndstation, HemisphericalEndstation
+from arpes.endstations import ScanDesc, FITSEndstation, HemisphericalEndstation
 from arpes.laser import electrons_per_pulse
 
 if TYPE_CHECKING:
@@ -20,7 +20,7 @@
 
     import xarray as xr
 
-    from arpes.constants import SPECTROMETER
+    from arpes.constants import Spectrometer
 
 __all__ = ("ALGMainChamber", "electrons_per_pulse_mira")
@@ -64,7 +64,7 @@ class ALGMainChamber(HemisphericalEndstation, FITSEndstation):
         "SFBE0": "eV_prebinning",
     }
 
-    MERGE_ATTRS: ClassVar[SPECTROMETER] = {
+    MERGE_ATTRS: ClassVar[Spectrometer] = {
         "analyzer": "Specs PHOIBOS 150",
         "analyzer_name": "Specs PHOIBOS 150",
         "parallel_deflectors": False,
@@ -78,7 +78,7 @@ class ALGMainChamber(HemisphericalEndstation, FITSEndstation):
     def postprocess_final(
         self,
         data: xr.Dataset,
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
     ) -> xr.Dataset:
         """Performs final normalization of scan data.
diff --git a/src/arpes/endstations/plugin/ALG_spin_ToF.py b/src/arpes/endstations/plugin/ALG_spin_ToF.py
index ce6a7eeb..b17ec893 100644
--- a/src/arpes/endstations/plugin/ALG_spin_ToF.py
+++ b/src/arpes/endstations/plugin/ALG_spin_ToF.py
@@ -14,8 +14,8 @@
 from astropy.io import fits
 
 import arpes.config
-from arpes.endstations import SCANDESC, EndstationBase, find_clean_coords
-from arpes.provenance import PROVENANCE, provenance_from_file
+from arpes.endstations import ScanDesc, EndstationBase, find_clean_coords
+from arpes.provenance import Provenance, provenance_from_file
 from arpes.utilities import rename_keys
 
 __all__ = ("SpinToFEndstation",)
@@ -74,7 +74,7 @@ class SpinToFEndstation(EndstationBase):
         "Phi": "phi",
     }
 
-    def load_SToF_hdf5(self, scan_desc: SCANDESC | None = None) -> xr.Dataset:
+    def load_SToF_hdf5(self, scan_desc: ScanDesc | None = None) -> xr.Dataset:
         """Imports a FITS file that contains ToF spectra.
 
         Args:
@@ -103,7 +103,7 @@ def load_SToF_hdf5(self, scan_desc: SCANDESC | None = None) -> xr.Dataset:
             dims=("x_pixels", "t_pixels"),
             attrs=f["/PRIMARY"].attrs.items(),
         )
-        pronance_context: PROVENANCE = {
+        pronance_context: Provenance = {
             "what": "Loaded Anton and Ping DLD dataset from HDF5.",
             "by": "load_DLD",
         }
@@ -111,7 +111,7 @@
         provenance_from_file(dataset_contents["raw"], str(data_loc), pronance_context)
         return xr.Dataset(dataset_contents, attrs=scan_desc)
 
-    def load_SToF_fits(self, scan_desc: SCANDESC) -> xr.Dataset:
+    def load_SToF_fits(self, scan_desc: ScanDesc) -> xr.Dataset:
         """Loads FITS convention SToF data.
 
         The data acquisition software is rather old, so this has to handle data formats
@@ -256,7 +256,7 @@ def load_SToF_fits(self, scan_desc: SCANDESC) -> xr.Dataset:
         for data_arr in dataset.data_vars.values():
             if "time" in data_arr.dims:
                 data_arr.data = data_arr.sel(time=slice(None, None, -1)).data
-        provenance_context: PROVENANCE = {
+        provenance_context: Provenance = {
             "what": "Loaded Spin-ToF dataset",
             "by": "load_DLD",
         }
@@ -265,7 +265,7 @@ def load_SToF_fits(self, scan_desc: SCANDESC) -> xr.Dataset:
 
         return dataset
 
-    def load(self, scan_desc: SCANDESC) -> xr.Dataset:
+    def load(self, scan_desc: ScanDesc) -> xr.Dataset:
         """Loads Lanzara group Spin-ToF data.
 
         Args:
diff --git a/src/arpes/endstations/plugin/ANTARES.py b/src/arpes/endstations/plugin/ANTARES.py
index a517e89d..8943be9e 100644
--- a/src/arpes/endstations/plugin/ANTARES.py
+++ b/src/arpes/endstations/plugin/ANTARES.py
@@ -11,7 +11,7 @@
 import xarray as xr
 
 from arpes.endstations import (
-    SCANDESC,
+    ScanDesc,
     HemisphericalEndstation,
     SingleFileEndstation,
     SynchrotronEndstation,
@@ -115,7 +115,7 @@ class ANTARESEndstation(HemisphericalEndstation, SynchrotronEndstation, SingleFi
     def load_top_level_scan(
         self,
         group: dict,
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         spectrum_index: int = 0,
     ) -> xr.Dataset:
         """Reads a spectrum from the top level group in a NeXuS scan format.
@@ -288,7 +288,7 @@ def read_scan_data(self, group: dict) -> xr.DataArray:
     def load_single_frame(
         self,
         frame_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: Incomplete,
     ) -> xr.Dataset:
         """Loads a single ANTARES scan.
@@ -321,7 +321,7 @@ def load_single_frame(
             **{self.RENAME_KEYS.get(k, k): v for k, v in loaded.attrs.items()},
         )
 
-    def postprocess_final(self, data: xr.Dataset, scan_desc: SCANDESC | None = None) -> xr.Dataset:
+    def postprocess_final(self, data: xr.Dataset, scan_desc: ScanDesc | None = None) -> xr.Dataset:
         """Performs final scan postprocessing.
 
         This mostly consists of unwrapping bytestring attributes, and
@@ -331,7 +331,7 @@ def postprocess_final(self, data: xr.Dataset, scan_desc: SCANDESC | None = None)
 
         Args:
             data: [TODO:description]
-            scan_desc (SCANDESC): [TODO:description]
+            scan_desc (ScanDesc): [TODO:description]
         """
 
         def check_attrs(s: xr.DataArray) -> None:
diff --git a/src/arpes/endstations/plugin/BL10_SARPES.py b/src/arpes/endstations/plugin/BL10_SARPES.py
index bc143285..d0a7a97b 100644
--- a/src/arpes/endstations/plugin/BL10_SARPES.py
+++ b/src/arpes/endstations/plugin/BL10_SARPES.py
@@ -8,7 +8,7 @@
 import numpy as np
 
 from arpes.endstations import (
-    SCANDESC,
+    ScanDesc,
     HemisphericalEndstation,
     SESEndstation,
     SynchrotronEndstation,
@@ -73,7 +73,7 @@ class BL10012SARPESEndstation(SynchrotronEndstation, HemisphericalEndstation, SE
     def load_single_frame(
         self,
         frame_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: Incomplete,
     ) -> xr.Dataset:
         """Loads all regions for a single .pxt frame, and perform per-frame normalization."""
@@ -131,7 +131,7 @@ def load_single_region(
 
         return pxt_data.rename({k: f"{k}{num}" for k in pxt_data.data_vars})
 
-    def postprocess_final(self, data: xr.Dataset, scan_desc: SCANDESC | None = None) -> xr.Dataset:
+    def postprocess_final(self, data: xr.Dataset, scan_desc: ScanDesc | None = None) -> xr.Dataset:
         """Performs final data normalization for MERLIN data.
 
         Additional steps we perform here are:
diff --git a/src/arpes/endstations/plugin/Elettra_spectromicroscopy.py b/src/arpes/endstations/plugin/Elettra_spectromicroscopy.py
index df02c679..08b04674 100644
--- a/src/arpes/endstations/plugin/Elettra_spectromicroscopy.py
+++ b/src/arpes/endstations/plugin/Elettra_spectromicroscopy.py
@@ -19,7 +19,7 @@
     from _typeshed import Incomplete
     from numpy.typing import NDArray
 
-    from arpes.endstations import SCANDESC
+    from arpes.endstations import ScanDesc
 
 __all__ = ("SpectromicroscopyElettraEndstation",)
@@ -164,7 +164,6 @@ def files_for_search(cls: type, directory: str | Path) -> list[Path]:
         "P": "psi",
         "Angle": "phi",
     }
-
     RENAME_KEYS: ClassVar[dict[str, str]] = {
         "Ep (eV)": "pass_energy",
         "Dwell Time (s)": "dwell_time",
@@ -184,7 +183,7 @@ def files_for_search(cls: type, directory: str | Path) -> list[Path]:
     def concatenate_frames(
         self,
         frames: list[xr.Dataset],
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
     ) -> xr.Dataset:
         """Concatenates frames for spectromicroscopy at Elettra.
@@ -229,7 +228,7 @@ def concatenate_frames(
 
         return xr.Dataset({"spectrum": xr.concat(fs, scan_coord)})
 
-    def resolve_frame_locations(self, scan_desc: SCANDESC | None = None) -> list[Path]:
+    def resolve_frame_locations(self, scan_desc: ScanDesc | None = None) -> list[Path]:
         """Determines all files associated with a given scan.
 
         This beamline saves several HDF files in scan associated folders, so this
@@ -254,7 +253,7 @@ def resolve_frame_locations(self, scan_desc: SCANDESC | None = None) -> list[Pat
     def load_single_frame(
         self,
         frame_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: Incomplete,
     ) -> xr.Dataset:
         """Loads a single HDF file with spectromicroscopy Elettra data."""
@@ -270,7 +269,7 @@ def load_single_frame(
 
         return xr.Dataset(arrays)
 
-    def postprocess_final(self, data: xr.Dataset, scan_desc: SCANDESC | None = None) -> xr.Dataset:
+    def postprocess_final(self, data: xr.Dataset, scan_desc: ScanDesc | None = None) -> xr.Dataset:
         """Performs final postprocessing of the data.
 
         This mostly amounts to:
diff --git a/src/arpes/endstations/plugin/HERS.py b/src/arpes/endstations/plugin/HERS.py
index a113c3f8..8e3e7ef2 100644
--- a/src/arpes/endstations/plugin/HERS.py
+++ b/src/arpes/endstations/plugin/HERS.py
@@ -13,13 +13,13 @@
 
 import arpes.config
 from arpes.endstations import HemisphericalEndstation, SynchrotronEndstation, find_clean_coords
-from arpes.provenance import PROVENANCE, provenance_from_file
+from arpes.provenance import Provenance, provenance_from_file
 from arpes.utilities import rename_keys
 
 if TYPE_CHECKING:
     from _typeshed import Incomplete
 
-    from arpes.endstations import SCANDESC
+    from arpes.endstations import ScanDesc
 
 __all__ = ("HERSEndstation",)
@@ -33,7 +33,7 @@ class HERSEndstation(SynchrotronEndstation, HemisphericalEndstation):
     PRINCIPAL_NAME = "ALS-BL1001"
     ALIASES: ClassVar[list[str]] = ["ALS-BL1001", "HERS", "ALS-HERS", "BL1001"]
 
-    def load(self, scan_desc: SCANDESC | None = None, **kwargs: Incomplete) -> xr.Dataset:
+    def load(self, scan_desc: ScanDesc | None = None, **kwargs: Incomplete) -> xr.Dataset:
         """Loads HERS data from FITS files. Shares a lot in common with Lanzara group formats.
 
         Args:
@@ -100,7 +100,7 @@ def load(self, scan_desc: SCANDESC | None = None, **kwargs: Incomplete) -> xr.Da
         }
 
         dataset = xr.Dataset(data_vars, relevant_coords, scan_desc)
-        provenance_context: PROVENANCE = {"what": "Loaded BL10 dataset", "by": "load_DLD"}
+        provenance_context: Provenance = {"what": "Loaded BL10 dataset", "by": "load_DLD"}
         provenance_from_file(dataset, str(data_loc), provenance_context)
 
         return dataset
diff --git a/src/arpes/endstations/plugin/IF_UMCS.py b/src/arpes/endstations/plugin/IF_UMCS.py
index 6d912e5a..899ebd52 100644
--- a/src/arpes/endstations/plugin/IF_UMCS.py
+++ b/src/arpes/endstations/plugin/IF_UMCS.py
@@ -9,7 +9,7 @@
 import xarray as xr
 
 from arpes.endstations import (
-    SCANDESC,
+    ScanDesc,
     HemisphericalEndstation,
     SingleFileEndstation,
     add_endstation,
@@ -17,8 +17,8 @@
 from arpes.endstations.prodigy_xy import load_xy
 
 if TYPE_CHECKING:
-    from arpes._typing import SPECTROMETER
-    from arpes.endstations import SCANDESC
+    from arpes._typing import Spectrometer
+    from arpes.endstations import ScanDesc
 
 __all__ = ("IF_UMCS",)
@@ -38,7 +38,7 @@ class IF_UMCS(HemisphericalEndstation, SingleFileEndstation):  # noqa: N801
         "detector_voltage": "mcp_voltage",
     }
 
-    MERGE_ATTRS: ClassVar[SPECTROMETER] = {
+    MERGE_ATTRS: ClassVar[Spectrometer] = {
         "analyzer": "Specs PHOIBOS 150",
         "analyzer_name": "Specs PHOIBOS 150",
         "parallel_deflectors": False,
@@ -50,7 +50,7 @@ class IF_UMCS(HemisphericalEndstation, SingleFileEndstation):  # noqa: N801
     def load_single_frame(
         self,
         frame_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: str | float,
     ) -> xr.Dataset:
         """Load single xy file."""
@@ -68,7 +68,7 @@ def load_single_frame(
     def postprocess_final(
         self,
         data: xr.Dataset,
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
     ) -> xr.Dataset:
         """Add missing parameters."""
         if scan_desc is None:
diff --git a/src/arpes/endstations/plugin/MAESTRO.py b/src/arpes/endstations/plugin/MAESTRO.py
index 406abf46..833182e7 100644
--- a/src/arpes/endstations/plugin/MAESTRO.py
+++ b/src/arpes/endstations/plugin/MAESTRO.py
@@ -23,8 +23,8 @@
     import xarray as xr
     from _typeshed import Incomplete
 
-    from arpes.constants import SPECTROMETER
-    from arpes.endstations import SCANDESC
+    from arpes.constants import Spectrometer
+    from arpes.endstations import ScanDesc
 
 __all__ = ("MAESTROMicroARPESEndstation", "MAESTRONanoARPESEndstation")
@@ -36,13 +36,11 @@ class MAESTROARPESEndstationBase(SynchrotronEndstation, HemisphericalEndstation,
     ALIASES = []
     ANALYZER_INFORMATION = None
 
-    def load(self, scan_desc: SCANDESC | None = None, **kwargs: Incomplete) -> xr.Dataset:
+    def load(self, scan_desc: ScanDesc | None = None, **kwargs: Incomplete) -> xr.Dataset:
         # in the future, can use a regex in order to handle the case where we postfix coordinates
         # for multiple spectra
         """[TODO:summary].
 
-        [TODO:description]
-
         Args:
             scan_desc: [TODO:description]
             kwargs: [TODO:description]
@@ -74,7 +72,7 @@ def load(self, scan_desc: SCANDESC | None = None, **kwargs: Incomplete) -> xr.Da
     def fix_prebinned_coordinates(self) -> None:
         pass
 
-    def postprocess_final(self, data: xr.Dataset, scan_desc: SCANDESC | None = None) -> xr.Dataset:
+    def postprocess_final(self, data: xr.Dataset, scan_desc: ScanDesc | None = None) -> xr.Dataset:
         ls = [data, *data.S.spectra]
         for _ in ls:
             _.attrs.update(self.ANALYZER_INFORMATION)
@@ -93,7 +91,7 @@ class MAESTROMicroARPESEndstation(MAESTROARPESEndstationBase):
     PRINCIPAL_NAME = "ALS-BL7"
     ALIASES: ClassVar[list[str]] = ["BL7", "BL7.0.2", "ALS-BL7.0.2", "MAESTRO"]
 
-    ANALYZER_INFORMATION: ClassVar[SPECTROMETER] = {
+    ANALYZER_INFORMATION: ClassVar[Spectrometer] = {
         "analyzer": "R4000",
         "analyzer_name": "Scienta R4000",
         "parallel_deflectors": False,
@@ -150,7 +148,7 @@ class MAESTROMicroARPESEndstation(MAESTROARPESEndstationBase):
         },
     }
 
-    MERGE_ATTRS: ClassVar[SPECTROMETER] = {
+    MERGE_ATTRS: ClassVar[Spectrometer] = {
         "mcp_voltage": np.nan,
         "repetition_rate": 5e8,
         "undulator_type": "elliptically_polarized_undulator",
@@ -267,8 +265,8 @@ class MAESTRONanoARPESEndstation(MAESTROARPESEndstationBase):
         },
     }
 
-    MERGE_ATTRS: ClassVar[SPECTROMETER] = {
-        "mcp_voltage": None,
+    MERGE_ATTRS: ClassVar[Spectrometer] = {
+        "mcp_voltage": np.nan,
         "beta": 0,
         "repetition_rate": 5e8,
         "undulator_type": "elliptically_polarized_undulator",
@@ -361,7 +359,7 @@ def unwind_serptentine(data: xr.Dataset) -> xr.Dataset:
 
         return data
 
-    def postprocess_final(self, data: xr.Dataset, scan_desc: SCANDESC | None = None):
+    def postprocess_final(self, data: xr.Dataset, scan_desc: ScanDesc | None = None):
         """Perform final preprocessing of MAESTRO nano-ARPES data.
 
         In addition to standard tasks, we need to build a single unified spatial coordinate
diff --git a/src/arpes/endstations/plugin/MBS.py b/src/arpes/endstations/plugin/MBS.py
index ad135178..30b8c89d 100644
--- a/src/arpes/endstations/plugin/MBS.py
+++ b/src/arpes/endstations/plugin/MBS.py
@@ -11,7 +11,7 @@
 import xarray as xr
 
 from arpes.constants import TWO_DIMENSION
-from arpes.endstations import SCANDESC, HemisphericalEndstation
+from arpes.endstations import ScanDesc, HemisphericalEndstation
 from arpes.utilities import clean_keys
 
 if TYPE_CHECKING:
@@ -53,7 +53,7 @@ class MBSEndstation(HemisphericalEndstation):
     def resolve_frame_locations(
         self,
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
     ) -> list[Path]:
         """There is only a single file for the MBS loader, so this is simple."""
         if scan_desc is None:
@@ -63,7 +63,7 @@ def resolve_frame_locations(
     def postprocess_final(
         self,
         data: xr.Dataset,
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
     ) -> xr.Dataset:
         """Performs final data normalization.
@@ -100,7 +100,7 @@ def postprocess_final(
     def load_single_frame(
         self,
         frame_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: Incomplete,
     ) -> xr.Dataset:
         """Load a single frame from an MBS spectrometer.
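The plugin diffs above all make the same two substitutions: `ScanDesc` in loader signatures and `Spectrometer` for `MERGE_ATTRS`. A condensed skeleton of a plugin under the new names (`MyLabEndstation` and its attribute values are hypothetical; the structure follows the plugins above):

```python
from __future__ import annotations

from typing import ClassVar

import xarray as xr

from arpes._typing import Spectrometer
from arpes.endstations import HemisphericalEndstation, ScanDesc, SingleFileEndstation


class MyLabEndstation(HemisphericalEndstation, SingleFileEndstation):
    """Hypothetical plugin sketch under the renamed types."""

    PRINCIPAL_NAME = "MyLab"
    ALIASES: ClassVar[list[str]] = ["MyLab"]

    MERGE_ATTRS: ClassVar[Spectrometer] = {
        "analyzer_name": "Example Analyzer",
        "is_slit_vertical": True,
    }

    def postprocess_final(
        self,
        data: xr.Dataset,
        scan_desc: ScanDesc | None = None,  # ScanDesc, not the old SCANDESC
    ) -> xr.Dataset:
        return super().postprocess_final(data, scan_desc)
```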
diff --git a/src/arpes/endstations/plugin/SPD_main.py b/src/arpes/endstations/plugin/SPD_main.py
index fc538e24..c1ab1abe 100644
--- a/src/arpes/endstations/plugin/SPD_main.py
+++ b/src/arpes/endstations/plugin/SPD_main.py
@@ -16,8 +16,8 @@
 from arpes.endstations.prodigy_itx import load_itx, load_sp2
 
 if TYPE_CHECKING:
-    from arpes._typing import SPECTROMETER
-    from arpes.endstations import SCANDESC
+    from arpes._typing import Spectrometer
+    from arpes.endstations import ScanDesc
 
 __all__ = [
     "SPDEndstation",
 ]
@@ -67,7 +67,7 @@ class SPDEndstation(HemisphericalEndstation, SingleFileEndstation):
         "Igor Text File Exporter Version": "igor_text_file_exporter_version",
         "Lens Voltage": "lens voltage",
     }
-    MERGE_ATTRS: ClassVar[SPECTROMETER] = {
+    MERGE_ATTRS: ClassVar[Spectrometer] = {
         "analyzer": "Specs PHOIBOS 100",
         "analyzer_name": "Specs PHOIBOS 100",
         "parallel_deflectors": False,
@@ -84,13 +84,13 @@ class SPDEndstation(HemisphericalEndstation, SingleFileEndstation):
     def postprocess_final(
         self,
         data: xr.Dataset,
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
     ) -> xr.Dataset:
         """Perform final data normalization.
 
         Args:
             data(xr.Dataset): ARPES data
-            scan_desc(SCANDESC | None): scan_description. Not used currently
+            scan_desc(ScanDesc | None): scan_description. Not used currently
 
         Returns:
             xr.Dataset: pyARPES compatible.
@@ -117,14 +117,14 @@ def postprocess_final(
     def load_single_frame(
         self,
         frame_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: str | float,
     ) -> xr.Dataset:
         """Load a single frame from a PHOIBOS 100 spectrometer with Prodigy.
 
         Args:
             frame_path(str | Path): _description_, by default ""
-            scan_desc(SCANDESC | None): _description_, by default None
+            scan_desc(ScanDesc | None): _description_, by default None
             kwargs(str | int | float): Pass to load_itx
 
         Returns:
diff --git a/src/arpes/endstations/plugin/SSRF_NSRL.py b/src/arpes/endstations/plugin/SSRF_NSRL.py
index 273219c1..607f5f30 100644
--- a/src/arpes/endstations/plugin/SSRF_NSRL.py
+++ b/src/arpes/endstations/plugin/SSRF_NSRL.py
@@ -32,13 +32,13 @@
 import numpy as np
 import xarray as xr
 
-from arpes.endstations import SCANDESC, SingleFileEndstation, SynchrotronEndstation
+from arpes.endstations import ScanDesc, SingleFileEndstation, SynchrotronEndstation
 from arpes.load_pxt import read_single_pxt
 
 if TYPE_CHECKING:
     from _typeshed import Incomplete
 
-    from arpes._typing import SPECTROMETER
+    from arpes._typing import Spectrometer
 
 __all__ = ("SSRFEndstation", "NSRLEndstation")
@@ -97,7 +97,7 @@ class DA30_L(SingleFileEndstation):
         "region_name": "spectrum_type",
     }
 
-    MERGE_ATTRS: ClassVar[SPECTROMETER] = {
+    MERGE_ATTRS: ClassVar[Spectrometer] = {
         "analyzer_name": "DA30-L",
         "analyzer_type": "hemispherical",
         "detect_radius": "15 degrees",
@@ -108,7 +108,7 @@ class DA30_L(SingleFileEndstation):
     def load_single_frame(
         self,
         fpath: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: Incomplete,
     ) -> xr.Dataset:
         if kwargs:
diff --git a/src/arpes/endstations/plugin/SToF_DLD.py b/src/arpes/endstations/plugin/SToF_DLD.py
index a32d17c9..41d372ae 100644
--- a/src/arpes/endstations/plugin/SToF_DLD.py
+++ b/src/arpes/endstations/plugin/SToF_DLD.py
@@ -11,8 +11,8 @@
 import xarray as xr
 
 import arpes.config
-from arpes.endstations import SCANDESC, EndstationBase
-from arpes.provenance import PROVENANCE, provenance_from_file
+from arpes.endstations import ScanDesc, EndstationBase
+from arpes.provenance import Provenance, provenance_from_file
 
 if TYPE_CHECKING:
     from _typeshed import Incomplete
@@ -25,7 +25,7 @@ class SToFDLDEndstation(EndstationBase):
 
     PRINCIPAL_NAME = "ALG-SToF-DLD"
 
-    def load(self, scan_desc: SCANDESC | None = None, **kwargs: Incomplete) -> xr.Dataset:
+    def load(self, scan_desc: ScanDesc | None = None, **kwargs: Incomplete) -> xr.Dataset:
         """Load a FITS file containing run data from Ping and Anton's delay line detector ARToF.
 
         Params:
@@ -61,7 +61,7 @@ def load(self, scan_desc: SCANDESC | None = None, **kwargs: Incomplete) -> xr.Da
             dims=("x_pixels", "t_pixels"),
             attrs=f["/PRIMARY"].attrs.items(),
         )
-        proenance_context: PROVENANCE = {
+        proenance_context: Provenance = {
             "what": "Loaded Anton and Ping DLD dataset from HDF5.",
             "by": "load_DLD",
         }
diff --git a/src/arpes/endstations/plugin/example_data.py b/src/arpes/endstations/plugin/example_data.py
index 41f5b6cc..18d0e590 100644
--- a/src/arpes/endstations/plugin/example_data.py
+++ b/src/arpes/endstations/plugin/example_data.py
@@ -16,7 +16,7 @@
 import xarray as xr
 
 import arpes.xarray_extensions  # noqa: F401
-from arpes.endstations import SCANDESC, HemisphericalEndstation, SingleFileEndstation
+from arpes.endstations import HemisphericalEndstation, ScanDesc, SingleFileEndstation
 
 if TYPE_CHECKING:
     from pathlib import Path
@@ -49,7 +49,7 @@ class ExampleDataEndstation(SingleFileEndstation, HemisphericalEndstation):
     def load_single_frame(
         self,
         frame_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: Incomplete,
     ) -> xr.Dataset:
         """Loads single file examples.
diff --git a/src/arpes/endstations/plugin/fallback.py b/src/arpes/endstations/plugin/fallback.py
index 398c923b..0440f65d 100644
--- a/src/arpes/endstations/plugin/fallback.py
+++ b/src/arpes/endstations/plugin/fallback.py
@@ -14,7 +14,7 @@
     import xarray as xr
     from _typeshed import Incomplete
 
-    from arpes.endstations import SCANDESC
+    from arpes.endstations import ScanDesc
 
 __all__ = ("FallbackEndstation",)
 
 LOGLEVELS = (DEBUG, INFO)
@@ -88,7 +88,7 @@ def determine_associated_loader(
     def load(
         self,
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         file: str | Path = "",
         **kwargs: Incomplete,
     ) -> xr.Dataset:
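Because the rename is not backwards compatible, downstream code that imported the old ALL-CAPS names needs a compatibility shim until it can pin the new release. A sketch (grounded only in the names this diff removes and adds):

```python
try:
    from arpes.endstations import ScanDesc  # new name, exported via __all__ above
except ImportError:  # older arpes releases
    from arpes.endstations import SCANDESC as ScanDesc
```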
diff --git a/src/arpes/endstations/plugin/igor_export.py b/src/arpes/endstations/plugin/igor_export.py
index 9705d454..f14f6757 100644
--- a/src/arpes/endstations/plugin/igor_export.py
+++ b/src/arpes/endstations/plugin/igor_export.py
@@ -9,9 +9,9 @@
 import numpy as np
 import xarray as xr
 
-from arpes.endstations import SCANDESC, SESEndstation
+from arpes.endstations import ScanDesc, SESEndstation
 from arpes.load_pxt import read_single_pxt
-from arpes.provenance import PROVENANCE, provenance_from_file
+from arpes.provenance import Provenance, provenance_from_file
 from arpes.repair import negate_energy
 
 __all__ = ("IgorExportEndstation",)
@@ -31,14 +31,14 @@ class IgorExportEndstation(SESEndstation):
     def load_single_frame(
         self,
         frame_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: bool,
     ) -> xr.Dataset:
         """HDF files are all inclusive, so we just need to load one file per scan.
 
         Args:
             frame_path (str | Path): frame path
-            scan_desc (SCANDESC): scan description
+            scan_desc (ScanDesc): scan description
             kwargs: pass to load_SES_h5, thus, only "robust_dimension_labels" can be accepted.
 
         Returns: xr.Dataset
@@ -59,7 +59,7 @@ def load_single_frame(
     def load_SES_h5(
         self,
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         *,
         robust_dimension_labels: bool = False,
     ) -> xr.Dataset:
@@ -143,7 +143,7 @@ def load_SES_h5(
         dims=dimension_labels,
         attrs=attrs,
     )
-    provenance_context: PROVENANCE = {"what": "Loaded SES dataset from HDF5.", "by": "load_SES"}
+    provenance_context: Provenance = {"what": "Loaded SES dataset from HDF5.", "by": "load_SES"}
     provenance_from_file(dataset_contents["spectrum"], str(data_loc), provenance_context)
 
     return xr.Dataset(
diff --git a/src/arpes/endstations/plugin/igor_plugin.py b/src/arpes/endstations/plugin/igor_plugin.py
index 2ee2279e..a36103bd 100644
--- a/src/arpes/endstations/plugin/igor_plugin.py
+++ b/src/arpes/endstations/plugin/igor_plugin.py
@@ -12,7 +12,7 @@
 import xarray as xr
 
 from arpes.endstations import (
-    SCANDESC,
+    ScanDesc,
     SingleFileEndstation,
 )
 from arpes.load_pxt import read_single_pxt
@@ -23,7 +23,7 @@
 
     from _typeshed import Incomplete
 
-    from arpes._typing import SPECTROMETER
+    from arpes._typing import Spectrometer
 
 __all__ = ("IgorEndstation",)
@@ -72,14 +72,14 @@ class IgorEndstation(SingleFileEndstation):
 
     RENAME_KEYS: ClassVar[dict[str, str]] = {}
 
-    MERGE_ATTRS: ClassVar[SPECTROMETER] = {}
+    MERGE_ATTRS: ClassVar[Spectrometer] = {}
 
     ATTR_TRANSFORMS: ClassVar[dict[str, Callable[..., dict[str, float | list[str] | str]]]] = {}
 
     def load_single_frame(
         self,
         frame_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: Incomplete,
     ) -> xr.Dataset:
         """Igor .pxt and .ibws are single files so we just read the one passed here."""
diff --git a/src/arpes/endstations/plugin/kaindl.py b/src/arpes/endstations/plugin/kaindl.py
index ace4eb66..73103a2a 100644
--- a/src/arpes/endstations/plugin/kaindl.py
+++ b/src/arpes/endstations/plugin/kaindl.py
@@ -16,7 +16,7 @@
 from arpes.endstations import HemisphericalEndstation, SESEndstation
 
 if TYPE_CHECKING:
-    from arpes.endstations import SCANDESC
+    from arpes.endstations import ScanDesc
 
 __all__ = ("KaindlEndstation",)
@@ -113,7 +113,7 @@ class KaindlEndstation(HemisphericalEndstation, SESEndstation):
         "Delay Stage": "delay",
     }
 
-    def resolve_frame_locations(self, scan_desc: SCANDESC | None = None) -> list[Path]:
+    def resolve_frame_locations(self, scan_desc: ScanDesc | None = None) -> list[Path]:
         """Finds .pxt files associated to a potentially multi-cut scan.
 
         This is very similar to what happens on BL4 at the ALS. You can look
@@ -142,7 +142,7 @@ def resolve_frame_locations(self, scan_desc: SCANDESC | None = None) -> list[Pat
     def concatenate_frames(
         self,
         frames: list[xr.Dataset],
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
     ) -> xr.Dataset | None:
         """Concatenates frames from individual .pxt files on the Kaindl setup.
@@ -184,7 +184,7 @@ def concatenate_frames(
             logger.info(f"Exception occurs. {err=}, {type(err)=}")
             return None
 
-    def postprocess_final(self, data: xr.Dataset, scan_desc: SCANDESC | None = None) -> xr.Dataset:
+    def postprocess_final(self, data: xr.Dataset, scan_desc: ScanDesc | None = None) -> xr.Dataset:
         """Performs final data preprocessing for the Kaindl lab Tr-ARPES setup.
 
         This is very similar to what happens at BL4/MERLIN because the code was adopted
@@ -192,7 +192,7 @@ def postprocess_final(self, data: xr.Dataset, scan_desc: SCANDESC | None = None)
 
         Args:
             data (xr.DataSet): [TODO:description]
-            scan_desc (SCANDESK): [TODO:description]
+            scan_desc (ScanDesc): [TODO:description]
         """
         assert scan_desc
         original_filename = scan_desc.get("path", scan_desc.get("file"))
diff --git a/src/arpes/endstations/plugin/merlin.py b/src/arpes/endstations/plugin/merlin.py
index a6a0f336..7270a3fb 100644
--- a/src/arpes/endstations/plugin/merlin.py
+++ b/src/arpes/endstations/plugin/merlin.py
@@ -11,7 +11,7 @@
 import xarray as xr
 
 from arpes.endstations import (
-    SCANDESC,
+    ScanDesc,
     HemisphericalEndstation,
     SESEndstation,
     SynchrotronEndstation,
@@ -22,7 +22,7 @@
 
     from _typeshed import Incomplete
 
-    from arpes._typing import SPECTROMETER
+    from arpes._typing import Spectrometer
 
 __all__ = ["BL403ARPESEndstation"]
@@ -97,7 +97,7 @@ class BL403ARPESEndstation(SynchrotronEndstation, HemisphericalEndstation, SESEn
         "number_of_sweeps": "n_sweeps",
     }
 
-    MERGE_ATTRS: ClassVar[SPECTROMETER] = {
+    MERGE_ATTRS: ClassVar[Spectrometer] = {
         "analyzer": "R8000",
         "analyzer_name": "Scienta R8000",
         "parallel_deflectors": False,
@@ -125,7 +125,7 @@ class BL403ARPESEndstation(SynchrotronEndstation, HemisphericalEndstation, SESEn
     def concatenate_frames(
         self,
         frames: list[xr.Dataset],
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
     ) -> xr.Dataset:
         """Concatenates frames from different files into a single scan.
@@ -187,7 +187,7 @@ def concatenate_frames(
     def load_single_frame(
         self,
         frame_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: Incomplete,
     ) -> xr.Dataset:
         """Loads all regions for a single .pxt frame, and perform per-frame normalization."""
@@ -236,7 +236,7 @@ def load_single_frame(
     def load_single_region(
         self,
         region_path: str | Path = "",
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
         **kwargs: Incomplete,
     ) -> xr.Dataset:
         """Loads a single region for multi-region scans."""
@@ -263,7 +263,7 @@ def load_single_region(
     def postprocess_final(
         self,
         data: xr.Dataset,
-        scan_desc: SCANDESC | None = None,
+        scan_desc: ScanDesc | None = None,
     ) -> xr.Dataset:
         """Performs final data normalization for MERLIN data.
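The kaindl hunk above also shows the idiom that `ScanDesc(total=False)` encourages: keys may be absent, so plugins read them defensively with `.get` and fallbacks. A small sketch of that idiom (`original_path` is a hypothetical helper):

```python
from arpes.endstations import ScanDesc


def original_path(scan_desc: ScanDesc) -> str | None:
    """Prefer the resolved path, falling back to the raw file reference."""
    path = scan_desc.get("path", scan_desc.get("file"))
    return str(path) if path is not None else None
```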
diff --git a/src/arpes/io.py b/src/arpes/io.py
index 0959e2d8..865a43d3 100644
--- a/src/arpes/io.py
+++ b/src/arpes/io.py
@@ -24,7 +24,7 @@
 import pandas as pd
 import xarray as xr
 
-from .endstations import SCANDESC, load_scan
+from .endstations import ScanDesc, load_scan
 
 if TYPE_CHECKING:
     from _typeshed import Incomplete
@@ -85,7 +85,7 @@ def load_data(
     assert isinstance(file, (str | Path))
     file = str(Path(file).absolute())
 
-    desc: SCANDESC = {
+    desc: ScanDesc = {
         "file": file,
         "location": location,
     }
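For most users the `ScanDesc` built inside `load_data` above is invisible; they only pass a file and a location alias. A usage sketch (the file name and alias are illustrative, not real data):

```python
from arpes.io import load_data

data = load_data("scan_042.pxt", location="BL403")  # hypothetical scan and alias
```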
diff --git a/src/arpes/plotting/annotations.py b/src/arpes/plotting/annotations.py
index 1e1d51f1..4672b121 100644
--- a/src/arpes/plotting/annotations.py
+++ b/src/arpes/plotting/annotations.py
@@ -19,7 +19,7 @@
 
     from numpy.typing import NDArray
 
-    from arpes._typing import EXPERIMENTINFO, DataType, MPLTextParam, XrTypes
+    from arpes._typing import ExperimentInfo, MPLTextParam, XrTypes
 
 __all__ = (
     "annotate_cuts",
@@ -44,7 +44,7 @@
 # * In order not to use data axis, set transform = ax.Transform
 def annotate_experimental_conditions(
     ax: Axes,
-    data: DataType,
+    data: XrTypes,
     desc: list[str | float] | float | str,
     *,
     show: bool = False,
@@ -105,7 +105,7 @@ def annotate_experimental_conditions(
         raise RuntimeError(err_msg)
     delta = fontsize * delta
 
-    conditions: EXPERIMENTINFO = data.S.experimental_conditions
+    conditions: ExperimentInfo = data.S.experimental_conditions
 
     renderers = {
         "temp": lambda c: "\\textbf{T = " + "{:.3g}".format(c["temp"]) + " K}",
diff --git a/src/arpes/plotting/fermi_edge.py b/src/arpes/plotting/fermi_edge.py
index d91ba7b4..a9bb014f 100644
--- a/src/arpes/plotting/fermi_edge.py
+++ b/src/arpes/plotting/fermi_edge.py
@@ -19,7 +19,6 @@
 if TYPE_CHECKING:
     from pathlib import Path
 
-    from _typeshed import Incomplete
     from numpy.typing import NDArray
 
     from arpes._typing import MPLPlotKwargs
diff --git a/src/arpes/preparation/axis_preparation.py b/src/arpes/preparation/axis_preparation.py
index 4a98f35e..57e20e57 100644
--- a/src/arpes/preparation/axis_preparation.py
+++ b/src/arpes/preparation/axis_preparation.py
@@ -10,7 +10,7 @@
 import xarray as xr
 from scipy.ndimage import geometric_transform
 
-from arpes.provenance import PROVENANCE, provenance, update_provenance
+from arpes.provenance import Provenance, provenance, update_provenance
 from arpes.utilities import lift_dataarray_to_generic
 from arpes.utilities.normalize import normalize_to_spectrum
 
@@ -132,7 +132,7 @@ def normalize_dim(
     if not keep_id and "id" in to_return.attrs:
         del to_return.attrs["id"]
 
-    provenance_context: PROVENANCE = {
+    provenance_context: Provenance = {
         "what": "Normalize axis or axes",
         "by": "normalize_dim",
         "dims": dims,
@@ -148,7 +148,7 @@ def normalize_total(data: XrTypes, *, total_intensity: float = 1000000) -> xr.Da
     """Normalizes data so that the total intensity is 1000000 (a bit arbitrary).
 
     Args:
-        data(DataType): Input ARPES data
+        data(xr.DataArray | xr.Dataset): Input ARPES data
         total_intensity: value for normalization
 
     Returns:
@@ -246,7 +246,7 @@ def transform_dataarray_axis(  # noqa: PLR0913
     if "id" in new_ds:
         del new_ds.attrs["id"]
 
-    provenance_context: PROVENANCE = {
+    provenance_context: Provenance = {
         "what": "Transformed a Dataset coordinate axis",
         "by": "transform_dataarray_axis",
         "old_axis": old_axis_name,
diff --git a/src/arpes/provenance.py b/src/arpes/provenance.py
index 6b5f7803..67277f7c 100644
--- a/src/arpes/provenance.py
+++ b/src/arpes/provenance.py
@@ -41,25 +41,25 @@
     import numpy as np
     from numpy.typing import NDArray
 
-    from ._typing import WORKSPACETYPE, XrTypes
+    from ._typing import WorkSpaceType, XrTypes
 
 
-class PROVENANCE(TypedDict, total=False):
+class Provenance(TypedDict, total=False):
     """TypedDict class for provenance.
 
     Any values can be stored in attrs["provenance"], but some rules exist.
     """
 
-    record: PROVENANCE
+    record: Provenance
     jupyter_context: list[str]
     parent_id: str | int | None
-    parents_provenance: list[PROVENANCE] | PROVENANCE | str | None
+    parents_provenance: list[Provenance] | Provenance | str | None
     time: str
     version: str
     file: str
     what: str
     by: str
-    args: list[PROVENANCE]
+    args: list[Provenance]
     #
     alpha: float  # derivative.curvature
     weight2d: float  # derivative.curvature
@@ -99,7 +99,7 @@ def attach_id(data: XrTypes) -> None:
 def provenance_from_file(
     child_arr: XrTypes,
     file: str,
-    record: PROVENANCE,
+    record: Provenance,
 ) -> None:
     """Builds a provenance entry for a dataset corresponding to loading data from a file.
@@ -114,7 +114,7 @@
     if "id" not in child_arr.attrs:
         attach_id(child_arr)
 
-    chile_provenance_context: PROVENANCE = {
+    chile_provenance_context: Provenance = {
         "record": record,
         "file": file,
         "jupyter_context": get_recent_history(5),
@@ -177,7 +177,7 @@ def func_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
         if len(all_parents) > 1:
             provenance_fn = provenance_multiple_parents
         if all_parents:
-            provenance_context: PROVENANCE = {
+            provenance_context: Provenance = {
                 "what": what,
                 "by": fn.__name__,
                 "time": datetime.datetime.now(UTC).isoformat(),
@@ -229,7 +229,7 @@ def func_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
         path = plot_fn(*args, **kwargs)
 
         if isinstance(path, str) and Path(path).exists():
-            workspace: WORKSPACETYPE = arpes.config.CONFIG["WORKSPACE"]
+            workspace: WorkSpaceType = arpes.config.CONFIG["WORKSPACE"]
 
             with contextlib.suppress(TypeError, KeyError):
                 workspace_name: str = workspace["name"]
@@ -270,7 +270,7 @@
 def provenance(
     child_arr: XrTypes,
     parents: list[XrTypes] | XrTypes,
-    record: PROVENANCE,
+    record: Provenance,
     *,
     keep_parent_ref: bool = False,
 ) -> None:
@@ -320,7 +320,7 @@ def provenance(
 def provenance_multiple_parents(
     child_arr: XrTypes,
     parents: list[XrTypes] | XrTypes,
-    record: PROVENANCE,
+    record: Provenance,
     *,
     keep_parent_ref: bool = False,
 ) -> None:
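The `func_wrapper` hunk above shows the decorator path assembling a `Provenance` record from `what`, `fn.__name__`, and a timestamp. A sketch of using that decorator for a custom step, assuming `update_provenance` takes the `what` string as shown (`my_analysis` is hypothetical):

```python
import xarray as xr

from arpes.provenance import update_provenance


@update_provenance("Example analysis step")
def my_analysis(data: xr.DataArray) -> xr.DataArray:
    """Hypothetical step; the wrapper records what/by/time as in func_wrapper."""
    return data - data.min()
```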
-309,7 +309,7 @@ def interpolated_coordinate_to_raw(*coordinates: NDArray[np.float_]) -> NDArray[ if "id" in converted_ds.attrs: del converted_ds.attrs["id"] - provenance_context: PROVENANCE = { + provenance_context: Provenance = { "what": "Slice along path", "by": "slice_along_path", "parsed_interpolation_points": parsed_interpolation_points, diff --git a/src/arpes/workflow.py b/src/arpes/workflow.py index 0911c88b..b435ea4b 100644 --- a/src/arpes/workflow.py +++ b/src/arpes/workflow.py @@ -47,7 +47,7 @@ from _typeshed import Incomplete - from ._typing import WORKSPACETYPE + from ._typing import WorkSpaceType __all__ = ( "go_to_figures", @@ -113,7 +113,7 @@ def _open_path(p: Path | str) -> None: @with_workspace -def go_to_workspace(workspace: WORKSPACETYPE | None = None) -> None: +def go_to_workspace(workspace: WorkSpaceType | None = None) -> None: """Opens the workspace folder, otherwise opens the location of the running notebook.""" path = Path.cwd() @@ -237,7 +237,7 @@ def consume(self, key: str, *, subscribe: bool = True) -> object: @classmethod def from_workspace( cls: type[DataProvider], - workspace: WORKSPACETYPE | None = None, + workspace: WorkSpaceType | None = None, ) -> DataProvider: if workspace is not None: return cls(path=Path(workspace["path"]), workspace_name=workspace["name"]) @@ -287,7 +287,7 @@ def write_data(self, key: str, data: object) -> None: def publish_data( key: str, data: Incomplete, - workspace: WORKSPACETYPE, + workspace: WorkSpaceType, ) -> None: """Publish/write data to a DataProvider.""" provider = DataProvider.from_workspace(workspace) @@ -297,7 +297,7 @@ def publish_data( @with_workspace def read_data( key: str = "*", - workspace: WORKSPACETYPE | None = None, + workspace: WorkSpaceType | None = None, ) -> object: """Read/consume a summary of the available data from a DataProvider. @@ -308,14 +308,14 @@ def read_data( @with_workspace -def summarize_data(key: str = "", workspace: WORKSPACETYPE | None = None) -> None: +def summarize_data(key: str = "", workspace: WorkSpaceType | None = None) -> None: """Give a summary of the available data from a DataProvider.""" provider = DataProvider.from_workspace(workspace) provider.summarize_clients(key=key) @with_workspace -def consume_data(key: str = "*", workspace: WORKSPACETYPE | None = None) -> object: +def consume_data(key: str = "*", workspace: WorkSpaceType | None = None) -> object: """Read/consume data from a DataProvider in a given workspace.""" provider = DataProvider.from_workspace(workspace) return provider.consume(key, subscribe=True) diff --git a/src/arpes/xarray_extensions.py b/src/arpes/xarray_extensions.py index 73864930..a5616a12 100644 --- a/src/arpes/xarray_extensions.py +++ b/src/arpes/xarray_extensions.py @@ -103,20 +103,20 @@ from numpy.typing import DTypeLike, NDArray from ._typing import ( - ANALYZERINFO, ANGLE, - DAQINFO, - EXPERIMENTINFO, - LIGHTSOURCEINFO, - SAMPLEINFO, - SCANINFO, - SPECTROMETER, + AnalyzerInfo, BeamLineSettings, + DAQInfo, DataType, + ExperimentInfo, + LightSourceInfo, PColorMeshKwargs, + SampleInfo, + ScanInfo, + Spectrometer, XrTypes, ) - from .provenance import PROVENANCE + from .provenance import Provenance IncompleteMPL: TypeAlias = Incomplete @@ -233,7 +233,7 @@ def sherman_function(self) -> Incomplete: @property def experimental_conditions( self, - ) -> EXPERIMENTINFO: + ) -> ExperimentInfo: """Return experimental condition: hv, polarization, temperature. 
diff --git a/src/arpes/xarray_extensions.py b/src/arpes/xarray_extensions.py
index 73864930..a5616a12 100644
--- a/src/arpes/xarray_extensions.py
+++ b/src/arpes/xarray_extensions.py
@@ -103,20 +103,20 @@
     from numpy.typing import DTypeLike, NDArray
 
     from ._typing import (
-        ANALYZERINFO,
         ANGLE,
-        DAQINFO,
-        EXPERIMENTINFO,
-        LIGHTSOURCEINFO,
-        SAMPLEINFO,
-        SCANINFO,
-        SPECTROMETER,
+        AnalyzerInfo,
         BeamLineSettings,
+        DAQInfo,
         DataType,
+        ExperimentInfo,
+        LightSourceInfo,
         PColorMeshKwargs,
+        SampleInfo,
+        ScanInfo,
+        Spectrometer,
         XrTypes,
     )
-    from .provenance import PROVENANCE
+    from .provenance import Provenance
 
 IncompleteMPL: TypeAlias = Incomplete
@@ -233,7 +233,7 @@ def sherman_function(self) -> Incomplete:
     @property
     def experimental_conditions(
         self,
-    ) -> EXPERIMENTINFO:
+    ) -> ExperimentInfo:
         """Return experimental condition: hv, polarization, temperature.
 
         Use this property in plotting/annotations.py/conditions
@@ -699,17 +699,17 @@ def iter_own_symmetry_points(self) -> Iterator[tuple[str, float]]:
         return _iter_groups(sym_points)
 
     @property
-    def history(self) -> list[PROVENANCE | None]:
+    def history(self) -> list[Provenance | None]:
         provenance_recorded = self._obj.attrs.get("provenance", None)
 
         def unlayer(
-            prov: PROVENANCE | None | str,
-        ) -> tuple[list[PROVENANCE | None], PROVENANCE | str | None]:
+            prov: Provenance | None | str,
+        ) -> tuple[list[Provenance | None], Provenance | str | None]:
             if prov is None:
                 return [], None  # tuple[list[Incomplete] | None]
             if isinstance(prov, str):
                 return [prov], None
-            first_layer: PROVENANCE = copy.copy(prov)
+            first_layer: Provenance = copy.copy(prov)
 
             rest = first_layer.pop("parents_provenance", None)
             if isinstance(rest, list):
@@ -722,7 +722,7 @@ def unlayer(
 
             return [first_layer], rest
 
-        def _unwrap_provenance(prov: PROVENANCE | None) -> list[PROVENANCE | None]:
+        def _unwrap_provenance(prov: Provenance | None) -> list[Provenance | None]:
             if prov is None:
                 return []
 
@@ -735,7 +735,7 @@ def _unwrap_provenance(prov: PROVENANCE | None) -> list[PROVENANCE | None]:
         return _unwrap_provenance(provenance_recorded)
 
     @property
-    def spectrometer(self) -> SPECTROMETER:
+    def spectrometer(self) -> Spectrometer:
         ds = self._obj
         if "spectrometer_name" in ds.attrs:
             return arpes.constants.SPECTROMETERS.get(ds.attrs["spectrometer_name"], {})
@@ -1385,12 +1385,12 @@ def full_coords(
         return full_coords
 
     @property
-    def sample_info(self) -> SAMPLEINFO:
+    def sample_info(self) -> SampleInfo:
         """Return sample info property.
 
         Returns (dict):
         """
-        sample_info: SAMPLEINFO = {
+        sample_info: SampleInfo = {
             "id": self._obj.attrs.get("sample_id"),
             "sample_name": self._obj.attrs.get("sample_name"),
             "source": self._obj.attrs.get("sample_source"),
@@ -1399,8 +1399,8 @@ def sample_info(self) -> SAMPLEINFO:
         return sample_info
 
     @property
-    def scan_info(self) -> SCANINFO:
-        scan_info: SCANINFO = {
+    def scan_info(self) -> ScanInfo:
+        scan_info: ScanInfo = {
             "time": self._obj.attrs.get("time", None),
             "date": self._obj.attrs.get("date", None),
             "type": self.scan_type,
@@ -1411,9 +1411,9 @@ def scan_info(self) -> SCANINFO:
         return scan_info
 
     @property
-    def experiment_info(self) -> EXPERIMENTINFO:
+    def experiment_info(self) -> ExperimentInfo:
         """Return experiment info property."""
-        experiment_info: EXPERIMENTINFO = {
+        experiment_info: ExperimentInfo = {
             "temperature": self.temp,
             "temperature_cryotip": self._obj.attrs.get("temperature_cryotip", np.nan),
             "pressure": self._obj.attrs.get("pressure", np.nan),
@@ -1428,9 +1428,9 @@ def experiment_info(self) -> EXPERIMENTINFO:
         return experiment_info
 
     @property
-    def pump_info(self) -> LIGHTSOURCEINFO:
+    def pump_info(self) -> LightSourceInfo:
         """Return pump info property."""
-        pump_info: LIGHTSOURCEINFO = {
+        pump_info: LightSourceInfo = {
             "pump_wavelength": self._obj.attrs.get("pump_wavelength", np.nan),
             "pump_energy": self._obj.attrs.get("pump_energy", np.nan),
             "pump_fluence": self._obj.attrs.get("pump_fluence", np.nan),
@@ -1447,12 +1447,12 @@ def pump_info(self) -> LIGHTSOURCEINFO:
         return pump_info
 
     @property
-    def probe_info(self) -> LIGHTSOURCEINFO:
+    def probe_info(self) -> LightSourceInfo:
         """Return probe info property.
 
         Returns (LIGHTSOURCEINFO):
         """
-        probe_info: LIGHTSOURCEINFO = {
+        probe_info: LightSourceInfo = {
             "probe_wavelength": self._obj.attrs.get("probe_wavelength", np.nan),
             "probe_energy": self.hv,
             "probe_fluence": self._obj.attrs.get("probe_fluence", np.nan),
@@ -1469,7 +1469,7 @@ def probe_info(self) -> LIGHTSOURCEINFO:
         return probe_info
 
     @property
-    def laser_info(self) -> LIGHTSOURCEINFO:
+    def laser_info(self) -> LightSourceInfo:
         return {
             **self.probe_info,
             **self.pump_info,
@@ -1477,9 +1477,9 @@ def laser_info(self) -> LIGHTSOURCEINFO:
         }
 
     @property
-    def analyzer_info(self) -> ANALYZERINFO:
+    def analyzer_info(self) -> AnalyzerInfo:
         """General information about the photoelectron analyzer used."""
-        analyzer_info: ANALYZERINFO = {
+        analyzer_info: AnalyzerInfo = {
             "lens_mode": self._obj.attrs.get("lens_mode"),
             "lens_mode_name": self._obj.attrs.get("lens_mode_name"),
             "acquisition_mode": self._obj.attrs.get("acquisition_mode", None),
@@ -1495,9 +1495,9 @@ def analyzer_info(self) -> ANALYZERINFO:
         return analyzer_info
 
     @property
-    def daq_info(self) -> DAQINFO:
+    def daq_info(self) -> DAQInfo:
         """General information about the acquisition settings for an ARPES experiment."""
-        daq_info: DAQINFO = {
+        daq_info: DAQInfo = {
             "daq_type": self._obj.attrs.get("daq_type"),
             "region": self._obj.attrs.get("daq_region"),
             "region_name": self._obj.attrs.get("daq_region_name"),
@@ -1514,9 +1514,9 @@ def daq_info(self) -> DAQINFO:
         return daq_info
 
     @property
-    def beamline_info(self) -> LIGHTSOURCEINFO:
+    def beamline_info(self) -> LightSourceInfo:
         """Information about the beamline or light source used for a measurement."""
-        beamline_info: LIGHTSOURCEINFO = {
+        beamline_info: LightSourceInfo = {
             "hv": self.hv,
             "linewidth": self._obj.attrs.get("probe_linewidth", np.nan),
             "photon_polarization": self.probe_polarization,
@@ -1691,17 +1691,17 @@ def _repr_html_spectrometer_info(self) -> str:
         return ARPESAccessorBase.dict_to_html(ordered_settings)
 
     @staticmethod
-    def _repr_html_experimental_conditions(conditions: EXPERIMENTINFO) -> str:
+    def _repr_html_experimental_conditions(conditions: ExperimentInfo) -> str:
         """Return the experimental conditions with html format.
 
         Args:
-            conditions (EXPERIMENTINFO): self.confitions is usually used.
+            conditions (ExperimentInfo): self.conditions is usually used.
 
         Returns (str):
             html representation of the experimental conditions.
         """
 
-        def _experimentalinfo_to_dict(conditions: EXPERIMENTINFO) -> dict[str, str]:
+        def _experimentalinfo_to_dict(conditions: ExperimentInfo) -> dict[str, str]:
             transformed_dict = {}
             for k, v in conditions.items():
                 if k == "polarrization":
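These accessor properties are where the CamelCase names pay off for users: each returns a `total=False` TypedDict, so downstream helpers can be annotated against the declared keys instead of `dict[str, Any]`. A small sketch consuming `ScanInfo` — the helper and the literal values are illustrative, not from the library:

```python
from arpes._typing import ScanInfo


def describe_scan(info: ScanInfo) -> str:
    """Format a one-line scan summary; .get() is natural since total=False."""
    return f"{info.get('spectrum_type', 'unknown')} taken {info.get('date', '?')}"


# In practice `info` would come from the scan_info accessor on loaded data.
print(describe_scan({"spectrum_type": "cut", "date": "2024-01-01"}))
```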
diff --git a/tests/conftest.py b/tests/conftest.py
index d94473a7..11cb06db 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -16,13 +16,13 @@
     from collections.abc import Callable, Generator
 
     import xarray as xr
 
-    from arpes._typing import SCANINFO, WORKSPACETYPE
+    from arpes._typing import ScanInfo, WorkSpaceType
 
 
 class EXPECTEDD(TypedDict, total=False):
     """TypedDict for expected."""
 
-    scan_info: SCANINFO
+    scan_info: ScanInfo
 
 
 class SCENARIO(TypedDict, total=False):
@@ -67,7 +67,7 @@ def sandbox_configuration() -> Generator[Sandbox, None, None]:
     resources_dir = Path.cwd() / "tests" / "resources"
 
     def set_workspace(name: str) -> None:
-        workspace: WORKSPACETYPE = {
+        workspace: WorkSpaceType = {
             "path": resources_dir / "datasets" / name,
             "name": name,
         }
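The conftest change shows the test-suite side of the rename. A minimal pytest-style sketch of the same pattern — the fixture name and paths are invented, not copied from the suite:

```python
from pathlib import Path

import pytest

from arpes._typing import WorkSpaceType


@pytest.fixture
def example_workspace(tmp_path: Path) -> WorkSpaceType:
    """Build a throwaway workspace dict, mirroring set_workspace above."""
    name = "example"
    workspace: WorkSpaceType = {
        "path": tmp_path / "datasets" / name,
        "name": name,
    }
    # A stale all-caps import such as WORKSPACETYPE now raises ImportError,
    # so leftover usages cannot survive the rename unnoticed.
    return workspace
```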