✨  Add a new function, build_crosscorrelation, to tarpes.py; until now it was used only in my personal scripts.
🔥  Remove transform_labels in plotting/utils.py
    It is not especially convenient, and it is not used on a daily basis.
💬  Update warning text in normalized_spectrum

💬  Update type hints
arafune committed Mar 17, 2024
1 parent 73e21ec commit c294318
Showing 16 changed files with 273 additions and 111 deletions.
2 changes: 1 addition & 1 deletion src/arpes/analysis/__init__.py
@@ -30,5 +30,5 @@
     pocket_parameters,
     radial_edcs_along_pocket,
 )
-from .tarpes import normalized_relative_change, relative_change
+from .tarpes import build_crosscorrelation, normalized_relative_change, relative_change
 from .xps import approximate_core_levels
2 changes: 1 addition & 1 deletion src/arpes/analysis/decomposition.py
@@ -77,7 +77,7 @@ class FactorAnalysisParam(TypedDict, total=False):
     random_state: int | None


-class DecompositionParam(PCAParam, FastICAParam, NMFParam, FactorAnalysisParam):
+class DecompositionParam(PCAParam, FastICAParam, NMFParam, FactorAnalysisParam):  # type: ignore[misc]
    pass

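Why the new ignore is likely needed: mypy refuses to merge TypedDict bases that declare the same key (reported as "Overwriting TypedDict field ... while merging" under the misc error code), and the four *Param classes presumably share keys such as n_components and random_state. A minimal sketch of the pattern, with field names assumed for illustration rather than taken from the real classes:

    from typing import TypedDict


    class PCAParam(TypedDict, total=False):
        n_components: int | None
        random_state: int | None


    class FastICAParam(TypedDict, total=False):
        n_components: int | None  # also declared in PCAParam -> mypy "misc" error
        random_state: int | None


    # The suppression keeps the convenient merged parameter dict while
    # silencing the overlapping-field complaint:
    class DecompositionParam(PCAParam, FastICAParam):  # type: ignore[misc]
        pass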
14 changes: 7 additions & 7 deletions src/arpes/analysis/deconvolution.py
@@ -82,11 +82,8 @@ def deconvolve_ice(
         poly = np.poly1d(coefs)
         deconv[t] = poly(0)

-    if isinstance(data, np.ndarray):
-        result = deconv
-    else:
-        result = data.copy(deep=True)
-        result.values = deconv
+    result = data.copy(deep=True)
+    result.values = deconv
     return result


@@ -114,7 +111,11 @@ def deconvolve_rl(


 @update_provenance("Make 1D-Point Spread Function")
-def make_psf1d(data: xr.DataArray, dim: str, sigma: float) -> xr.DataArray:
+def make_psf1d(
+    data: xr.DataArray,
+    dim: str,
+    sigma: float,
+) -> xr.DataArray:
     """Produces a 1-dimensional gaussian point spread function for use in deconvolve_rl.

     Args:
@@ -184,7 +185,6 @@ def make_psf(
                 f" psf_coords[{k}]: ±{np.max(v):.3f}",
             )
     coords = np.meshgrid(*[psf_coords[dim] for dim in data.dims], indexing="ij")
-
     coords_for_pdf_pos = np.stack(coords, axis=-1)  # point distribution function (pdf)
     logger.debug(f"shape of coords_for_pdf_pos: {coords_for_pdf_pos.shape}")
     psf = xr.DataArray(
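For orientation, the two functions are used together: make_psf1d builds a gaussian kernel along one dimension of the data, which deconvolve_rl then uses as the point spread function for iterative Richardson-Lucy deconvolution. A usage sketch — `spectrum`, the axis name, the width, and the iteration keyword are assumptions for illustration, not values from this diff:

    from arpes.analysis.deconvolution import deconvolve_rl, make_psf1d

    # Gaussian PSF along the energy axis; sigma chosen near the assumed
    # instrumental energy resolution (~10 meV):
    psf = make_psf1d(spectrum, dim="eV", sigma=0.010)

    # Iteratively deconvolve the spectrum against that kernel (the
    # n_iterations keyword is assumed here):
    restored = deconvolve_rl(spectrum, psf=psf, n_iterations=20)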
24 changes: 13 additions & 11 deletions src/arpes/analysis/savitzky_golay.py
@@ -3,33 +3,34 @@
 from __future__ import annotations

 from math import factorial
-from typing import TYPE_CHECKING, Literal
+from typing import TYPE_CHECKING, Literal, TypeVar

 import numpy as np
 import scipy.signal
 import xarray as xr
+from numpy.typing import NDArray

 from arpes.constants import TWO_DIMENSION
 from arpes.provenance import update_provenance

 if TYPE_CHECKING:
     from collections.abc import Hashable

-    from numpy.typing import NDArray


 __all__ = ("savitzky_golay",)

+T = TypeVar("T", xr.DataArray, NDArray[np.float_])


 @update_provenance("Savitzky Golay Filter")
 def savitzky_golay(  # noqa: PLR0913
-    data: xr.DataArray,
+    data: T,
     window_size: int,
     order: int,
-    deriv: int | Literal["col", "row", "both", None] = 0,
+    deriv: int | Literal["col", "row", "both"] | None = 0,
     rate: int = 1,
     dim: Hashable = "",
-) -> xr.DataArray:
+) -> T:
     """Implements a Savitzky Golay filter with given window size.

     You can specify "pass through" dimensions
@@ -38,6 +39,7 @@ def savitzky_golay(  # noqa: PLR0913
     Args:
         data: Input data.
+            This should be xr.DataArray, while list[float] or np.ndarray can be accepted.
         window_size: Number of points in the window that the filter uses locally.
         order: The polynomial order used in the convolution.
         deriv: the order of the derivative to compute (default = 0 means only smoothing)
@@ -47,12 +49,12 @@ def savitzky_golay(  # noqa: PLR0913
     Returns:
         Smoothed data.
     """
-    if isinstance(
-        data,
-        list | np.ndarray,
-    ):
+    if isinstance(data, list):
         assert isinstance(deriv, int)
         return savitzky_golay_array(data, window_size, order, deriv, rate)
+    if isinstance(data, np.ndarray):
+        assert isinstance(deriv, int)
+        return savitzky_golay_array(data, window_size, order, deriv, rate)

     if len(data.dims) == 1:
         assert isinstance(deriv, int)
         transformed_data = savitzky_golay_array(data.values, window_size, order, deriv, rate)
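The TypeVar makes the public signature generic: the filter now advertises that an np.ndarray input returns an np.ndarray and an xr.DataArray input returns an xr.DataArray, instead of claiming DataArray for everything. A quick sketch with made-up data:

    import numpy as np
    import xarray as xr
    from arpes.analysis.savitzky_golay import savitzky_golay

    rng = np.random.default_rng(0)
    noisy = np.sin(np.linspace(0, 10, 200)) + 0.1 * rng.normal(size=200)

    smooth_arr = savitzky_golay(noisy, window_size=11, order=3)  # ndarray -> ndarray
    smooth_da = savitzky_golay(                                  # DataArray -> DataArray
        xr.DataArray(noisy, dims=("eV",)),
        window_size=11,
        order=3,
    )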
21 changes: 19 additions & 2 deletions src/arpes/analysis/statistics.py
@@ -2,6 +2,7 @@

 from __future__ import annotations

+from logging import DEBUG, INFO, Formatter, StreamHandler, getLogger
 from typing import TYPE_CHECKING

 import xarray as xr
@@ -14,10 +15,26 @@

 __all__ = ("mean_and_deviation",)

+LOGLEVELS = (DEBUG, INFO)
+LOGLEVEL = LOGLEVELS[1]
+logger = getLogger(__name__)
+fmt = "%(asctime)s %(levelname)s %(name)s :%(message)s"
+formatter = Formatter(fmt)
+handler = StreamHandler()
+handler.setLevel(LOGLEVEL)
+logger.setLevel(LOGLEVEL)
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+logger.propagate = False
+

 @update_provenance("Calculate mean and standard deviation for observation axis")
 @lift_dataarray_to_generic
-def mean_and_deviation(data: XrTypes, axis: str = "", name: str = "") -> xr.Dataset:
+def mean_and_deviation(
+    data: xr.DataArray,  # data.name is used.
+    axis: str = "",
+    name: str = "",
+) -> xr.Dataset:
     """Calculates the mean and standard deviation of a DataArray along an axis.

     The reduced axis corresponds to individual observations of a tensor/array valued quantity.
@@ -27,7 +44,7 @@ def mean_and_deviation(
     If a name is not attached to the DataArray, it should be provided.

     Args:
-        data: The input data (Both DataArray and Dataset).
+        data: The input data.
         axis: The name of the dimension which we should perform the reduction along.
         name: The name of the variable which should be reduced. By default, uses `data.name`.
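With the tightened signature, mean_and_deviation takes a named DataArray (@lift_dataarray_to_generic still lifts it to accept Datasets) and collapses one observation axis into mean and standard deviation. A usage sketch with a made-up "cycle" axis of repeated observations:

    import numpy as np
    import xarray as xr
    from arpes.analysis.statistics import mean_and_deviation

    rng = np.random.default_rng(0)
    spectra = xr.DataArray(
        rng.normal(loc=1.0, size=(10, 50)),  # ten observations of a 50-point EDC
        dims=("cycle", "eV"),
        name="spectrum",  # a name is required unless `name=` is passed
    )

    stats = mean_and_deviation(spectra, axis="cycle")
    # -> xr.Dataset with mean and standard-deviation variables taken along
    #    "cycle", named after the DataArray (e.g. "spectrum" and "spectrum_std").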
96 changes: 95 additions & 1 deletion src/arpes/analysis/tarpes.py
@@ -3,15 +3,109 @@
 from __future__ import annotations

 import warnings
+from logging import DEBUG, INFO, Formatter, StreamHandler, getLogger
+from typing import TYPE_CHECKING, TypeVar

 import numpy as np
 import xarray as xr
+from numpy.typing import NDArray

 from arpes.preparation import normalize_dim
 from arpes.provenance import update_provenance
 from arpes.utilities import normalize_to_spectrum

-__all__ = ("find_t0", "relative_change", "normalized_relative_change")
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+
+__all__ = (
+    "find_t0",
+    "relative_change",
+    "normalized_relative_change",
+    "build_crosscorrelation",
+    "delaytime_fs",
+    "position_to_delaytime",
+)
+
+
+LOGLEVELS = (DEBUG, INFO)
+LOGLEVEL = LOGLEVELS[1]
+logger = getLogger(__name__)
+fmt = "%(asctime)s %(levelname)s %(name)s :%(message)s"
+formatter = Formatter(fmt)
+handler = StreamHandler()
+handler.setLevel(LOGLEVEL)
+logger.setLevel(LOGLEVEL)
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+logger.propagate = False
+
+
+A = TypeVar("A", NDArray[np.float64], float)
+
+
+def build_crosscorrelation(
+    datalist: Sequence[xr.DataArray],
+    delayline_dim: str = "position",
+    delayline_origin: float = 0,
+    *,
+    convert_position_to_time: bool = True,
+) -> xr.DataArray:
+    """Build the ('original dimension' + 1)D data from a series of cross-correlation measurements.
+
+    Args:
+        datalist (Sequence[xr.DataArray]): Data series from the cross-correlation experiments.
+        delayline_dim: The dimension name for the "delay line", which must be a key of
+            data.attrs. When this is the "position" dimension, the unit is assumed to be
+            "mm". If the values have already been converted to the "time" dimension, set
+            convert_position_to_time=True.
+        delayline_origin (float): The value corresponding to delay zero.
+        convert_position_to_time: (bool) If True, the delay-line values are used as-is and
+            no conversion from position to "delay" is performed.
+
+    Returns: xr.DataArray
+    """
+    cross_correlations = []
+
+    for spectrum in datalist:
+        spectrum_arr = (
+            spectrum if isinstance(spectrum, xr.DataArray) else normalize_to_spectrum(spectrum)
+        )
+        if convert_position_to_time:
+            delay_time = spectrum_arr.attrs[delayline_dim] - delayline_origin
+        else:
+            delay_time = position_to_delaytime(
+                float(spectrum_arr[delayline_dim]),
+                delayline_origin,
+            )
+        cross_correlations.append(
+            spectrum_arr.assign_coords({"delay": delay_time}).expand_dims("delay"),
+        )
+    return xr.concat(cross_correlations, dim="delay")
+
+
+def delaytime_fs(mirror_movement_um: A) -> A:
+    """Return the delay time from the mirror movement (not the mirror position).
+
+    Args:
+        mirror_movement_um (NDArray | float): Mirror movement in µm.
+
+    Returns: NDArray | float
+        Delay time in fs.
+    """
+    return 3.335640951981521 * mirror_movement_um
+
+
+def position_to_delaytime(position_mm: A, delayline_offset_mm: float) -> A:
+    """Return the delay time from the mirror position.
+
+    Args:
+        position_mm (NDArray | float): Mirror position in mm.
+        delayline_offset_mm (float): Mirror position corresponding to zero delay.
+
+    Returns: NDArray | float
+        Delay time in fs.
+    """
+    return delaytime_fs(2 * (position_mm - delayline_offset_mm) * 1000)


 @update_provenance("Normalized subtraction map")
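The conversion helpers encode the delay-line geometry: the constant in delaytime_fs is 1/c with c ≈ 0.299792458 µm/fs, i.e. the travel time of light per µm of extra optical path, and position_to_delaytime doubles the stage displacement (a retroreflector lengthens the path by twice its movement) and converts mm to µm. A usage sketch — the stage readings, the "position" attribute, and the zero-delay offset below are assumptions:

    from arpes.analysis.tarpes import (
        build_crosscorrelation,
        delaytime_fs,
        position_to_delaytime,
    )

    # 10 µm of extra optical path delays the probe by ~33.4 fs:
    delaytime_fs(10.0)  # -> 33.35640951981521

    # Moving the mirror 0.15 mm from zero delay adds 300 µm of path, i.e. ~1 ps:
    position_to_delaytime(0.15, delayline_offset_mm=0.0)  # -> ~1000.7 fs

    # Stack spectra recorded at successive stage positions (mm) into a single
    # DataArray with a "delay" axis in fs; `frames` is a hypothetical list of
    # xr.DataArray spectra carrying the stage reading under "position":
    tr_data = build_crosscorrelation(
        frames,
        delayline_dim="position",
        delayline_origin=25.37,          # assumed stage reading at time zero
        convert_position_to_time=False,  # readings are positions: convert to fs
    )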
11 changes: 6 additions & 5 deletions src/arpes/constants.py
@@ -21,18 +21,19 @@
 METERS_PER_SECOND_PER_EV_ANGSTROM = (
     151927  # converts from eV * angstrom to meters/second velocity units
 )
-HBAR = 1.0545718176461565 * 10 ** (-34)
-HBAR_PER_EV = 6.582119569 * 10 ** (
-    -16
-)  # gives the energy lifetime relationship via tau = -hbar / np.imag(self_energy)
+HBAR = 1.0545718176461565e-34
+HBAR_PER_EV = 6.582119569509067e-16
+# gives the energy lifetime relationship via tau = -hbar / np.imag(self_energy)


 BARE_ELECTRON_MASS = 9.109383701e-31  # kg
 HBAR_SQ_EV_PER_ELECTRON_MASS = 0.475600805657  # hbar^2 / m0 in eV^2 s^2 / kg
 HBAR_SQ_EV_PER_ELECTRON_MASS_ANGSTROM_SQ = 7.619964  # (hbar^2) / (m0 * angstrom ^2) in eV

 K_BOLTZMANN_EV_KELVIN = 8.617333262145178e-5  # in units of eV / Kelvin
 K_BOLTZMANN_MEV_KELVIN = 1000 * K_BOLTZMANN_EV_KELVIN  # meV / Kelvin

-HC = 1239.84172  # in units of eV * nm
+HC = 1239.8419843320028  # in units of eV * nm

 HEX_ALPHABET = "ABCDEF0123456789"
 ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
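The replacements carry the constants to full double precision rather than truncating; since the 2019 SI redefinition fixes h, e, and c exactly, the new HC can be reproduced directly:

    # Exact SI defining constants (2019 redefinition):
    h = 6.62607015e-34   # Planck constant, J s
    e = 1.602176634e-19  # elementary charge, C
    c = 299792458.0      # speed of light, m / s

    # h*c in eV * nm agrees with the new HC at double precision:
    print(h * c / e * 1e9)  # ~1239.8419843320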
37 changes: 35 additions & 2 deletions src/arpes/laser.py
@@ -2,12 +2,45 @@

 from __future__ import annotations

-from typing import TYPE_CHECKING
+from logging import DEBUG, INFO, Formatter, StreamHandler, getLogger
+from typing import TYPE_CHECKING, TypeVar
+
+import numpy as np
+from numpy.typing import NDArray
+
+from .constants import HC

 if TYPE_CHECKING:
     import pint

-__all__ = ("electrons_per_pulse",)
+LOGLEVELS = (DEBUG, INFO)
+LOGLEVEL = LOGLEVELS[1]
+logger = getLogger(__name__)
+fmt = "%(asctime)s %(levelname)s %(name)s :%(message)s"
+formatter = Formatter(fmt)
+handler = StreamHandler()
+handler.setLevel(LOGLEVEL)
+logger.setLevel(LOGLEVEL)
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+logger.propagate = False
+
+
+__all__ = ("electrons_per_pulse", "wavelength_to_energy")
+
+A = TypeVar("A", NDArray[np.float64], float)
+
+
+def wavelength_to_energy(wavelength_nm: A) -> A:
+    """Return the photon energy of the light.
+
+    Args:
+        wavelength_nm (NDArray | float): Wavelength of the light in nm.
+
+    Returns: NDArray | float
+        Photon energy in eV.
+    """
+    return HC / wavelength_nm


 def electrons_per_pulse(
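wavelength_to_energy is a thin wrapper over the sharpened HC constant, and the TypeVar lets it take a scalar or an array. For example:

    import numpy as np
    from arpes.laser import wavelength_to_energy

    # A Ti:sapphire fundamental at 800 nm is ~1.55 eV:
    wavelength_to_energy(800.0)  # -> 1.5498024804150035

    # Broadcasts elementwise (fundamental, second, and fourth harmonic):
    wavelength_to_energy(np.array([800.0, 400.0, 200.0]))
    # -> array([1.54980248, 3.09960496, 6.19920992])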
10 changes: 5 additions & 5 deletions src/arpes/plotting/fermi_edge.py
@@ -117,7 +117,7 @@ def fermi_edge_reference(
     """Fits for and plots results for the Fermi edge on a piece of data.

     Args:
-        data: The data, this should be of type DataArray<lmfit.model.ModelResult>
+        data_arr: The data, this should be of type DataArray<lmfit.model.ModelResult>
         title: A title to attach to the plot
         ax: The axes to plot to, if not specified will be generated
         out: Where to save the plot
@@ -133,7 +133,7 @@ def fermi_edge_reference(
     assert isinstance(data_arr, xr.DataArray)
     sum_dimensions: set[str] = {"cycle", "phi", "kp", "kx"}
     sum_dimensions.intersection_update(set(data_arr.dims))
-    summed_data = data.sum(*list(sum_dimensions))
+    summed_data = data_arr.sum(*list(sum_dimensions))

     broadcast_dimensions = [str(d) for d in summed_data.dims if str(d) != "eV"]
     msg = f"Could not product fermi edge reference. Too many dimensions: {broadcast_dimensions}"
@@ -156,14 +156,14 @@ def fermi_edge_reference(
         _, ax = plt.subplots(figsize=(8, 5))

     if not title:
-        title = data.S.label.replace("_", " ")
+        title = data_arr.S.label.replace("_", " ")

     centers.plot(ax=ax, **kwargs)
     widths.plot(ax=ax, **kwargs)

     if isinstance(ax, Axes):
-        ax.set_xlabel(label_for_dim(data, ax.get_xlabel()))
-        ax.set_ylabel(label_for_dim(data, ax.get_ylabel()))
+        ax.set_xlabel(label_for_dim(data_arr, ax.get_xlabel()))
+        ax.set_ylabel(label_for_dim(data_arr, ax.get_ylabel()))
         ax.set_title(title, font_size=14)

     if out:
4 changes: 1 addition & 3 deletions src/arpes/plotting/movie.py
@@ -48,9 +48,7 @@ def plot_movie(
     Raises:
         TypeError: [TODO:description]
     """
-    if not isinstance(data, xr.DataArray):
-        msg = "You must provide a DataArray"
-        raise TypeError(msg)
+    assert isinstance(data, xr.DataArray), "You must provide a DataArray"
     fig, ax = fig_ax
     if ax is None:
         fig, ax = plt.subplots(figsize=(7, 7))