From 7fa7231e920a7c756c01294ec5f12bfbb8be6c08 Mon Sep 17 00:00:00 2001
From: Ryuichi Arafune
Date: Mon, 18 Mar 2024 11:20:01 +0900
Subject: [PATCH] =?UTF-8?q?=F0=9F=94=A5=20=20Use=20ruff=20for=20formatting?=
 =?UTF-8?q?.?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .github/workflows/black.yml                     | 13 -------------
 .github/workflows/ruff.yml                      | 12 ++++++++++++
 .pre-commit-config.yaml                         | 12 ++++++++----
 pyproject.toml                                  |  3 ++-
 src/arpes/analysis/decomposition.py             |  8 ++++----
 src/arpes/analysis/deconvolution.py             |  2 +-
 src/arpes/corrections/fermi_edge_corrections.py |  4 +---
 src/arpes/load_pxt.py                           |  3 +--
 src/arpes/simulation.py                         |  6 +++---
 9 files changed, 32 insertions(+), 31 deletions(-)
 delete mode 100644 .github/workflows/black.yml
 create mode 100644 .github/workflows/ruff.yml

diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml
deleted file mode 100644
index 0ae263d1..00000000
--- a/.github/workflows/black.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Lint
-
-on: [push, pull_request]
-
-jobs:
-  lint:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
-        with:
-          python-version: "3.11"
-      - uses: psf/black@stable
diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml
new file mode 100644
index 00000000..9c7e174f
--- /dev/null
+++ b/.github/workflows/ruff.yml
@@ -0,0 +1,12 @@
+name: Ruff
+
+on: [push, pull_request]
+
+jobs:
+  ruff:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: chartboost/ruff-action@v1
+        with:
+          python-version: "3.11"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b4a1e3e5..f895b306 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,5 +1,9 @@
 repos:
-  - hooks:
-      - id: black
-    repo: https://github.com/psf/black
-    rev: 23.3.0
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.3.3
+    hooks:
+      # Run the linter
+      # - id: ruff
+      #   args: [ --fix ]
+      # Run the formatter
+      - id: ruff-format
diff --git a/pyproject.toml b/pyproject.toml
index 18c3c64d..1d741304 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -42,7 +42,7 @@ build-backend = "hatchling.build"
 managed = true
 dev-dependencies = [
     "colorama>=0.4.6",
-    "black>=23.3.0",
+    "ruff>=0.3.3",
     "pytest>=7.3.2",
     "pytest-cov",
     "pytest-qt>=4.2.0",
@@ -100,6 +100,7 @@ lint.ignore = [
 
 lint.select = ["ALL"]
 target-version = "py310"
 line-length = 100
+indent-width = 4
 exclude = ["scripts", "docs", "conda"]
 
diff --git a/src/arpes/analysis/decomposition.py b/src/arpes/analysis/decomposition.py
index dc3ae542..9dec387b 100644
--- a/src/arpes/analysis/decomposition.py
+++ b/src/arpes/analysis/decomposition.py
@@ -181,7 +181,7 @@ def decomposition_along(
 
 @wraps(decomposition_along)
 def pca_along(
-    *args: * tuple[xr.DataArray, list[str]],
+    *args: *tuple[xr.DataArray, list[str]],
     **kwargs: Unpack[PCAParam],
 ) -> tuple[xr.DataArray, sklearn.decomposition.PCA]:
     """Specializes `decomposition_along` with `sklearn.decomposition.PCA`."""
@@ -192,7 +192,7 @@ def pca_along(
 
 @wraps(decomposition_along)
 def factor_analysis_along(
-    *args: * tuple[xr.DataArray, list[str]],
+    *args: *tuple[xr.DataArray, list[str]],
     **kwargs: Unpack[FactorAnalysisParam],
 ) -> tuple[xr.DataArray, sklearn.decomposition.FactorAnalysis]:
     """Specializes `decomposition_along` with `sklearn.decomposition.FactorAnalysis`."""
@@ -201,7 +201,7 @@ def factor_analysis_along(
 
 @wraps(decomposition_along)
 def ica_along(
-    *args: * tuple[xr.DataArray, list[str]],
+    *args: *tuple[xr.DataArray, list[str]],
     **kwargs: Unpack[FastICAParam],
 ) -> tuple[xr.DataArray, sklearn.decomposition.FastICA]:
     """Specializes `decomposition_along` with `sklearn.decomposition.FastICA`."""
@@ -210,7 +210,7 @@ def ica_along(
 
 @wraps(decomposition_along)
 def nmf_along(
-    *args: * tuple[xr.DataArray, list[str]],
+    *args: *tuple[xr.DataArray, list[str]],
     **kwargs: Unpack[NMFParam],
 ) -> tuple[xr.DataArray, sklearn.decomposition.NMF]:
     """Specializes `decomposition_along` with `sklearn.decomposition.NMF`."""
diff --git a/src/arpes/analysis/deconvolution.py b/src/arpes/analysis/deconvolution.py
index 91afd821..28ca90e4 100644
--- a/src/arpes/analysis/deconvolution.py
+++ b/src/arpes/analysis/deconvolution.py
@@ -166,7 +166,7 @@ def make_psf(
     )
 
     if fwhm:
-        sigmas = {k: v / (2 * np.sqrt(2 * np.log(2))) for k, v, in sigmas.items()}
+        sigmas = {k: v / (2 * np.sqrt(2 * np.log(2))) for k, v in sigmas.items()}
     cov: NDArray[np.float_] = np.zeros((len(sigmas), len(sigmas)))
     for i, dim in enumerate(data.dims):
         cov[i][i] = sigmas[dim] ** 2  # sigma is deviation, but multivariate_normal uses covariant
diff --git a/src/arpes/corrections/fermi_edge_corrections.py b/src/arpes/corrections/fermi_edge_corrections.py
index 4f166fa4..9a6253fb 100644
--- a/src/arpes/corrections/fermi_edge_corrections.py
+++ b/src/arpes/corrections/fermi_edge_corrections.py
@@ -103,9 +103,7 @@ def apply_direct_fermi_edge_correction(
 
     correction = build_direct_fermi_edge_correction(arr, *args, **kwargs)
     assert isinstance(correction, xr.Dataset)
-    shift_amount = (
-        -correction / arr.G.stride(generic_dim_names=False)["eV"]
-    )  # pylint: disable=invalid-unary-operand-type
+    shift_amount = -correction / arr.G.stride(generic_dim_names=False)["eV"]  # pylint: disable=invalid-unary-operand-type
 
     energy_axis_index = list(arr.dims).index("eV")
     correction_axis = list(arr.dims).index(correction.dims[0])
diff --git a/src/arpes/load_pxt.py b/src/arpes/load_pxt.py
index 0fed1fcc..3455fc98 100644
--- a/src/arpes/load_pxt.py
+++ b/src/arpes/load_pxt.py
@@ -104,8 +104,7 @@ def read_igor_binary_wave(raw_bytes: bytes) -> xr.DataArray:
 
     wave_data = np.fromstring(
         raw_bytes[
-            igor_wave_header_dtype.itemsize
-            + offset : igor_wave_header_dtype.itemsize
+            igor_wave_header_dtype.itemsize + offset : igor_wave_header_dtype.itemsize
             + n_points * point_size
             + offset
         ],
diff --git a/src/arpes/simulation.py b/src/arpes/simulation.py
index 4e84e940..145d5395 100644
--- a/src/arpes/simulation.py
+++ b/src/arpes/simulation.py
@@ -181,9 +181,9 @@ def cloud_to_arr(
         cloud_as_image[(int(np.floor(x)) + 1) % shape_x][int(np.floor(y)) % shape_y] += (
             1 - frac_low_x
         ) * frac_low_y
-        cloud_as_image[int(np.floor(x)) % shape_x][
-            (int(np.floor(y)) + 1) % shape_y
-        ] += frac_low_x * (1 - frac_low_y)
+        cloud_as_image[int(np.floor(x)) % shape_x][(int(np.floor(y)) + 1) % shape_y] += (
+            frac_low_x * (1 - frac_low_y)
+        )
         cloud_as_image[(int(np.floor(x)) + 1) % shape_x][(int(np.floor(y)) + 1) % shape_y] += (
             1 - frac_low_x
         ) * (1 - frac_low_y)