Skip to content

Commit

Permalink
Merge pull request #194 from lsst-sqre/tickets/DM-40628
Browse files Browse the repository at this point in the history
DM-40628: Switch to Ruff for linting
  • Loading branch information
rra authored Sep 5, 2023
2 parents d5d36f1 + 7cb30e2 commit d0f7f88
Show file tree
Hide file tree
Showing 38 changed files with 473 additions and 382 deletions.
7 changes: 0 additions & 7 deletions .flake8

This file was deleted.

18 changes: 7 additions & 11 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,16 @@ repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-yaml
- id: check-merge-conflict
- id: check-toml
- id: check-yaml
- id: trailing-whitespace

- repo: https://github.com/PyCQA/isort
rev: 5.12.0
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.0.286
hooks:
- id: isort
additional_dependencies:
- toml
- id: ruff
args: [--fix, --exit-non-zero-on-fix]

- repo: https://github.com/ambv/black
rev: 23.3.0
Expand All @@ -23,8 +24,3 @@ repos:
- id: blacken-docs
additional_dependencies: [black==23.1.0]
args: [-l, '76', -t, py311]

- repo: https://github.com/PyCQA/flake8
rev: 6.0.0
hooks:
- id: flake8
3 changes: 3 additions & 0 deletions changelog.d/20230905_084310_rra_DM_40628.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
### Other changes

- Safir now uses the [Ruff](https://beta.ruff.rs/docs/) linter instead of flake8 and isort.
2 changes: 1 addition & 1 deletion docs/user-guide/github-apps/webhook-models.rst
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ This page provides a quick reference for the Pydantic models provided in `safir.

Safir's coverage of GitHub webhooks is not exhaustive.
You can contribute additional models as needed.

Additionally, the models are not necessarily complete.
GitHub may provide additional fields that are not parsed by these models because they were not deemed relevant.
To use additional fields documented by GitHub, you can either subclass these models and add additional fields, or contribute updates to the models in Safir.
Expand Down
115 changes: 109 additions & 6 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -130,12 +130,6 @@ exclude = '''
# Use single-quoted strings so TOML treats the string like a Python r-string
# Multi-line strings are implicitly treated by black as regular expressions

[tool.isort]
profile = "black"
line_length = 79
known_first_party = ["safir", "tests"]
skip = ["docs/conf.py"]

[tool.pytest.ini_options]
asyncio_mode = "strict"
filterwarnings = [
Expand Down Expand Up @@ -171,6 +165,115 @@ init_typed = true
warn_required_dynamic_aliases = true
warn_untyped_fields = true

# The rule used with Ruff configuration is to disable every lint that has
# legitimate exceptions that are not dodgy code, rather than cluttering code
# with noqa markers. This is therefore a relatively relaxed configuration that
# errs on the side of disabling legitimate lints.
#
# Reference for settings: https://beta.ruff.rs/docs/settings/
# Reference for rules: https://beta.ruff.rs/docs/rules/
[tool.ruff]
exclude = [
"docs/**",
]
line-length = 79
ignore = [
"ANN101", # self should not have a type annotation
"ANN102", # cls should not have a type annotation
"ANN401", # sometimes Any is the right type
"ARG001", # unused function arguments are often legitimate
"ARG002", # unused method arguments are often legitimate
"ARG005", # unused lambda arguments are often legitimate
"BLE001", # we want to catch and report Exception in background tasks
"C414", # nested sorted is how you sort by multiple keys with reverse
"COM812", # omitting trailing commas allows black autoreformatting
"D102", # sometimes we use docstring inheritence
"D104", # don't see the point of documenting every package
"D105", # our style doesn't require docstrings for magic methods
"D106", # Pydantic uses a nested Config class that doesn't warrant docs
"D205", # our documentation style allows a folded first line
"EM101", # justification (duplicate string in traceback) is silly
"EM102", # justification (duplicate string in traceback) is silly
"FBT003", # positional booleans are normal for Pydantic field defaults
"FIX002", # point of a TODO comment is that we're not ready to fix it
"G004", # forbidding logging f-strings is appealing, but not our style
"RET505", # disagree that omitting else always makes code more readable
"PLR0913", # factory pattern uses constructors with many arguments
"PLR2004", # too aggressive about magic values
"S105", # good idea but too many false positives on non-passwords
"S106", # good idea but too many false positives on non-passwords
"S603", # not going to manually mark every subprocess call as reviewed
"S607", # using PATH is not a security vulnerability
"SIM102", # sometimes the formatting of nested if statements is clearer
"SIM117", # sometimes nested with contexts are clearer
"TCH001", # we decided to not maintain separate TYPE_CHECKING blocks
"TCH002", # we decided to not maintain separate TYPE_CHECKING blocks
"TCH003", # we decided to not maintain separate TYPE_CHECKING blocks
"TD003", # we don't require issues be created for TODOs
"TID252", # if we're going to use relative imports, use them always
"TRY003", # good general advice but lint is way too aggressive

# Safir-specific rules.
"N818", # Exception is correct in some cases, others are part of API
"PLW0603", # necessary trick for safir.logging
]
select = ["ALL"]
target-version = "py311"

[tool.ruff.per-file-ignores]
"src/safir/testing/**" = [
"S101", # test support functions are allowed to use assert
]
"tests/**" = [
"C901", # tests are allowed to be complex, sometimes that's convenient
"D101", # tests don't need docstrings
"D103", # tests don't need docstrings
"PLR0915", # tests are allowed to be long, sometimes that's convenient
"PT012", # way too aggressive about limiting pytest.raises blocks
"S101", # tests should use assert
"SLF001", # tests are allowed to access private members
]

[tool.ruff.isort]
known-first-party = ["safir", "tests"]
split-on-trailing-comma = false

[tool.ruff.flake8-bugbear]
extend-immutable-calls = [
"fastapi.Form",
"fastapi.Header",
"fastapi.Depends",
"fastapi.Path",
"fastapi.Query",
]

# These are too useful as attributes or methods to allow the conflict with the
# built-in to rule out their use.
[tool.ruff.flake8-builtins]
builtins-ignorelist = [
"all",
"any",
"dict",
"help",
"id",
"list",
"open",
"type",
]

[tool.ruff.flake8-pytest-style]
fixture-parentheses = false
mark-parentheses = false

[tool.ruff.pep8-naming]
classmethod-decorators = [
"pydantic.root_validator",
"pydantic.validator",
]

[tool.ruff.pydocstyle]
convention = "numpy"

[tool.scriv]
categories = [
"Backwards-incompatible changes",
Expand Down
61 changes: 31 additions & 30 deletions src/safir/arq.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,4 @@
"""An `arq <https://arq-docs.helpmanual.io>`__ client with a mock for
testing.
"""
"""An arq_ client with a mock for testing."""

from __future__ import annotations

Expand All @@ -9,13 +7,15 @@
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Any, Optional, Self
from typing import Any, Self

from arq import create_pool
from arq.connections import ArqRedis, RedisSettings
from arq.constants import default_queue_name as arq_default_queue_name
from arq.jobs import Job, JobStatus

from .datetime import current_datetime

__all__ = [
"ArqJobError",
"JobNotQueued",
Expand Down Expand Up @@ -173,7 +173,7 @@ async def from_job(cls, job: Job) -> Self:
status=job_status,
# private attribute of Job; not available in JobDef
# queue_name is available in JobResult
queue_name=job._queue_name,
queue_name=job._queue_name, # noqa: SLF001
)


Expand Down Expand Up @@ -269,11 +269,11 @@ async def from_job(cls, job: Job) -> Self:


class ArqQueue(metaclass=abc.ABCMeta):
"""An common interface for working with an arq queue that can be
"""A common interface for working with an arq queue that can be
implemented either with a real Redis backend, or an in-memory repository
for testing.
See also
See Also
--------
RedisArqQueue
Production implementation with a Redis store.
Expand All @@ -288,8 +288,8 @@ def __init__(

@property
def default_queue_name(self) -> str:
"""Name of the default queue, if the ``_queue_name`` parameter is
no set in method calls.
"""Name of the default queue, if the ``_queue_name`` parameter is not
set in method calls.
"""
return self._default_queue_name

Expand All @@ -298,7 +298,7 @@ async def enqueue(
self,
task_name: str,
*task_args: Any,
_queue_name: Optional[str] = None,
_queue_name: str | None = None,
**task_kwargs: Any,
) -> JobMetadata:
"""Add a job to the queue.
Expand Down Expand Up @@ -328,7 +328,7 @@ async def enqueue(

@abc.abstractmethod
async def get_job_metadata(
self, job_id: str, queue_name: Optional[str] = None
self, job_id: str, queue_name: str | None = None
) -> JobMetadata:
"""Get metadata about a `~arq.jobs.Job`.
Expand All @@ -354,9 +354,9 @@ async def get_job_metadata(

@abc.abstractmethod
async def get_job_result(
self, job_id: str, queue_name: Optional[str] = None
self, job_id: str, queue_name: str | None = None
) -> JobResult:
"""The job result, if available.
"""Retrieve the job result, if available.
Parameters
----------
Expand Down Expand Up @@ -410,7 +410,7 @@ async def enqueue(
self,
task_name: str,
*task_args: Any,
_queue_name: Optional[str] = None,
_queue_name: str | None = None,
**task_kwargs: Any,
) -> JobMetadata:
job = await self._pool.enqueue_job(
Expand All @@ -422,24 +422,25 @@ async def enqueue(
if job:
return await JobMetadata.from_job(job)
else:
# TODO if implementing hard-coded job IDs, set as an argument
# TODO(jonathansick): if implementing hard-coded job IDs, set as
# an argument
raise JobNotQueued(None)

def _get_job(self, job_id: str, queue_name: Optional[str] = None) -> Job:
def _get_job(self, job_id: str, queue_name: str | None = None) -> Job:
return Job(
job_id,
self._pool,
_queue_name=queue_name or self.default_queue_name,
)

async def get_job_metadata(
self, job_id: str, queue_name: Optional[str] = None
self, job_id: str, queue_name: str | None = None
) -> JobMetadata:
job = self._get_job(job_id, queue_name=queue_name)
return await JobMetadata.from_job(job)

async def get_job_result(
self, job_id: str, queue_name: Optional[str] = None
self, job_id: str, queue_name: str | None = None
) -> JobResult:
job = self._get_job(job_id, queue_name=queue_name)
return await JobResult.from_job(job)
Expand All @@ -466,7 +467,7 @@ async def enqueue(
self,
task_name: str,
*task_args: Any,
_queue_name: Optional[str] = None,
_queue_name: str | None = None,
**task_kwargs: Any,
) -> JobMetadata:
queue_name = self._resolve_queue_name(_queue_name)
Expand All @@ -475,33 +476,33 @@ async def enqueue(
name=task_name,
args=task_args,
kwargs=task_kwargs,
enqueue_time=datetime.now(),
enqueue_time=current_datetime(microseconds=True),
status=JobStatus.queued,
queue_name=queue_name,
)
self._job_metadata[queue_name][new_job.id] = new_job
return new_job

async def get_job_metadata(
self, job_id: str, queue_name: Optional[str] = None
self, job_id: str, queue_name: str | None = None
) -> JobMetadata:
queue_name = self._resolve_queue_name(queue_name)
try:
return self._job_metadata[queue_name][job_id]
except KeyError:
raise JobNotFound(job_id)
except KeyError as e:
raise JobNotFound(job_id) from e

async def get_job_result(
self, job_id: str, queue_name: Optional[str] = None
self, job_id: str, queue_name: str | None = None
) -> JobResult:
queue_name = self._resolve_queue_name(queue_name)
try:
return self._job_results[queue_name][job_id]
except KeyError:
raise JobResultUnavailable(job_id)
except KeyError as e:
raise JobResultUnavailable(job_id) from e

async def set_in_progress(
self, job_id: str, queue_name: Optional[str] = None
self, job_id: str, queue_name: str | None = None
) -> None:
"""Set a job's status to in progress, for mocking a queue in tests."""
job = await self.get_job_metadata(job_id, queue_name=queue_name)
Expand All @@ -517,7 +518,7 @@ async def set_complete(
*,
result: Any,
success: bool = True,
queue_name: Optional[str] = None,
queue_name: str | None = None,
) -> None:
"""Set a job's result, for mocking a queue in tests."""
queue_name = self._resolve_queue_name(queue_name)
Expand All @@ -534,8 +535,8 @@ async def set_complete(
kwargs=job_metadata.kwargs,
status=job_metadata.status,
enqueue_time=job_metadata.enqueue_time,
start_time=datetime.now(),
finish_time=datetime.now(),
start_time=current_datetime(microseconds=True),
finish_time=current_datetime(microseconds=True),
result=result,
success=success,
queue_name=queue_name,
Expand Down
Loading

0 comments on commit d0f7f88

Please sign in to comment.