Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fetch results of evening voting sessions #957

Open
wants to merge 10 commits into
base: main
Choose a base branch
from
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
"""Rename result column to status

Revision ID: 1f516b18c4f6
Revises: 9b35d19b64c4
Create Date: 2024-12-08 11:25:26.051408

"""

from alembic import op

# revision identifiers, used by Alembic.
revision = "1f516b18c4f6"
down_revision = "9b35d19b64c4"
branch_labels = None
depends_on = None


def upgrade() -> None:
    """Rename the ``pipeline_runs.result`` column to ``status``."""
    op.alter_column("pipeline_runs", column_name="result", new_column_name="status")


def downgrade() -> None:
    """Revert the rename: ``pipeline_runs.status`` back to ``result``."""
    op.alter_column("pipeline_runs", column_name="status", new_column_name="result")
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
"""Add checksum column to pipeline_runs table

Revision ID: 2f958a6f147d
Revises: 1f516b18c4f6
Create Date: 2024-12-07 17:12:10.792707

"""

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision = "2f958a6f147d"
down_revision = "1f516b18c4f6"
branch_labels = None
depends_on = None


def upgrade() -> None:
    """Add a text ``checksum`` column to the ``pipeline_runs`` table."""
    op.add_column("pipeline_runs", sa.Column("checksum", sa.Unicode))


def downgrade() -> None:
    """Drop the ``checksum`` column from the ``pipeline_runs`` table."""
    op.drop_column("pipeline_runs", "checksum")
4 changes: 2 additions & 2 deletions backend/howtheyvote/models/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from .common import Base, BaseWithId, DataIssue, Fragment, PipelineRun, PipelineRunResult
from .common import Base, BaseWithId, DataIssue, Fragment, PipelineRun, PipelineStatus
from .country import Country, CountryType
from .eurovoc import EurovocConcept, EurovocConceptType
from .group import Group
Expand All @@ -24,7 +24,7 @@
"BaseWithId",
"Fragment",
"PipelineRun",
"PipelineRunResult",
"PipelineStatus",
"DataIssue",
"Country",
"CountryType",
Expand Down
6 changes: 4 additions & 2 deletions backend/howtheyvote/models/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,10 +36,11 @@ class DataIssue(Enum):
VOTE_GROUP_NO_MAIN_VOTE = "VOTE_GROUP_NO_MAIN_VOTE"


class PipelineRunResult(Enum):
class PipelineStatus(Enum):
    """Outcome of a pipeline run, persisted in ``PipelineRun.status``."""

    # The pipeline ran to completion without a known failure.
    SUCCESS = "SUCCESS"
    # A scraping error aborted the run (see BasePipeline.run).
    FAILURE = "FAILURE"
    # The upstream data source could not be fetched.
    DATA_UNAVAILABLE = "DATA_UNAVAILABLE"
    # The source data matched the previous run's checksum, so nothing was done.
    DATA_UNCHANGED = "DATA_UNCHANGED"


class PipelineRun(Base):
Expand All @@ -49,4 +50,5 @@ class PipelineRun(Base):
started_at: Mapped[sa.DateTime] = mapped_column(sa.DateTime)
finished_at: Mapped[sa.DateTime] = mapped_column(sa.DateTime)
pipeline: Mapped[str] = mapped_column(sa.Unicode)
result: Mapped[PipelineRunResult] = mapped_column(sa.Enum(PipelineRunResult))
status: Mapped[PipelineStatus] = mapped_column(sa.Enum(PipelineStatus))
checksum: Mapped[str] = mapped_column(sa.Unicode)
5 changes: 2 additions & 3 deletions backend/howtheyvote/pipelines/__init__.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,11 @@
from .common import DataUnavailableError, PipelineError
from .common import PipelineResult
from .members import MembersPipeline
from .press import PressPipeline
from .rcv_list import RCVListPipeline
from .sessions import SessionsPipeline

__all__ = [
"PipelineError",
"DataUnavailableError",
"PipelineResult",
"RCVListPipeline",
"PressPipeline",
"MembersPipeline",
Expand Down
62 changes: 62 additions & 0 deletions backend/howtheyvote/pipelines/common.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,68 @@
import hashlib
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any

from requests import Response
from structlog import get_logger

from ..models import PipelineStatus
from ..scrapers import ScrapingError

log = get_logger(__name__)


@dataclass
class PipelineResult:
    """Result reported by :meth:`BasePipeline.run`."""

    # Terminal status of the run (success, failure, data unavailable/unchanged).
    status: PipelineStatus
    # SHA-256 checksum of the fetched source data, or None when the pipeline
    # did not record one for this run.
    checksum: str | None


class PipelineError(Exception):
    """Base class for exceptions raised while running a pipeline."""

    pass


class DataUnavailableError(PipelineError):
    """Raised when the upstream data source cannot be fetched.

    NOTE(review): used for flow control — ``BasePipeline.run`` maps it to
    ``PipelineStatus.DATA_UNAVAILABLE`` rather than treating it as an error.
    """

    pass


class DataUnchangedError(PipelineError):
    """Raised when the source data is unchanged since the last run.

    NOTE(review): used for flow control — ``BasePipeline.run`` maps it to
    ``PipelineStatus.DATA_UNCHANGED`` rather than treating it as an error.
    """

    pass
Comment on lines 21 to +30
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Note to self: These aren’t actually errors, they are exceptions used for flow control. I think I’ve blindly followed the linter recommendation, but this naming doesn’t really make sense for this case.

According to PEP 8, exception classes should have an Error suffix, but only if they actually are errors, otherwise they should have no special suffix, so in this case DataUnchanged and DataUnavailable would be more appropriate.

https://peps.python.org/pep-0008/#exception-names

It probably makes sense to fix this separately though, there might be other similar cases elsewhere.



class BasePipeline(ABC):
    """Template for data pipelines.

    Subclasses implement :meth:`_run`; :meth:`run` wraps it with structured
    logging and maps known exceptions to a :class:`PipelineResult` status.
    """

    # Checksum recorded by the previous run of this pipeline (if any); allows
    # subclasses to detect unchanged source data and raise DataUnchangedError.
    last_run_checksum: str | None
    # Checksum of the data fetched during this run; set by subclasses, None
    # until (and unless) they record one.
    checksum: str | None

    def __init__(self, last_run_checksum: str | None = None, **kwargs: Any) -> None:
        # Extra keyword arguments are used only to enrich the log context.
        self.last_run_checksum = last_run_checksum
        self.checksum = None
        self._log = log.bind(pipeline=type(self).__name__, **kwargs)

    def run(self) -> PipelineResult:
        """Execute the pipeline and report its status and data checksum."""
        self._log.info("Running pipeline")

        try:
            self._run()
            status = PipelineStatus.SUCCESS
        except DataUnavailableError:
            # Flow control, not a failure: source not available (yet).
            status = PipelineStatus.DATA_UNAVAILABLE
        except DataUnchangedError:
            # Flow control, not a failure: source identical to the last run.
            status = PipelineStatus.DATA_UNCHANGED
        except ScrapingError:
            # Genuine failure: log with traceback.
            status = PipelineStatus.FAILURE
            self._log.exception("Failed running pipeline")

        return PipelineResult(
            status=status,
            checksum=self.checksum,
        )

    @abstractmethod
    def _run(self) -> None:
        """Pipeline-specific work; raise the exceptions above to signal status."""
        raise NotImplementedError


def compute_response_checksum(response: Response) -> str:
    """Return the hex-encoded SHA-256 digest of the raw response body."""
    hasher = hashlib.sha256()
    hasher.update(response.content)
    return hasher.hexdigest()
29 changes: 9 additions & 20 deletions backend/howtheyvote/pipelines/members.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,34 +13,23 @@
ScrapingError,
)
from ..store import Aggregator, BulkWriter, index_records, map_member
from .common import BasePipeline

log = get_logger(__name__)


class MembersPipeline:
class MembersPipeline(BasePipeline):
def __init__(self, term: int):
    """Set up the pipeline for a single parliamentary term.

    ``term`` is also passed to the base class to enrich the log context.
    """
    super().__init__(term=term)
    self.term = term
    # IDs of members touched during this run; filled by the scrape steps.
    self._member_ids: set[str] = set()

def run(self) -> None:
log.info(
"Running pipeline",
name=type(self).__name__,
term=self.term,
)

try:
self._scrape_members()
self._scrape_member_groups()
self._scrape_member_infos()
self._download_member_photos()
self._index_members()
except ScrapingError:
log.exception(
"Failed running pipeline",
name=type(self).__name__,
term=self.term,
)
def _run(self) -> None:
    """Scrape members, their groups, infos and photos, then index them."""
    self._scrape_members()
    self._scrape_member_groups()
    self._scrape_member_infos()
    self._download_member_photos()
    self._index_members()

def _scrape_members(self) -> None:
log.info("Scraping RCV lists", term=self.term)
Expand Down
35 changes: 11 additions & 24 deletions backend/howtheyvote/pipelines/press.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,12 @@
ScrapingError,
)
from ..store import Aggregator, BulkWriter, index_records, map_press_release, map_vote
from .common import BasePipeline

log = get_logger(__name__)


class PressPipeline:
class PressPipeline(BasePipeline):
# At the time we introduced this constant, the value covered roughly one term. However,
# this obviously depends on the amount of press releases published and might need to be
# adjusted or made configurable in the future.
Expand All @@ -30,35 +31,21 @@ def __init__(
date: datetime.date | None = None,
with_rss: bool | None = False,
):
super().__init__(date=date, with_rss=with_rss)
self.date = date
self.with_rss = with_rss
self._release_ids: set[str] = set()
self._vote_ids: set[str] = set()

def run(self) -> None:
log.info(
"Running pipeline",
name=type(self).__name__,
date=self.date,
with_rss=self.with_rss,
)
def _run(self) -> None:
if self.with_rss:
self._scrape_press_releases_rss()

try:
if self.with_rss:
self._scrape_press_releases_rss()

self._scrape_press_releases_index()
self._scrape_press_releases()
self._analyze_featured_votes()
self._index_press_releases()
self._index_votes()
except ScrapingError:
log.exception(
"Failed running pipeline",
name=type(self).__name__,
date=self.date,
with_rss=self.with_rss,
)
self._scrape_press_releases_index()
self._scrape_press_releases()
self._analyze_featured_votes()
self._index_press_releases()
self._index_votes()

def _scrape_press_releases_rss(self) -> None:
log.info("Fetching press releases from RSS", date=self.date)
Expand Down
96 changes: 50 additions & 46 deletions backend/howtheyvote/pipelines/rcv_list.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,65 +27,54 @@
)
from ..sharepics import generate_vote_sharepic
from ..store import Aggregator, BulkWriter, index_records, map_vote, map_vote_group
from .common import DataUnavailableError, PipelineError
from .common import (
BasePipeline,
DataUnavailableError,
DataUnchangedError,
compute_response_checksum,
)

log = get_logger(__name__)


class RCVListPipeline:
class RCVListPipeline(BasePipeline):
"""Scrapes the RCV vote results for a single day, then runs analysis on the
extracted votes and scrapes additional information such as data about legislative
procedures."""

def __init__(self, term: int, date: datetime.date):
def __init__(
    self,
    term: int,
    date: datetime.date,
    last_run_checksum: str | None = None,
):
    """Set up the pipeline for a single day's RCV lists.

    :param term: Parliamentary term to scrape.
    :param date: Day whose vote results are fetched.
    :param last_run_checksum: Checksum recorded by the previous run; used to
        detect unchanged source data.
    """
    # BasePipeline.__init__ already stores ``last_run_checksum`` and
    # initializes ``checksum`` to None; re-assigning them here (as the
    # original did) was redundant and inconsistent with the other pipelines.
    # ``term`` and ``date`` are forwarded as structured-log context.
    super().__init__(term=term, date=date, last_run_checksum=last_run_checksum)
    self.term = term
    self.date = date
    self._vote_ids: set[str] = set()
    self._vote_group_ids: set[str] = set()
    self._request_cache: RequestCache = LRUCache(maxsize=25)

def run(self) -> None:
log.info(
"Running pipeline",
name=type(self).__name__,
term=self.term,
date=self.date,
)

try:
self._scrape_rcv_list()
self._scrape_documents()
self._scrape_eurlex_documents()
self._scrape_procedures()
self._scrape_eurlex_procedures()
self._analyze_main_votes()
self._analyze_vote_groups()
self._analyze_vote_data_issues()
self._index_votes()

# Share pictures have to be generated after the votes are indexed. Otherwise,
# rendering the share pictures fails as data about new votes hasn’t yet been
# written to the database.
self._generate_vote_sharepics()

self._analyze_vote_groups_data_issues()
self._index_vote_groups()
except NoWorkingUrlError as exc:
log.exception(
"Failed running pipeline",
name=type(self).__name__,
term=self.term,
date=self.date,
)
raise DataUnavailableError("Pipeline data source is not available") from exc
except ScrapingError as exc:
log.exception(
"Failed running pipeline",
name=type(self).__name__,
term=self.term,
date=self.date,
)
raise PipelineError("Failed running pipeline") from exc
def _run(self) -> None:
    """Scrape, analyze and index the RCV votes for the configured day."""
    self._scrape_rcv_list()
    self._scrape_documents()
    self._scrape_eurlex_documents()
    self._scrape_procedures()
    self._scrape_eurlex_procedures()
    self._analyze_main_votes()
    self._analyze_vote_groups()
    self._analyze_vote_data_issues()
    self._index_votes()

    # Share pictures have to be generated after the votes are indexed. Otherwise,
    # rendering the share pictures fails as data about new votes hasn’t yet been
    # written to the database.
    self._generate_vote_sharepics()

    self._analyze_vote_groups_data_issues()
    self._index_vote_groups()

def _scrape_rcv_list(self) -> None:
log.info("Fetching active members", date=self.date)
Expand All @@ -107,8 +96,23 @@ def _scrape_rcv_list(self) -> None:
active_members=active_members,
)

try:
fragments = scraper.run()
except NoWorkingUrlError as exc:
raise DataUnavailableError("Pipeline data source is not available") from exc

if (
self.last_run_checksum is not None
and self.last_run_checksum == compute_response_checksum(scraper.response)
):
raise DataUnchangedError(
"The data source hasn't changed since the last pipeline run."
)

self.checksum = compute_response_checksum(scraper.response)

writer = BulkWriter()
writer.add(scraper.run())
writer.add(fragments)
writer.flush()

self._vote_ids = writer.get_touched()
Expand Down
Loading
Loading