Entity Mention Linker #2088

Triggered via pull request February 2, 2024 15:09
Status Failure
Total duration 19m 43s

ci.yml

on: pull_request

Annotations

11 errors and 1 warning
test
The runner has received a shutdown signal. This can happen when the runner service is stopped, or a manually started runner is canceled.
test: flair/data.py#L345
ruff pytest_ruff.RuffError: flair/data.py:366:16: SIM401 Use `self.annotation_layers.get(typename, [])` instead of an `if` block
    |
364 |         return self.labels
365 |
366 |         return self.annotation_layers[typename] if typename in self.annotation_layers else []
    |                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ SIM401
367 |
368 |     @property
    |
    = help: Replace with `self.annotation_layers.get(typename, [])`
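For context, SIM401 flags a membership test that exists only to supply a default; `dict.get` says the same thing directly. A minimal standalone sketch of the pattern and the suggested fix, with hypothetical names rather than the actual flair source:

    # Hypothetical standalone example of the SIM401 pattern (not the flair source).
    from typing import Dict, List

    def labels_for(annotation_layers: Dict[str, List[str]], typename: str) -> List[str]:
        # flagged form: annotation_layers[typename] if typename in annotation_layers else []
        # suggested fix: dict.get with a default
        return annotation_layers.get(typename, [])

    print(labels_for({"ner": ["PER"]}, "pos"))  # -> []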
test: flair/data.py#L1
Black format check
--- /home/runner/work/flair/flair/flair/data.py    2024-02-02 15:09:53.017994+00:00
+++ /home/runner/work/flair/flair/flair/data.py    2024-02-02 15:12:18.100663+00:00
@@ -1042,16 +1042,14 @@
     def get_span(self, start: int, stop: int):
         span_slice = slice(start, stop)
         return self[span_slice]
 
     @typing.overload
-    def __getitem__(self, idx: int) -> Token:
-        ...
+    def __getitem__(self, idx: int) -> Token: ...
 
     @typing.overload
-    def __getitem__(self, s: slice) -> Span:
-        ...
+    def __getitem__(self, s: slice) -> Span: ...
 
     def __getitem__(self, subscript):
         if isinstance(subscript, slice):
             return Span(self.tokens[subscript])
         else:
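This failure is purely stylistic: Black's 2024 stable style puts the `...` body of an overload stub on the same line as its `def`. A small illustrative sketch of that layout, with simplified signatures rather than the flair classes:

    # Illustrative only: overload stubs written the way Black's 2024 stable style formats them.
    import typing

    class TokenStore:
        def __init__(self, items: typing.List[str]) -> None:
            self.items = items

        @typing.overload
        def __getitem__(self, idx: int) -> str: ...

        @typing.overload
        def __getitem__(self, s: slice) -> typing.List[str]: ...

        def __getitem__(self, subscript):
            return self.items[subscript]

    print(TokenStore(["a", "b", "c"])[1:])  # -> ['b', 'c']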
test: flair/datasets/base.py#L345
ruff pytest_ruff.RuffError: flair/datasets/base.py:185:22: SIM401 Use `document.get(_, "")` instead of an `if` block
    |
183 |                 sentence = self._parse_document_to_sentence(
184 |                     document[self.text],
185 |                     [document[_] if _ in document else "" for _ in self.categories],
    |                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ SIM401
186 |                     tokenizer,
187 |                 )
    |
    = help: Replace with `document.get(_, "")`

flair/datasets/base.py:228:18: SIM401 Use `document.get(_, "")` instead of an `if` block
    |
226 |             sentence = self._parse_document_to_sentence(
227 |                 document[self.text],
228 |                 [document[_] if _ in document else "" for _ in self.categories],
    |                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ SIM401
229 |                 self.tokenizer,
230 |             )
    |
    = help: Replace with `document.get(_, "")`
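Here the same SIM401 rewrite applies inside a list comprehension. A hedged sketch with made-up document fields, not the actual dataset reader:

    # Hypothetical example: defaulting missing document fields inside a comprehension.
    document = {"text": "some abstract", "title": "a title"}
    categories = ["title", "journal", "year"]

    # flagged form: [document[c] if c in document else "" for c in categories]
    values = [document.get(c, "") for c in categories]
    print(values)  # -> ['a title', '', '']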
test: flair/datasets/biomedical.py#L345
ruff pytest_ruff.RuffError: flair/datasets/biomedical.py:2197:30: SIM401 Use `patch_lines.get(line_no, line)` instead of an `if` block
     |
2196 |             for line in input:
2197 |                 output.write(patch_lines[line_no] if line_no in patch_lines else line)
     |                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ SIM401
2198 |                 line_no += 1
     |
     = help: Replace with `patch_lines.get(line_no, line)`
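The biomedical.py finding is the same lint in a patching loop: fall back to the original line unless a replacement exists for that line number. A small sketch under assumed inputs:

    # Hypothetical sketch of the line-patching loop with the suggested dict.get fix.
    import io

    patch_lines = {1: "patched second line\n"}
    source = io.StringIO("first line\nsecond line\n")
    output = io.StringIO()

    for line_no, line in enumerate(source):
        # flagged form: patch_lines[line_no] if line_no in patch_lines else line
        output.write(patch_lines.get(line_no, line))

    print(output.getvalue(), end="")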
test: flair/datasets/sequence_labeling.py#L345
ruff pytest_ruff.RuffError: flair/datasets/sequence_labeling.py:2767:21: SIM113 Use `enumerate()` for index variable `k` in `for` loop
     |
2765 |                 k = 0
2766 |                 for line in file.readlines():
2767 |                     k += 1
     |                     ^^^^^^ SIM113
2768 |                     if k <= train_len:
2769 |                         train.write(line)
     |
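SIM113 suggests replacing the manually incremented counter with `enumerate()`. Because `k` is incremented before it is used, the equivalent form starts the index at 1. A minimal sketch with made-up contents:

    # Hypothetical sketch: enumerate(..., start=1) replaces the manual counter k,
    # matching the original 1-based check because k was incremented before use.
    lines = ["a\n", "b\n", "c\n", "d\n"]
    train_len = 3
    train, test = [], []

    for k, line in enumerate(lines, start=1):
        if k <= train_len:
            train.append(line)
        else:
            test.append(line)

    print(train, test)  # -> ['a\n', 'b\n', 'c\n'] ['d\n']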
test: flair/datasets/text_image.py#L345
ruff pytest_ruff.RuffError: flair/datasets/text_image.py:66:12: RUF019 [*] Unnecessary key check before dictionary access
   |
65 |         preprocessor = identity
66 |         if "lowercase" in kwargs and kwargs["lowercase"]:
   |            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ RUF019
67 |             preprocessor = str.lower
   |
   = help: Replace with `dict.get`
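RUF019 points out that checking for a key and then indexing it can collapse into a single `dict.get` call, which also serves as the truthiness test. A sketch with assumed kwargs, not the actual dataset class:

    # Hypothetical sketch of the RUF019 fix for the lowercase option.
    def identity(text: str) -> str:
        return text

    def build_preprocessor(**kwargs):
        preprocessor = identity
        # flagged form: if "lowercase" in kwargs and kwargs["lowercase"]:
        # kwargs.get("lowercase") is falsy both when the key is missing and when it is False
        if kwargs.get("lowercase"):
            preprocessor = str.lower
        return preprocessor

    print(build_preprocessor(lowercase=True)("ABC"))  # -> abc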
test: flair/file_utils.py#L1
Black format check
--- /home/runner/work/flair/flair/flair/file_utils.py    2024-02-02 15:09:53.021994+00:00
+++ /home/runner/work/flair/flair/flair/file_utils.py    2024-02-02 15:12:25.977403+00:00
@@ -1,6 +1,7 @@
 """Utilities for working with the local dataset cache. Copied from AllenNLP."""
+
 import base64
 import functools
 import io
 import logging
 import mmap
test: flair/models/entity_linker_model.py#L1
Black format check
--- /home/runner/work/flair/flair/flair/models/entity_linker_model.py    2024-02-02 15:09:53.021994+00:00
+++ /home/runner/work/flair/flair/flair/models/entity_linker_model.py    2024-02-02 15:12:26.990886+00:00
@@ -106,13 +106,13 @@
             **classifierargs: The arguments propagated to :meth:`flair.nn.DefaultClassifier.__init__`
         """
         super().__init__(
             embeddings=embeddings,
             label_dictionary=label_dictionary,
-            final_embedding_size=embeddings.embedding_length * 2
-            if pooling_operation == "first_last"
-            else embeddings.embedding_length,
+            final_embedding_size=(
+                embeddings.embedding_length * 2 if pooling_operation == "first_last" else embeddings.embedding_length
+            ),
             **classifierargs,
         )
 
         self.pooling_operation = pooling_operation
         self._label_type = label_type
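This Black failure is again mechanical: the 2024 stable style wraps a multi-line conditional expression passed as a keyword argument in explicit parentheses. A simplified, hypothetical sketch of the resulting layout:

    # Illustrative only: a conditional expression wrapped in parentheses,
    # the layout Black's 2024 stable style produces for such keyword arguments.
    def configure(final_embedding_size: int) -> int:
        return final_embedding_size

    embedding_length = 128
    pooling_operation = "first_last"

    size = configure(
        final_embedding_size=(
            embedding_length * 2 if pooling_operation == "first_last" else embedding_length
        ),
    )
    print(size)  # -> 256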
test: flair/models/entity_mention_linking.py#L345
ruff pytest_ruff.RuffError: flair/models/entity_mention_linking.py:605:13: SIM401 Use `HYBRID_MODELS_SPARSE_WEIGHT.get(model_name_or_path, sparse_weight)` instead of an `if` block
    |
604 |           sparse_weight = (
605 |               sparse_weight
    |  _____________^
606 | |             if model_name_or_path not in HYBRID_MODELS_SPARSE_WEIGHT
607 | |             else HYBRID_MODELS_SPARSE_WEIGHT[model_name_or_path]
    | |________________________________________________________________^ SIM401
608 |           )
    |
    = help: Replace with `HYBRID_MODELS_SPARSE_WEIGHT.get(model_name_or_path, sparse_weight)`
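This last ruff finding is the SIM401 pattern written with `not in` and a fallback variable; `dict.get` with that variable as the default collapses the whole expression. A sketch with a made-up weight table:

    # Hypothetical sketch of the sparse-weight lookup with the suggested dict.get fix;
    # the table contents here are made up.
    HYBRID_MODELS_SPARSE_WEIGHT = {"example-hybrid-model": 0.5}

    def resolve_sparse_weight(model_name_or_path: str, sparse_weight: float = 0.0) -> float:
        # flagged form: sparse_weight if model_name_or_path not in HYBRID_MODELS_SPARSE_WEIGHT
        #               else HYBRID_MODELS_SPARSE_WEIGHT[model_name_or_path]
        return HYBRID_MODELS_SPARSE_WEIGHT.get(model_name_or_path, sparse_weight)

    print(resolve_sparse_weight("example-hybrid-model"))  # -> 0.5
    print(resolve_sparse_weight("some-other-model", 0.3))  # -> 0.3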
test: flair/models/entity_mention_linking.py#L1
Black format check
--- /home/runner/work/flair/flair/flair/models/entity_mention_linking.py    2024-02-02 15:09:53.021994+00:00
+++ /home/runner/work/flair/flair/flair/models/entity_mention_linking.py    2024-02-02 15:12:27.748945+00:00
@@ -441,13 +441,15 @@
         }
 
     @classmethod
     def _from_state(cls, state_dict: Dict[str, Any]) -> "EntityPreprocessor":
         return cls(
-            preprocessor=None
-            if state_dict["preprocessor"] is None
-            else EntityPreprocessor._from_state(state_dict["preprocessor"]),
+            preprocessor=(
+                None
+                if state_dict["preprocessor"] is None
+                else EntityPreprocessor._from_state(state_dict["preprocessor"])
+            ),
         )
 
 
 class CandidateSearchIndex(ABC):
     """Base class for a candidate generator.
@@ -901,13 +903,15 @@
 
         # Preprocess entity mentions
         for entity in entities_mentions:
             data_points.append(entity.data_point)
             mentions.append(
-                self.preprocessor.process_mention(entity.data_point.text, sentence)
-                if self.preprocessor is not None
-                else entity.data_point.text,
+                (
+                    self.preprocessor.process_mention(entity.data_point.text, sentence)
+                    if self.preprocessor is not None
+                    else entity.data_point.text
+                ),
             )
 
         # Retrieve top-k concept / entity candidates
         for i in range(0, len(mentions), batch_size):
             candidates = self.candidate_generator.search(entity_mentions=mentions[i : i + batch_size], top_k=top_k)
test
Node.js 16 actions are deprecated. Please update the following actions to use Node.js 20: actions/checkout@v3, actions/setup-python@v4, actions/cache@v3. For more information see: https://github.blog/changelog/2023-09-22-github-actions-transitioning-from-node-16-to-node-20/.