From 8d8ca8800ef9ff0adf959372c2e448e12ab944b3 Mon Sep 17 00:00:00 2001
From: Alexander Druz
Date: Mon, 5 Feb 2024 10:21:15 +0100
Subject: [PATCH] Ignore lodash vulnerability

---
 package.json                                       | 7 +++++++
 tests/integration/dataset/test_embedding_column.py | 4 +++-
 tests/integration/dataset/test_fancy_indexing.py   | 4 ++--
 3 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/package.json b/package.json
index 1d2792e1..1aabca73 100644
--- a/package.json
+++ b/package.json
@@ -216,5 +216,12 @@
       ".jsx",
       ".tsx"
     ]
+  },
+  "pnpm": {
+    "auditConfig": {
+      "ignoreCves": [
+        "CVE-2020-8203"
+      ]
+    }
   }
 }
diff --git a/tests/integration/dataset/test_embedding_column.py b/tests/integration/dataset/test_embedding_column.py
index c1e5aee6..159d295f 100644
--- a/tests/integration/dataset/test_embedding_column.py
+++ b/tests/integration/dataset/test_embedding_column.py
@@ -1,9 +1,11 @@
+from typing import Tuple
 import numpy as np
 import pytest
 
 from renumics import spotlight
 from renumics.spotlight import dtypes
 from renumics.spotlight.dataset.exceptions import InvalidDTypeError, InvalidShapeError
+from renumics.spotlight.dataset.typing import EmbeddingColumnInputType
 
 
 @pytest.mark.parametrize("length", [1, 2, 8])
@@ -14,7 +16,7 @@ def test_default(empty_dataset: spotlight.Dataset, length: int) -> None:
     empty_dataset.append_embedding_column("embedding")
     assert empty_dataset.get_dtype("embedding") == dtypes.embedding_dtype
 
-    valid_values = (
+    valid_values: Tuple[EmbeddingColumnInputType, ...] = (
         [0] * length,
         range(length),
         tuple(range(length)),
diff --git a/tests/integration/dataset/test_fancy_indexing.py b/tests/integration/dataset/test_fancy_indexing.py
index 30a58772..1c673fc0 100644
--- a/tests/integration/dataset/test_fancy_indexing.py
+++ b/tests/integration/dataset/test_fancy_indexing.py
@@ -3,7 +3,7 @@
 """
 
 import datetime
-from typing import Any, List
+from typing import Any, List, cast
 
 import numpy as np
 import pytest
@@ -300,7 +300,7 @@ def test_setitem(fancy_indexing_dataset: Dataset) -> None:
     )
     target = np.array(fancy_indexing_dataset[column_name])
     target[indices] = values
-    fancy_indexing_dataset[column_name, indices] = values
+    fancy_indexing_dataset[column_name, indices] = cast(np.ndarray, values)
     last_edited_at = last_edited_at_column[last_edited_at_indices]
     if len(last_edited_at_indices) > 0:
         timestamp_ = _assert_unique_datetime(