From f4809307e409d5ce698364ad48b69d38e0c406e9 Mon Sep 17 00:00:00 2001
From: fxmarty <9808326+fxmarty@users.noreply.github.com>
Date: Fri, 14 Jun 2024 16:33:33 +0200
Subject: [PATCH] Remove read token (#1903)

* remove read token

* rename var & use org model

* style & remove token

* fix failing tests on datasets release
---
 .github/workflows/test_onnxruntime.yml |  2 ++
 optimum/utils/testing_utils.py         |  3 ---
 tests/onnxruntime/test_modeling.py     | 11 +++++++----
 tests/utils/test_task_processors.py    |  7 ++++++-
 4 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/test_onnxruntime.yml b/.github/workflows/test_onnxruntime.yml
index 4893b681a66..291a3b08335 100644
--- a/.github/workflows/test_onnxruntime.yml
+++ b/.github/workflows/test_onnxruntime.yml
@@ -50,6 +50,8 @@ jobs:
           pytest onnxruntime -m "run_in_series" --durations=0 -vvvv -s
 
       - name: Test with pytest (in parallel)
+        env:
+          HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
         working-directory: tests
         run: |
           pytest onnxruntime -m "not run_in_series" --durations=0 -vvvv -s -n auto
diff --git a/optimum/utils/testing_utils.py b/optimum/utils/testing_utils.py
index f1c2f668e3c..a7c2b8bb050 100644
--- a/optimum/utils/testing_utils.py
+++ b/optimum/utils/testing_utils.py
@@ -36,9 +36,6 @@
 # Used to test the hub
 USER = "__DUMMY_OPTIMUM_USER__"
 
-# Not critical, only usable on the sandboxed CI instance.
-TOKEN = "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"
-
 
 def flatten_dict(dictionary: Dict):
     """
diff --git a/tests/onnxruntime/test_modeling.py b/tests/onnxruntime/test_modeling.py
index 3fe2c5e14dc..7b2c8a66b9e 100644
--- a/tests/onnxruntime/test_modeling.py
+++ b/tests/onnxruntime/test_modeling.py
@@ -938,11 +938,14 @@ def test_stable_diffusion_model_on_rocm_ep_str(self):
         self.assertListEqual(model.providers, ["ROCMExecutionProvider", "CPUExecutionProvider"])
 
     def test_load_model_from_hub_private(self):
-        subprocess.run("huggingface-cli logout", shell=True)
-        # Read token of fxmartyclone (dummy user).
-        token = "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"
+        token = os.environ.get("HF_HUB_READ_TOKEN", None)
 
-        model = ORTModelForCustomTasks.from_pretrained("fxmartyclone/tiny-onnx-private-2", use_auth_token=token)
+        if token is None:
+            self.skipTest("Test requires a read token for optimum-internal-testing in the environment variable `HF_HUB_READ_TOKEN`.")
+
+        model = ORTModelForCustomTasks.from_pretrained(
+            "optimum-internal-testing/tiny-random-phi-private", use_auth_token=token
+        )
         self.assertIsInstance(model.model, onnxruntime.InferenceSession)
         self.assertIsInstance(model.config, PretrainedConfig)
 
diff --git a/tests/utils/test_task_processors.py b/tests/utils/test_task_processors.py
index af89aec2b90..16567048073 100644
--- a/tests/utils/test_task_processors.py
+++ b/tests/utils/test_task_processors.py
@@ -50,7 +50,7 @@
         "dataset_data_keys": {"question": "question", "context": "answer"},
     },
     "image-classification": {
-        "dataset_args": "mnist",
+        "dataset_args": "sasha/dog-food",
        "dataset_data_keys": {"image": "image"},
     },
 }
@@ -232,6 +232,11 @@ def test_load_dataset_with_max_length(self):
         input_ids = dataset[0]["input_ids"]
         self.assertEqual(len(input_ids), max_length)
 
+    def test_load_default_dataset(self):
+        self.skipTest(
+            "Skipping so as not to execute conll2003 remote code (test would require trust_remote_code=True)"
+        )
+
 
 class QuestionAnsweringProcessorTest(TestCase, TaskProcessorTestBase):
     TASK_NAME = "question-answering"
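
Note on running the updated private-hub test locally: the CI job now passes the read token through the HF_HUB_READ_TOKEN environment variable, and the test skips itself when that variable is absent. A minimal local invocation sketch, assuming you substitute a read token of your own that can access the private test repository (the token value below is a placeholder, not a real secret):

    export HF_HUB_READ_TOKEN=hf_your_own_read_token   # placeholder; never use or commit a real token here
    cd tests
    pytest onnxruntime/test_modeling.py -k test_load_model_from_hub_private -vvvv -s

Without the variable set, the test is skipped via self.skipTest rather than failing, so the rest of the suite is unaffected.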