From 291f535a42c0ea2c649823fdcc1d5c5512a5a149 Mon Sep 17 00:00:00 2001 From: Ilyas Moutawwakil <57442720+IlyasMoutawwakil@users.noreply.github.com> Date: Thu, 27 Jun 2024 10:40:56 +0200 Subject: [PATCH] Pin numpy v1 for onnxruntime (#1921) * fix offline ci * pin numpy v1 for now * pin numpy 1 in exporters as well * pin numpy v1 everywhere for transformers --- .github/workflows/test_offline.yml | 42 ++++++++++++++++-------------- setup.py | 4 +-- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/.github/workflows/test_offline.yml b/.github/workflows/test_offline.yml index ca90730b6bc..90b0108e512 100644 --- a/.github/workflows/test_offline.yml +++ b/.github/workflows/test_offline.yml @@ -2,9 +2,9 @@ name: Offline usage / Python - Test on: push: - branches: [ main ] + branches: [main] pull_request: - branches: [ main ] + branches: [main] concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -15,29 +15,33 @@ jobs: strategy: fail-fast: false matrix: - python-version: [3.9] + python-version: [3.8, 3.9] os: [ubuntu-20.04] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v2 - - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies for pytorch export - run: | - pip install .[tests,exporters,onnxruntime] - - name: Test with unittest - run: | - HF_HOME=/tmp/ huggingface-cli download hf-internal-testing/tiny-random-gpt2 + - name: Checkout code + uses: actions/checkout@v4 - HF_HOME=/tmp/ HF_HUB_OFFLINE=1 optimum-cli export onnx --model hf-internal-testing/tiny-random-gpt2 gpt2_onnx --task text-generation + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} - huggingface-cli download hf-internal-testing/tiny-random-gpt2 + - name: Install dependencies for pytorch export + run: | + pip install .[tests,exporters,onnxruntime] - 
HF_HUB_OFFLINE=1 optimum-cli export onnx --model hf-internal-testing/tiny-random-gpt2 gpt2_onnx --task text-generation + - name: Test with pytest + run: | + HF_HOME=/tmp/ huggingface-cli download hf-internal-testing/tiny-random-gpt2 - pytest tests/onnxruntime/test_modeling.py -k "test_load_model_from_hub and not from_hub_onnx" -s -vvvvv + HF_HOME=/tmp/ HF_HUB_OFFLINE=1 optimum-cli export onnx --model hf-internal-testing/tiny-random-gpt2 gpt2_onnx --task text-generation - HF_HUB_OFFLINE=1 pytest tests/onnxruntime/test_modeling.py -k "test_load_model_from_hub and not from_hub_onnx" -s -vvvvv \ No newline at end of file + huggingface-cli download hf-internal-testing/tiny-random-gpt2 + + HF_HUB_OFFLINE=1 optimum-cli export onnx --model hf-internal-testing/tiny-random-gpt2 gpt2_onnx --task text-generation + + pytest tests/onnxruntime/test_modeling.py -k "test_load_model_from_hub and not from_hub_onnx" -s -vvvvv + + HF_HUB_OFFLINE=1 pytest tests/onnxruntime/test_modeling.py -k "test_load_model_from_hub and not from_hub_onnx" -s -vvvvv diff --git a/setup.py b/setup.py index b40eba068d5..6b28fb696be 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ "transformers[sentencepiece]>=4.26.0,<4.42.0", "torch>=1.11", "packaging", - "numpy", + "numpy<2.0", # transformers requires numpy<2.0 https://github.com/huggingface/transformers/pull/31569 "huggingface_hub>=0.8.0", "datasets", ] @@ -79,10 +79,10 @@ "openvino": "optimum-intel[openvino]>=1.16.0", "nncf": "optimum-intel[nncf]>=1.16.0", "neural-compressor": "optimum-intel[neural-compressor]>=1.16.0", - "graphcore": "optimum-graphcore", "habana": ["optimum-habana", "transformers >= 4.38.0, < 4.39.0"], "neuron": ["optimum-neuron[neuron]>=0.0.20", "transformers >= 4.36.2, < 4.42.0"], "neuronx": ["optimum-neuron[neuronx]>=0.0.20", "transformers >= 4.36.2, < 4.42.0"], + "graphcore": "optimum-graphcore", "furiosa": "optimum-furiosa", "amd": "optimum-amd", "dev": TESTS_REQUIRE + QUALITY_REQUIRE,