From c215bd69da250e96ca179b3195afd324507c0589 Mon Sep 17 00:00:00 2001
From: mstojkovicTT
Date: Fri, 20 Dec 2024 14:41:50 +0000
Subject: [PATCH] test llama

---
 .github/workflows/build-and-test.yml          | 19 +++++++------------
 forge/test/mlir/llama/test_llama_inference.py | 11 ++++++-----
 2 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
index d6401a352..f6b7d57b9 100644
--- a/.github/workflows/build-and-test.yml
+++ b/.github/workflows/build-and-test.yml
@@ -43,7 +43,7 @@ jobs:
       matrix:
         build:
           - runs-on: runner
-        test_group_id: [1,2]
+        # test_group_id: [1,2]
 
     runs-on:
       - in-service
@@ -130,11 +130,11 @@
           -DCMAKE_CXX_COMPILER_LAUNCHER=ccache
         cmake --build ${{ steps.strings.outputs.build-output-dir }}
 
-    - name: Run Unit Tests
-      shell: bash
-      run: |
-        source env/activate
-        cmake --build ${{ steps.strings.outputs.build-output-dir }} -- run_unit_tests
+    # - name: Run Unit Tests
+    #   shell: bash
+    #   run: |
+    #     source env/activate
+    #     cmake --build ${{ steps.strings.outputs.build-output-dir }} -- run_unit_tests
 
     - name: Run Test
       env:
@@ -147,12 +147,7 @@
         apt install -y libgl1 libglx-mesa0
         set -o pipefail # Ensures that the exit code reflects the first command that fails
         pip install pytest-split
-        pytest -m push --splits 2 \
-          --group ${{ matrix.test_group_id }} \
-          --splitting-algorithm least_duration \
-          -m "${{ inputs.test_mark }}" \
-          --junit-xml=${{ steps.strings.outputs.test_report_path }} \
-          2>&1 | tee pytest.log
+        pytest -m push forge/test/mlir/llama/test_llama_inference.py 2>&1 | tee pytest.log
 
     - name: Upload Test Log
       uses: actions/upload-artifact@v4
diff --git a/forge/test/mlir/llama/test_llama_inference.py b/forge/test/mlir/llama/test_llama_inference.py
index 7def7ce9c..b0498d1cf 100644
--- a/forge/test/mlir/llama/test_llama_inference.py
+++ b/forge/test/mlir/llama/test_llama_inference.py
@@ -10,12 +10,13 @@
 from test.mlir.llama.utils.utils import load_model
 
 
-@pytest.mark.nightly
-@pytest.mark.xfail()
-@pytest.mark.parametrize("model_path", ["openlm-research/open_llama_3b", "meta-llama/Llama-3.2-1B"])
+# @pytest.mark.nightly
+# @pytest.mark.xfail()
+@pytest.mark.push()
+@pytest.mark.parametrize("model_path", ["meta-llama/Llama-3.2-1B"])
 def test_llama_inference(model_path):
-    if model_path == "meta-llama/Llama-3.2-1B":
-        pytest.skip("Skipping test for Llama-3.2-1B model, waiting for new transformers version.")
+    # if model_path == "meta-llama/Llama-3.2-1B":
+    #     pytest.skip("Skipping test for Llama-3.2-1B model, waiting for new transformers version.")
 
     # Load Model and Tokenizer
     framework_model, tokenizer = load_model(model_path)
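
A minimal sketch for reproducing the new CI test step locally, assuming the
repository's env/activate script is present and the "push" marker is
registered in the project's pytest configuration:

    # activate the repo's Python environment, then run the targeted module
    source env/activate
    pytest -m push forge/test/mlir/llama/test_llama_inference.py 2>&1 | tee pytest.log

Note that pytest's -m flag takes a marker expression rather than a file path;
the module path is passed positionally, and -m push then selects only the
tests carrying the @pytest.mark.push marker. The pipe through tee preserves
pytest.log for the Upload Test Log step.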