Skip to content

Commit

Permalink
test llama
Browse files Browse the repository at this point in the history
  • Loading branch information
mstojkovicTT committed Dec 20, 2024
1 parent 77ef35c commit c215bd6
Show file tree
Hide file tree
Showing 2 changed files with 13 additions and 17 deletions.
19 changes: 7 additions & 12 deletions .github/workflows/build-and-test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ jobs:
matrix:
build:
- runs-on: runner
test_group_id: [1,2]
# test_group_id: [1,2]

runs-on:
- in-service
Expand Down Expand Up @@ -130,11 +130,11 @@ jobs:
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache
cmake --build ${{ steps.strings.outputs.build-output-dir }}
- name: Run Unit Tests
shell: bash
run: |
source env/activate
cmake --build ${{ steps.strings.outputs.build-output-dir }} -- run_unit_tests
# - name: Run Unit Tests
# shell: bash
# run: |
# source env/activate
# cmake --build ${{ steps.strings.outputs.build-output-dir }} -- run_unit_tests

- name: Run Test
env:
Expand All @@ -147,12 +147,7 @@ jobs:
apt install -y libgl1 libglx-mesa0
set -o pipefail # Ensures that the exit code reflects the first command that fails
pip install pytest-split
pytest -m push --splits 2 \
--group ${{ matrix.test_group_id }} \
--splitting-algorithm least_duration \
-m "${{ inputs.test_mark }}" \
--junit-xml=${{ steps.strings.outputs.test_report_path }} \
2>&1 | tee pytest.log
pytest -m push forge/test/mlir/llama/test_llama_inference.py
- name: Upload Test Log
uses: actions/upload-artifact@v4
Expand Down
11 changes: 6 additions & 5 deletions forge/test/mlir/llama/test_llama_inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,13 @@
from test.mlir.llama.utils.utils import load_model


@pytest.mark.nightly
@pytest.mark.xfail()
@pytest.mark.parametrize("model_path", ["openlm-research/open_llama_3b", "meta-llama/Llama-3.2-1B"])
# @pytest.mark.nightly
# @pytest.mark.xfail()
@pytest.mark.push()
@pytest.mark.parametrize("model_path", ["meta-llama/Llama-3.2-1B"])
def test_llama_inference(model_path):
if model_path == "meta-llama/Llama-3.2-1B":
pytest.skip("Skipping test for Llama-3.2-1B model, waiting for new transformers version.")
# if model_path == "meta-llama/Llama-3.2-1B":
# pytest.skip("Skipping test for Llama-3.2-1B model, waiting for new transformers version.")

# Load Model and Tokenizer
framework_model, tokenizer = load_model(model_path)
Expand Down

0 comments on commit c215bd6

Please sign in to comment.