Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Record config file name as test suite property #947

Merged
merged 7 commits
Dec 12, 2024
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions tests/e2e/vLLM/run_tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ do

# Point the test module at this iteration's model config file.
export TEST_DATA_FILE="$MODEL_CONFIG"
# -r a: show a short summary for ALL test outcomes (passed/skipped/failed);
# --capture=tee-sys: capture stdout/stderr for the report while still
#   streaming it to the console;
# --junitxml: one timestamped results file per run, so repeated loop
#   iterations don't overwrite each other.
# "|| LOCAL_SUCCESS=$?" records a failure without aborting the loop.
pytest \
-r a \
--capture=tee-sys \
--junitxml="test-results/e2e-$(date +%s).xml" \
"$PWD/tests/e2e/vLLM/test_vllm.py" || LOCAL_SUCCESS=$?
Expand Down
11 changes: 10 additions & 1 deletion tests/e2e/vLLM/test_vllm.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import os
import shutil
from pathlib import Path
from typing import Callable

import pytest
import yaml
Expand All @@ -20,12 +21,19 @@
logger.warning("vllm is not installed. This test will be skipped")

HF_MODEL_HUB_NAME = "nm-testing"
# Default to "" (not None) so the string operations in the fixture below are
# always safe even when the env var is unset; the skipif guard on the test
# class still treats "" as "not set".
TEST_DATA_FILE = os.environ.get("TEST_DATA_FILE", "")


@pytest.fixture
def record_config_file(record_testsuite_property: Callable[[str, object], None]):
    """Record the config file name as a test-suite property in the JUnit XML.

    Strips everything up to and including the last "configs/" segment so the
    recorded value is the config path relative to the configs directory
    (the full path before that segment is machine-specific noise).
    """
    test_data_file_name = TEST_DATA_FILE.split("configs/")[-1]
    record_testsuite_property("TEST_DATA_FILE_NAME", test_data_file_name)


# Will run each test case in its own process through run_tests.sh
# emulating vLLM CI testing
@requires_gpu_count(1)
@pytest.mark.skipif(not TEST_DATA_FILE, reason="must set TEST_DATA_FILE env var")
dbarbuzzi marked this conversation as resolved.
Show resolved Hide resolved
@pytest.mark.skipif(not vllm_installed, reason="vLLM is not installed, skipping test")
class TestvLLM:
"""
Expand Down Expand Up @@ -73,6 +81,7 @@ def set_up(self):
]
self.api = HfApi()

@pytest.mark.usefixtures("record_config_file")
def test_vllm(self):
# Run vLLM with saved model
import torch
Expand Down
Loading