Skip to content

Commit

Permalink
Add Weekly job for model analysis
Browse files Browse the repository at this point in the history
  • Loading branch information
chandrasekaranpradeep committed Dec 2, 2024
1 parent 7bd0d30 commit 9a6e788
Show file tree
Hide file tree
Showing 7 changed files with 137 additions and 54 deletions.
59 changes: 59 additions & 0 deletions .github/workflows/model-analysis-weekly.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
# Weekly job that runs scripts/model_analysis.py over the PyTorch model tests
# and opens a PR with the regenerated model-analysis markdown docs.
name: Model Analysis Weekly

on:
  workflow_dispatch:
  # schedule:
  #   - cron: '0 23 * * 5' # 11:00 PM UTC Friday (12:00 AM Saturday Serbia)
  push:
    branches: ["pchandrasekaran/model_analysis_weekly_job"]

jobs:
  build-and-push-test:
    uses: ./.github/workflows/build-and-test.yml
    secrets: inherit
    with:
      test_mark: 'push'

  model-analysis:
    needs: build-and-push-test
    runs-on: in-service
    steps:
      # NOTE(review): there is no actions/checkout step — this assumes the
      # self-hosted "in-service" runner already has the repository and the
      # `env/` virtualenv in its workspace; confirm, since
      # peter-evans/create-pull-request requires a git checkout to commit from.
      - name: Run Model Analysis Script
        shell: bash
        run: |
          source env/activate
          python scripts/model_analysis.py \
            --test_directory_or_file_path forge/test/models/pytorch \
            --dump_failure_logs \
            --markdown_directory_path ./model_analysis_docs \
            --unique_ops_output_directory_path ./models_unique_ops_output \
            2>&1 | tee model_analysis.log

      # Upload logs whether the analysis step passed or failed.
      - name: Upload Model Analysis Script Logs
        uses: actions/upload-artifact@v4
        if: success() || failure()
        with:
          name: model-analysis-outputs
          path: model_analysis.log

      - name: Upload Models Unique Ops test Failure Logs
        uses: actions/upload-artifact@v4
        if: success() || failure()
        with:
          name: unique-ops-logs
          path: ./models_unique_ops_output

      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v7
        with:
          branch: model_analysis
          committer: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
          author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com>
          base: main
          commit-message: "Update model analysis docs"
          title: "Update model analysis docs"
          body: "This PR will update model analysis docs"
          labels: model_analysis
          delete-branch: true
          # FIX: `files` is not an input of peter-evans/create-pull-request;
          # `add-paths` is the documented input that limits which paths are
          # committed to the PR branch.
          add-paths: ./model_analysis_docs
          token: ${{ secrets.GH_TOKEN }}
21 changes: 21 additions & 0 deletions forge/test/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -448,3 +448,24 @@ def pytest_runtest_logreport(report):
for key, default_value in environ_before_test.items():
if os.environ.get(key, "") != default_value:
os.environ[key] = default_value


def pytest_collection_modifyitems(config, items):
    """Print the tests collected for automatic model analysis.

    When pytest is invoked with ``-m automatic_model_analysis``, print every
    collected test that carries the marker, followed by a count line.

    The exact header ("Automatic Model Analysis Collected tests:") and footer
    ("Automatic Model Analysis Collected test count:") lines are parsed by
    ``scripts/model_analysis.py`` to recover the test list, so they must not
    be reworded.
    """
    marker = config.getoption("-m")  # Marker expression passed via -m

    if marker == "automatic_model_analysis":
        # FIX: the original filtered `items` twice (a comprehension plus a
        # second manual loop with a hand-rolled counter); filter once and
        # reuse the result for printing, counting, and the empty-set warning.
        filtered_items = [item for item in items if marker in item.keywords]
        print("Automatic Model Analysis Collected tests: ")
        for item in filtered_items:
            test_file_path = item.location[0]
            test_name = item.location[2]
            print(f"{test_file_path}::{test_name}")
        print(f"Automatic Model Analysis Collected test count: {len(filtered_items)}")
        if not filtered_items:  # Warn if no tests match the marker
            print(f"Warning: No tests found with marker '{marker}'.")
    else:
        # NOTE(review): prints the raw collected-item list on every run that
        # does not use the marker — looks like leftover debug output; confirm
        # before removing, kept here to preserve behavior.
        print(items)
1 change: 1 addition & 0 deletions forge/test/models/pytorch/text/bart/test_bart.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ def forward(self, input_ids, attention_mask, decoder_input_ids):


@pytest.mark.nightly
@pytest.mark.automatic_model_analysis
def test_pt_bart_classifier(test_device):
compiler_cfg = _get_global_compiler_config()
compiler_cfg.compile_depth = CompileDepth.SPLIT_GRAPH
Expand Down
4 changes: 4 additions & 0 deletions forge/test/models/pytorch/text/distilbert/test_distilbert.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@


@pytest.mark.nightly
@pytest.mark.automatic_model_analysis
@pytest.mark.parametrize("variant", variants, ids=variants)
def test_distilbert_masked_lm_pytorch(variant, test_device):
# Load DistilBert tokenizer and model from HuggingFace
Expand Down Expand Up @@ -46,6 +47,7 @@ def test_distilbert_masked_lm_pytorch(variant, test_device):


@pytest.mark.nightly
@pytest.mark.automatic_model_analysis
def test_distilbert_question_answering_pytorch(test_device):
# Load Bert tokenizer and model from HuggingFace
model_ckpt = "distilbert-base-cased-distilled-squad"
Expand Down Expand Up @@ -82,6 +84,7 @@ def test_distilbert_question_answering_pytorch(test_device):


@pytest.mark.nightly
@pytest.mark.automatic_model_analysis
def test_distilbert_sequence_classification_pytorch(test_device):

# Load DistilBert tokenizer and model from HuggingFace
Expand Down Expand Up @@ -109,6 +112,7 @@ def test_distilbert_sequence_classification_pytorch(test_device):


@pytest.mark.nightly
@pytest.mark.automatic_model_analysis
def test_distilbert_token_classification_pytorch(test_device):
# Load DistilBERT tokenizer and model from HuggingFace
model_ckpt = "Davlan/distilbert-base-multilingual-cased-ner-hrl"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@


@pytest.mark.nightly
@pytest.mark.automatic_model_analysis
def test_conv_ae_pytorch(test_device):
# Set Forge configuration parameters
compiler_cfg = forge.config._get_global_compiler_config()
Expand Down Expand Up @@ -40,6 +41,7 @@ def test_conv_ae_pytorch(test_device):


@pytest.mark.nightly
@pytest.mark.automatic_model_analysis
def test_linear_ae_pytorch(test_device):
# Set Forge configuration parameters
compiler_cfg = forge.config._get_global_compiler_config()
Expand Down
1 change: 1 addition & 0 deletions forge/test/models/pytorch/vision/fpn/test_fpn.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@


@pytest.mark.nightly
@pytest.mark.automatic_model_analysis
def test_fpn_pytorch(test_device):
compiler_cfg = forge.config._get_global_compiler_config()
compiler_cfg.compile_depth = forge.CompileDepth.SPLIT_GRAPH
Expand Down
103 changes: 49 additions & 54 deletions scripts/model_analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -542,53 +542,56 @@ def dump_logs(log_file_dir_path: str, log_file_name: str, content: str):
logger.info(f"Dumped test logs in {log_file}")


def collect_all_pytests(root_dir_path):
def collect_all_model_analysis_test(directory_or_file_path, output_directory_path):

assert check_path(root_dir_path), f"The directory path for collecting pytest {root_dir_path} doesn't exists"
assert check_path(
directory_or_file_path
), f"The directory path for collecting test {directory_or_file_path} doesn't exists"

logger.info(f"Collecting all pytests in {root_dir_path}")
logger.info(f"Collecting all test that has automatic_model_analysis marker in {directory_or_file_path}")

collected_test_outputs = ""
try:
res = subprocess.check_output(["pytest", root_dir_path, "--setup-plan"], stderr=subprocess.STDOUT).decode(
"utf-8"
result = subprocess.run(
["pytest", directory_or_file_path, "-m", "automatic_model_analysis", "--collect-only"],
capture_output=True,
text=True,
check=True,
)
except subprocess.CalledProcessError as e:
output = e.output.decode("utf-8")
logger.error(f"[Error!] output = {output}")
return []

test_list = []
lines = res.split("\n")
for line in lines:
if "warnings summary" in line or "slowest durations" in line:
break
collected_test_outputs += "STDOUT:\n"
collected_test_outputs += result.stdout
collected_test_outputs += "STDERR:\n"
collected_test_outputs += result.stderr

if line and line.startswith(" " + root_dir_path) and "::" in line and "training" not in line:
line = line.strip()
line = line.split(" (fixtures used:")[0] if " (fixtures used:" in line else line
if "Grayskull" not in line and "Wormhole_B0" not in line:
test_list.append(line)
except subprocess.CalledProcessError as e:
collected_test_outputs += e.output

return test_list
dump_logs(output_directory_path, "collected_tests.txt", collected_test_outputs)

test_list = []
with open(os.path.join(output_directory_path, "collected_tests.txt"), "r") as collected_test_file:
lines = collected_test_file.readlines()
test_lines = False
for line in lines:
if "Automatic Model Analysis Collected tests:" in line:
test_lines = True
elif "Automatic Model Analysis Collected test count:" in line:
test_lines = False
break
elif test_lines:
test_list.append(str(line).replace("\n", ""))

def generate_and_export_unique_ops_tests(pytest_directory_path, model_file_path, unique_ops_output_directory_path):
return test_list

# If model_file_path is specified, collect all the tests in the model_file_path parent directory path
# and in the test_list will only include the tests matching with model_file_path,
# otherwise collect all the tests in the pytest_directory_path specified by the user
if model_file_path:
model_file_path_list = model_file_path.split("/")[:-1]
tests_directory_path = "/".join(model_file_path_list)
else:
tests_directory_path = pytest_directory_path

test_list = collect_all_pytests(tests_directory_path)
def generate_and_export_unique_ops_tests(test_directory_or_file_path, unique_ops_output_directory_path):

if model_file_path:
test_list = [test for test in test_list if test.split("::")[0] == model_file_path]
test_list = collect_all_model_analysis_test(test_directory_or_file_path, unique_ops_output_directory_path)

assert test_list != [], f"No tests found in the {tests_directory_path} path"
assert (
test_list != []
), f"No tests found in the {test_directory_or_file_path} path with automatic_model_analysis pytest marker"

# Create a dictonary contains model_name as key and model tests(i.e include variant, task) as values
model_name_to_tests = {}
Expand All @@ -599,6 +602,11 @@ def generate_and_export_unique_ops_tests(pytest_directory_path, model_file_path,
else:
model_name_to_tests[model_name].append(test)

for model_name, tests in model_name_to_tests.items():
print(f"{model_name}:")
for test in tests:
print(f"\t\t\t{test}")

# Generate unique op test for the all collected test and save the models unique ops test information in the unique_ops_output_directory_path
model_output_dir_paths = []
for model_name, tests in model_name_to_tests.items():
Expand Down Expand Up @@ -941,21 +949,16 @@ def run_model_unique_op_tests_and_generate_markdowns(

def main():
parser = argparse.ArgumentParser(
description="""Generate unique ops test for the models present in the pytest_directory_path or model_file_path
description="""Generate unique ops test for the models present in the test_directory_or_file_path
specified by the user and run the unique ops test and generate markdown files, the root markdown file contains model name,
variant name, framework and compiler components supported rate and sub-markdown file contains model variant unique op tests info"""
)

group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"--pytest_directory_path",
type=str,
help="Specify the directory path containing models to test.",
)
group.add_argument(
"--model_file_path",
parser.add_argument(
"--test_directory_or_file_path",
type=str,
help="Specify the model file path to generate unique op tests and markdown file.",
default=os.path.join(os.getcwd(), "forge/test"),
help="Specify the directory or file path containing models test with automatic_model_analysis pytest marker",
)
parser.add_argument(
"--dump_failure_logs",
Expand All @@ -964,29 +967,21 @@ def main():
)
parser.add_argument(
"--markdown_directory_path",
default=os.path.join(os.getcwd(), "new_models_analysis_docs"),
default=os.path.join(os.getcwd(), "models_analysis_docs"),
required=False,
help="Specify the directory path for saving models information as markdowns file",
)
parser.add_argument(
"--unique_ops_output_directory_path",
default=os.path.join(os.getcwd(), "new_unique_ops"),
default=os.path.join(os.getcwd(), "unique_ops"),
required=False,
help="Specify the output directory path for saving models unique op tests outputs(i.e failure logs, xlsx file)",
)

args = parser.parse_args()

if args.pytest_directory_path is not None:
assert check_path(
args.pytest_directory_path
), f"Specified pytest directory path {args.pytest_directory_path} doesn't exists"
else:
assert check_path(args.model_file_path), f"Specified model file path {args.model_file_path} doesn't exists"

model_output_dir_paths = generate_and_export_unique_ops_tests(
pytest_directory_path=args.pytest_directory_path,
model_file_path=args.model_file_path,
test_directory_or_file_path=args.test_directory_or_file_path,
unique_ops_output_directory_path=args.unique_ops_output_directory_path,
)

Expand Down

0 comments on commit 9a6e788

Please sign in to comment.