From a0f710eec90287b50207115092e8491a35fb04b2 Mon Sep 17 00:00:00 2001
From: "Priyankha Devi A.S"
Date: Fri, 27 Dec 2024 16:15:52 +0530
Subject: [PATCH] Add CPU test for swin_tiny_2 model (#940)

---
 .../models/pytorch/vision/swin/test_swin.py   | 95 +++++++++++++++----
 .../pytorch/vision/swin/utils/image_utils.py  | 11 +++
 2 files changed, 85 insertions(+), 21 deletions(-)
 create mode 100644 forge/test/models/pytorch/vision/swin/utils/image_utils.py

diff --git a/forge/test/models/pytorch/vision/swin/test_swin.py b/forge/test/models/pytorch/vision/swin/test_swin.py
index 8f3dda218..e50d3afa8 100644
--- a/forge/test/models/pytorch/vision/swin/test_swin.py
+++ b/forge/test/models/pytorch/vision/swin/test_swin.py
@@ -2,37 +2,90 @@
 # SPDX-License-Identifier: Apache-2.0
 # STEP 0: import Forge library
-import forge
-import pytest
 import os
-from transformers import ViTImageProcessor
+import pytest
 import timm
+from transformers import ViTImageProcessor, Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling
 from test.utils import download_model
-from PIL import Image
-import requests
+import forge
+from forge.verify.verify import verify
+from test.models.pytorch.vision.swin.utils.image_utils import load_image
 
 
 @pytest.mark.nightly
 @pytest.mark.model_analysis
-def test_swin_v1_tiny_4_224_hf_pytorch(test_device):
-    # pytest.skip() # Working on it
-    # STEP 1: Set Forge configuration parameters
-    compiler_cfg = forge.config._get_global_compiler_config()
-    compiler_cfg.compile_depth = forge.CompileDepth.SPLIT_GRAPH
-
-    # STEP 2: Create Forge module from PyTorch model
-    feature_extractor = ViTImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
+@pytest.mark.xfail(
+    reason='RuntimeError: TT_ASSERT @ forge/csrc/passes/commute_utils.cpp:1105: reshape->op_name() == "reshape"'
+)
+@pytest.mark.parametrize("variant", ["microsoft/swin-tiny-patch4-window7-224"])
+def test_swin_v1_tiny_4_224_hf_pytorch(variant):
+
+    # STEP 1: Create Forge module from PyTorch model
+    feature_extractor = ViTImageProcessor.from_pretrained(variant)
     # model = SwinForImageClassification.from_pretrained("microsoft/swin-tiny-patch4-window7-224", torchscript=True)
-    model = download_model(timm.create_model, "swin_tiny_patch4_window7_224", pretrained=True)
+    model = download_model(timm.create_model, variant.split("/")[-1].replace("-", "_"), pretrained=True)
     model.eval()
 
-    # STEP 3: Prepare input samples
+    # STEP 2: Prepare input samples
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    inputs = load_image(url, feature_extractor)
+
+    # STEP 3: Run inference on Tenstorrent device
+    compiled_model = forge.compile(
+        model, sample_inputs=inputs, module_name="pt_" + str(variant.split("/")[-1].replace("-", "_"))
+    )
+    verify(inputs, model, compiled_model)
+
+
+@pytest.mark.nightly
+@pytest.mark.model_analysis
+@pytest.mark.xfail(reason="AssertionError: Data mismatch on output 0 between framework and Forge codegen")
+@pytest.mark.parametrize("variant", ["microsoft/swinv2-tiny-patch4-window8-256"])
+def test_swin_v2_tiny_4_256_hf_pytorch(variant):
+
+    feature_extractor = ViTImageProcessor.from_pretrained(variant)
+    framework_model = Swinv2Model.from_pretrained(variant)
+
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    inputs = load_image(url, feature_extractor)
+
+    compiled_model = forge.compile(
+        framework_model, sample_inputs=inputs, module_name="pt_" + str(variant.split("/")[-1].replace("-", "_"))
+    )
+    verify(inputs, framework_model, compiled_model)
+
+
+@pytest.mark.nightly
+@pytest.mark.model_analysis
+@pytest.mark.xfail(reason="AssertionError: Data mismatch on output 0 between framework and Forge codegen")
+@pytest.mark.parametrize("variant", ["microsoft/swinv2-tiny-patch4-window8-256"])
+def test_swin_v2_tiny_image_classification(variant):
+
+    feature_extractor = ViTImageProcessor.from_pretrained(variant)
+    framework_model = Swinv2ForImageClassification.from_pretrained(variant)
+
     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
-    image = Image.open(requests.get(url, stream=True).raw)
+    inputs = load_image(url, feature_extractor)
 
-    # STEP 4: Run inference on Tenstorrent device
-    img_tensor = feature_extractor(images=image, return_tensors="pt").pixel_values
-    print(img_tensor.shape)
+    compiled_model = forge.compile(
+        framework_model, sample_inputs=inputs, module_name="pt_" + str(variant.split("/")[-1].replace("-", "_"))
+    )
+    verify(inputs, framework_model, compiled_model)
+
+
+@pytest.mark.nightly
+@pytest.mark.model_analysis
+@pytest.mark.xfail(reason="AssertionError: Data mismatch on output 0 between framework and Forge codegen")
+@pytest.mark.parametrize("variant", ["microsoft/swinv2-tiny-patch4-window8-256"])
+def test_swin_v2_tiny_masked(variant):
+
+    feature_extractor = ViTImageProcessor.from_pretrained(variant)
+    framework_model = Swinv2ForMaskedImageModeling.from_pretrained(variant)
+
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    inputs = load_image(url, feature_extractor)
 
-    inputs = [img_tensor]
-    compiled_model = forge.compile(model, sample_inputs=inputs, module_name="pt_swin_tiny_patch4_window7_224")
+    compiled_model = forge.compile(
+        framework_model, sample_inputs=inputs, module_name="pt_" + str(variant.split("/")[-1].replace("-", "_"))
+    )
+    verify(inputs, framework_model, compiled_model)
diff --git a/forge/test/models/pytorch/vision/swin/utils/image_utils.py b/forge/test/models/pytorch/vision/swin/utils/image_utils.py
new file mode 100644
index 000000000..2c39b58f7
--- /dev/null
+++ b/forge/test/models/pytorch/vision/swin/utils/image_utils.py
@@ -0,0 +1,11 @@
+# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
+
+# SPDX-License-Identifier: Apache-2.0
+from PIL import Image
+import requests
+
+
+def load_image(image_path, feature_extractor):
+    image = Image.open(requests.get(image_path, stream=True).raw)
+    img_tensor = feature_extractor(images=image, return_tensors="pt").pixel_values
+    return [img_tensor]
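
Usage note: a minimal sketch of how the new load_image helper can be exercised on its own, assuming it is run from the repo root so the test package is importable, that the transformers dependency above is installed, and that the COCO sample URL is reachable; the printed shape is an expectation for the 256-pixel Swin V2 checkpoints, not a verified result.

    # Hypothetical standalone driver for the helper added in image_utils.py above.
    from transformers import ViTImageProcessor

    from test.models.pytorch.vision.swin.utils.image_utils import load_image

    processor = ViTImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
    inputs = load_image("http://images.cocodataset.org/val2017/000000039769.jpg", processor)
    # load_image returns a one-element list holding the preprocessed pixel_values tensor,
    # which is the sample_inputs format forge.compile expects in the tests above.
    print(inputs[0].shape)  # expected: torch.Size([1, 3, 256, 256]) for this checkpoint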