From ba3264b4e8125a798d61eb6f663b5c423be1b957 Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Mon, 5 Feb 2024 14:50:07 +0000 Subject: [PATCH] Image Feature Extraction pipeline (#28216) * Draft pipeline * Fixup * Fix docstrings * Update doctest * Update pipeline_model_mapping * Update docstring * Update tests * Update src/transformers/pipelines/image_feature_extraction.py Co-authored-by: Omar Sanseviero * Fix docstrings - review comments * Remove pipeline mapping for composite vision models * Add to pipeline tests * Remove for flava (multimodal) * safe pil import * Add requirements for pipeline run * Account for super slow efficientnet * Review comments * Fix tests * Swap order of kwargs * Use build_pipeline_init_args * Add back FE pipeline for Vilt * Include image_processor_kwargs in docstring * Mark test as flaky * Update TODO * Update tests/pipelines/test_pipelines_image_feature_extraction.py Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * Add license header --------- Co-authored-by: Omar Sanseviero Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> --- docs/source/en/main_classes/pipelines.md | 6 + docs/source/ja/main_classes/pipelines.md | 8 +- docs/source/zh/main_classes/pipelines.md | 8 +- src/transformers/__init__.py | 2 + src/transformers/pipelines/__init__.py | 15 ++ .../pipelines/feature_extraction.py | 2 +- .../pipelines/image_feature_extraction.py | 92 ++++++++++ src/transformers/pipelines/image_to_text.py | 15 ++ tests/models/beit/test_modeling_beit.py | 2 +- tests/models/bit/test_modeling_bit.py | 2 +- tests/models/blip/test_modeling_blip.py | 5 +- tests/models/clip/test_modeling_clip.py | 4 +- .../test_modeling_conditional_detr.py | 2 +- .../models/convnext/test_modeling_convnext.py | 2 +- .../convnextv2/test_modeling_convnextv2.py | 2 +- tests/models/cvt/test_modeling_cvt.py | 2 +- .../data2vec/test_modeling_data2vec_vision.py | 2 +- .../test_modeling_deformable_detr.py | 2 +- tests/models/deit/test_modeling_deit.py | 2 +- tests/models/deta/test_modeling_deta.py | 2 +- tests/models/detr/test_modeling_detr.py | 2 +- tests/models/dinat/test_modeling_dinat.py | 2 +- tests/models/dinov2/test_modeling_dinov2.py | 2 +- .../models/donut/test_modeling_donut_swin.py | 2 +- tests/models/dpt/test_modeling_dpt.py | 2 +- .../test_modeling_efficientformer.py | 2 +- .../test_modeling_efficientnet.py | 8 +- .../models/focalnet/test_modeling_focalnet.py | 2 +- tests/models/glpn/test_modeling_glpn.py | 4 +- .../models/imagegpt/test_modeling_imagegpt.py | 2 +- tests/models/levit/test_modeling_levit.py | 2 +- .../mask2former/test_modeling_mask2former.py | 2 +- .../maskformer/test_modeling_maskformer.py | 2 +- tests/models/mgp_str/test_modeling_mgp_str.py | 8 +- .../test_modeling_mobilenet_v1.py | 2 +- .../test_modeling_mobilenet_v2.py | 2 +- .../mobilevit/test_modeling_mobilevit.py | 2 +- .../mobilevitv2/test_modeling_mobilevitv2.py | 2 +- tests/models/nat/test_modeling_nat.py | 2 +- tests/models/owlv2/test_modeling_owlv2.py | 5 +- tests/models/owlvit/test_modeling_owlvit.py | 5 +- .../poolformer/test_modeling_poolformer.py | 2 +- tests/models/pvt/test_modeling_pvt.py | 2 +- tests/models/regnet/test_modeling_regnet.py | 2 +- tests/models/resnet/test_modeling_resnet.py | 2 +- .../segformer/test_modeling_segformer.py | 2 +- .../swiftformer/test_modeling_swiftformer.py | 2 +- tests/models/swin/test_modeling_swin.py | 2 +- tests/models/swin2sr/test_modeling_swin2sr.py | 2 +- 
tests/models/swinv2/test_modeling_swinv2.py | 2 +- .../test_modeling_table_transformer.py | 2 +- tests/models/vilt/test_modeling_vilt.py | 2 +- tests/models/vit/test_modeling_vit.py | 2 +- .../vit_hybrid/test_modeling_vit_hybrid.py | 2 +- tests/models/vit_mae/test_modeling_vit_mae.py | 2 +- tests/models/vit_msn/test_modeling_vit_msn.py | 2 +- tests/models/yolos/test_modeling_yolos.py | 4 +- ...test_pipelines_image_feature_extraction.py | 157 ++++++++++++++++++ tests/test_pipeline_mixin.py | 9 + utils/check_docstrings.py | 1 + 60 files changed, 387 insertions(+), 53 deletions(-) create mode 100644 src/transformers/pipelines/image_feature_extraction.py create mode 100644 tests/pipelines/test_pipelines_image_feature_extraction.py diff --git a/docs/source/en/main_classes/pipelines.md b/docs/source/en/main_classes/pipelines.md index 3cd0fc5bb97913..61bdf3729a7e0a 100644 --- a/docs/source/en/main_classes/pipelines.md +++ b/docs/source/en/main_classes/pipelines.md @@ -469,6 +469,12 @@ Pipelines available for multimodal tasks include the following. - __call__ - all +### ImageFeatureExtractionPipeline + +[[autodoc]] ImageFeatureExtractionPipeline + - __call__ + - all + ### ImageToTextPipeline [[autodoc]] ImageToTextPipeline diff --git a/docs/source/ja/main_classes/pipelines.md b/docs/source/ja/main_classes/pipelines.md index 321659de95ba6e..90eb17c0c44387 100644 --- a/docs/source/ja/main_classes/pipelines.md +++ b/docs/source/ja/main_classes/pipelines.md @@ -25,7 +25,7 @@ Recognition、Masked Language Modeling、Sentiment Analysis、Feature Extraction パイプラインの抽象化には2つのカテゴリーがある: - [`pipeline`] は、他のすべてのパイプラインをカプセル化する最も強力なオブジェクトです。 -- タスク固有のパイプラインは、[オーディオ](#audio)、[コンピューター ビジョン](#computer-vision)、[自然言語処理](#natural-language-processing)、および [マルチモーダル](#multimodal) タスクで使用できます。 +- タスク固有のパイプラインは、[オーディオ](#audio)、[コンピューター ビジョン](#computer-vision)、[自然言語処理](#natural-language-processing)、および [マルチモーダル](#multimodal) タスクで使用できます。 ## The pipeline abstraction @@ -477,6 +477,12 @@ my_pipeline = pipeline(model="xxxx", pipeline_class=MyPipeline) - __call__ - all +### ImageFeatureExtractionPipeline + +[[autodoc]] ImageFeatureExtractionPipeline + - __call__ + - all + ### ImageToTextPipeline [[autodoc]] ImageToTextPipeline diff --git a/docs/source/zh/main_classes/pipelines.md b/docs/source/zh/main_classes/pipelines.md index 4d2f1f0f9386a3..82d6de8e7161a4 100644 --- a/docs/source/zh/main_classes/pipelines.md +++ b/docs/source/zh/main_classes/pipelines.md @@ -435,7 +435,7 @@ See [`TokenClassificationPipeline`] for all details. - __call__ - all -## 多模态 +## 多模态 可用于多模态任务的pipeline包括以下几种。 @@ -451,6 +451,12 @@ See [`TokenClassificationPipeline`] for all details. 
- __call__ - all +### ImageFeatureExtractionPipeline + +[[autodoc]] ImageFeatureExtractionPipeline + - __call__ + - all + ### ImageToTextPipeline [[autodoc]] ImageToTextPipeline diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 415c880d6352f0..5e2b87089aba9e 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -973,6 +973,7 @@ "FeatureExtractionPipeline", "FillMaskPipeline", "ImageClassificationPipeline", + "ImageFeatureExtractionPipeline", "ImageSegmentationPipeline", "ImageToImagePipeline", "ImageToTextPipeline", @@ -5709,6 +5710,7 @@ FeatureExtractionPipeline, FillMaskPipeline, ImageClassificationPipeline, + ImageFeatureExtractionPipeline, ImageSegmentationPipeline, ImageToImagePipeline, ImageToTextPipeline, diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index 5a8525a358b816..16842293549291 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -66,6 +66,7 @@ from .feature_extraction import FeatureExtractionPipeline from .fill_mask import FillMaskPipeline from .image_classification import ImageClassificationPipeline +from .image_feature_extraction import ImageFeatureExtractionPipeline from .image_segmentation import ImageSegmentationPipeline from .image_to_image import ImageToImagePipeline from .image_to_text import ImageToTextPipeline @@ -362,6 +363,18 @@ }, "type": "image", }, + "image-feature-extraction": { + "impl": ImageFeatureExtractionPipeline, + "tf": (TFAutoModel,) if is_tf_available() else (), + "pt": (AutoModel,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("google/vit-base-patch16-224", "29e7a1e183"), + "tf": ("google/vit-base-patch16-224", "29e7a1e183"), + } + }, + "type": "image", + }, "image-segmentation": { "impl": ImageSegmentationPipeline, "tf": (), @@ -500,6 +513,7 @@ def check_task(task: str) -> Tuple[str, Dict, Any]: - `"feature-extraction"` - `"fill-mask"` - `"image-classification"` + - `"image-feature-extraction"` - `"image-segmentation"` - `"image-to-text"` - `"image-to-image"` @@ -586,6 +600,7 @@ def pipeline( - `"feature-extraction"`: will return a [`FeatureExtractionPipeline`]. - `"fill-mask"`: will return a [`FillMaskPipeline`]:. - `"image-classification"`: will return a [`ImageClassificationPipeline`]. + - `"image-feature-extraction"`: will return an [`ImageFeatureExtractionPipeline`]. - `"image-segmentation"`: will return a [`ImageSegmentationPipeline`]. - `"image-to-image"`: will return a [`ImageToImagePipeline`]. - `"image-to-text"`: will return a [`ImageToTextPipeline`]. diff --git a/src/transformers/pipelines/feature_extraction.py b/src/transformers/pipelines/feature_extraction.py index d704345db03df9..118baeccd0d6a2 100644 --- a/src/transformers/pipelines/feature_extraction.py +++ b/src/transformers/pipelines/feature_extraction.py @@ -14,7 +14,7 @@ ) class FeatureExtractionPipeline(Pipeline): """ - Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base + Feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base transformer, which can be used as features in downstream tasks. 
Example:

diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py
new file mode 100644
index 00000000000000..ccfe7c40d7e76d
--- /dev/null
+++ b/src/transformers/pipelines/image_feature_extraction.py
@@ -0,0 +1,92 @@
+from typing import Dict
+
+from ..utils import add_end_docstrings, is_vision_available
+from .base import GenericTensor, Pipeline, build_pipeline_init_args
+
+
+if is_vision_available():
+    from ..image_utils import load_image
+
+
+@add_end_docstrings(
+    build_pipeline_init_args(has_image_processor=True),
+    """
+        image_processor_kwargs (`dict`, *optional*):
+            Additional dictionary of keyword arguments passed along to the image processor e.g.
+            {"size": {"height": 100, "width": 100}}
+    """,
+)
+class ImageFeatureExtractionPipeline(Pipeline):
+    """
+    Image feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
+    transformer, which can be used as features in downstream tasks.
+
+    Example:
+
+    ```python
+    >>> from transformers import pipeline
+
+    >>> extractor = pipeline(model="google/vit-base-patch16-224", task="image-feature-extraction")
+    >>> result = extractor("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", return_tensors=True)
+    >>> result.shape # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input image.
+    torch.Size([1, 197, 768])
+    ```
+
+    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial).
+
+    This image feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
+    `"image-feature-extraction"`.
+
+    All vision models may be used for this pipeline. See a list of all models, including community-contributed models,
+    on [huggingface.co/models](https://huggingface.co/models).
+    """
+
+    def _sanitize_parameters(self, image_processor_kwargs=None, return_tensors=None, **kwargs):
+        preprocess_params = {} if image_processor_kwargs is None else image_processor_kwargs
+        postprocess_params = {"return_tensors": return_tensors} if return_tensors is not None else {}
+
+        if "timeout" in kwargs:
+            preprocess_params["timeout"] = kwargs["timeout"]
+
+        return preprocess_params, {}, postprocess_params
+
+    def preprocess(self, image, timeout=None, **image_processor_kwargs) -> Dict[str, GenericTensor]:
+        image = load_image(image, timeout=timeout)
+        model_inputs = self.image_processor(image, return_tensors=self.framework, **image_processor_kwargs)
+        return model_inputs
+
+    def _forward(self, model_inputs):
+        model_outputs = self.model(**model_inputs)
+        return model_outputs
+
+    def postprocess(self, model_outputs, return_tensors=False):
+        # [0] is the first available tensor, logits or last_hidden_state.
+        if return_tensors:
+            return model_outputs[0]
+        if self.framework == "pt":
+            return model_outputs[0].tolist()
+        elif self.framework == "tf":
+            return model_outputs[0].numpy().tolist()
+
+    def __call__(self, *args, **kwargs):
+        """
+        Extract the features of the input(s).
+
+        Args:
+            images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
+                The pipeline handles three types of images:
+
+                - A string containing an http link pointing to an image
+                - A string containing a local path to an image
+                - An image loaded in PIL directly
+
+                The pipeline accepts either a single image or a batch of images, which must then be passed as a list.
+ Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL + images. + timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and + the call may block forever. + Return: + A nested list of `float`: The features computed by the model. + """ + return super().__call__(*args, **kwargs) diff --git a/src/transformers/pipelines/image_to_text.py b/src/transformers/pipelines/image_to_text.py index ec1d07e0228253..26698ecf0cebc0 100644 --- a/src/transformers/pipelines/image_to_text.py +++ b/src/transformers/pipelines/image_to_text.py @@ -1,3 +1,18 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from typing import List, Union from ..utils import ( diff --git a/tests/models/beit/test_modeling_beit.py b/tests/models/beit/test_modeling_beit.py index fdf4607d62693f..40b0d6aa0bd38d 100644 --- a/tests/models/beit/test_modeling_beit.py +++ b/tests/models/beit/test_modeling_beit.py @@ -242,7 +242,7 @@ class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): ) pipeline_model_mapping = ( { - "feature-extraction": BeitModel, + "image-feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } diff --git a/tests/models/bit/test_modeling_bit.py b/tests/models/bit/test_modeling_bit.py index 03e2bd1095191d..1705aad976c091 100644 --- a/tests/models/bit/test_modeling_bit.py +++ b/tests/models/bit/test_modeling_bit.py @@ -162,7 +162,7 @@ class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": BitModel, "image-classification": BitForImageClassification} + {"image-feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/blip/test_modeling_blip.py b/tests/models/blip/test_modeling_blip.py index 4792757f9118f3..54512596b01c96 100644 --- a/tests/models/blip/test_modeling_blip.py +++ b/tests/models/blip/test_modeling_blip.py @@ -429,7 +429,10 @@ def prepare_config_and_inputs_for_common(self): class BlipModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BlipModel,) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": BlipModel, "image-to-text": BlipForConditionalGeneration} + { + "feature-extraction": BlipModel, + "image-to-text": BlipForConditionalGeneration, + } if is_torch_available() else {} ) diff --git a/tests/models/clip/test_modeling_clip.py b/tests/models/clip/test_modeling_clip.py index b96edcc56da76c..e3b87d966427b1 100644 --- a/tests/models/clip/test_modeling_clip.py +++ b/tests/models/clip/test_modeling_clip.py @@ -477,7 +477,9 @@ def 
prepare_config_and_inputs_for_common(self): @require_torch class CLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CLIPModel,) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": CLIPModel} if is_torch_available() else {} + pipeline_model_mapping = ( + {"feature-extraction": CLIPModel, "image-feature-extraction": CLIPVisionModel} if is_torch_available() else {} + ) fx_compatible = True test_head_masking = False test_pruning = False diff --git a/tests/models/conditional_detr/test_modeling_conditional_detr.py b/tests/models/conditional_detr/test_modeling_conditional_detr.py index aa0318f241aa92..f297634a2e7553 100644 --- a/tests/models/conditional_detr/test_modeling_conditional_detr.py +++ b/tests/models/conditional_detr/test_modeling_conditional_detr.py @@ -185,7 +185,7 @@ class ConditionalDetrModelTest(ModelTesterMixin, GenerationTesterMixin, Pipeline else () ) pipeline_model_mapping = ( - {"feature-extraction": ConditionalDetrModel, "object-detection": ConditionalDetrForObjectDetection} + {"image-feature-extraction": ConditionalDetrModel, "object-detection": ConditionalDetrForObjectDetection} if is_torch_available() else {} ) diff --git a/tests/models/convnext/test_modeling_convnext.py b/tests/models/convnext/test_modeling_convnext.py index ac2b6f927c8dfc..a56c38e3876b50 100644 --- a/tests/models/convnext/test_modeling_convnext.py +++ b/tests/models/convnext/test_modeling_convnext.py @@ -172,7 +172,7 @@ class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase else () ) pipeline_model_mapping = ( - {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification} + {"image-feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/convnextv2/test_modeling_convnextv2.py b/tests/models/convnextv2/test_modeling_convnextv2.py index 694901a1846994..b13028dba8045d 100644 --- a/tests/models/convnextv2/test_modeling_convnextv2.py +++ b/tests/models/convnextv2/test_modeling_convnextv2.py @@ -180,7 +180,7 @@ class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa else () ) pipeline_model_mapping = ( - {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification} + {"image-feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/cvt/test_modeling_cvt.py b/tests/models/cvt/test_modeling_cvt.py index 4abeb5571c7b58..aef8108e1766c4 100644 --- a/tests/models/cvt/test_modeling_cvt.py +++ b/tests/models/cvt/test_modeling_cvt.py @@ -151,7 +151,7 @@ class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification} + {"image-feature-extraction": CvtModel, "image-classification": CvtForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/data2vec/test_modeling_data2vec_vision.py b/tests/models/data2vec/test_modeling_data2vec_vision.py index bdb95588ac5cb7..20733cb2e428f6 100644 --- a/tests/models/data2vec/test_modeling_data2vec_vision.py +++ b/tests/models/data2vec/test_modeling_data2vec_vision.py @@ -178,7 +178,7 @@ class Data2VecVisionModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Te ) 
pipeline_model_mapping = ( { - "feature-extraction": Data2VecVisionModel, + "image-feature-extraction": Data2VecVisionModel, "image-classification": Data2VecVisionForImageClassification, "image-segmentation": Data2VecVisionForSemanticSegmentation, } diff --git a/tests/models/deformable_detr/test_modeling_deformable_detr.py b/tests/models/deformable_detr/test_modeling_deformable_detr.py index 38c42c55c34298..336f2437c4e7ae 100644 --- a/tests/models/deformable_detr/test_modeling_deformable_detr.py +++ b/tests/models/deformable_detr/test_modeling_deformable_detr.py @@ -191,7 +191,7 @@ def create_and_check_deformable_detr_object_detection_head_model(self, config, p class DeformableDetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DeformableDetrModel, DeformableDetrForObjectDetection) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": DeformableDetrModel, "object-detection": DeformableDetrForObjectDetection} + {"image-feature-extraction": DeformableDetrModel, "object-detection": DeformableDetrForObjectDetection} if is_torch_available() else {} ) diff --git a/tests/models/deit/test_modeling_deit.py b/tests/models/deit/test_modeling_deit.py index 9cd5be8fd3752c..87ac1690966003 100644 --- a/tests/models/deit/test_modeling_deit.py +++ b/tests/models/deit/test_modeling_deit.py @@ -206,7 +206,7 @@ class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): ) pipeline_model_mapping = ( { - "feature-extraction": DeiTModel, + "image-feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() diff --git a/tests/models/deta/test_modeling_deta.py b/tests/models/deta/test_modeling_deta.py index d8e16fca4982e6..3a3a957dd012e2 100644 --- a/tests/models/deta/test_modeling_deta.py +++ b/tests/models/deta/test_modeling_deta.py @@ -217,7 +217,7 @@ def create_and_check_deta_object_detection_head_model(self, config, pixel_values class DetaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DetaModel, DetaForObjectDetection) if is_torchvision_available() else () pipeline_model_mapping = ( - {"feature-extraction": DetaModel, "object-detection": DetaForObjectDetection} + {"image-feature-extraction": DetaModel, "object-detection": DetaForObjectDetection} if is_torchvision_available() else {} ) diff --git a/tests/models/detr/test_modeling_detr.py b/tests/models/detr/test_modeling_detr.py index de30d9db9b1409..02159795e823cf 100644 --- a/tests/models/detr/test_modeling_detr.py +++ b/tests/models/detr/test_modeling_detr.py @@ -182,7 +182,7 @@ class DetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin ) pipeline_model_mapping = ( { - "feature-extraction": DetrModel, + "image-feature-extraction": DetrModel, "image-segmentation": DetrForSegmentation, "object-detection": DetrForObjectDetection, } diff --git a/tests/models/dinat/test_modeling_dinat.py b/tests/models/dinat/test_modeling_dinat.py index c824060cf816b2..c29339881eb495 100644 --- a/tests/models/dinat/test_modeling_dinat.py +++ b/tests/models/dinat/test_modeling_dinat.py @@ -207,7 +207,7 @@ class DinatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": DinatModel, "image-classification": DinatForImageClassification} + {"image-feature-extraction": DinatModel, "image-classification": 
DinatForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/dinov2/test_modeling_dinov2.py b/tests/models/dinov2/test_modeling_dinov2.py index 8e68165754b0ed..f0365cac2a59ee 100644 --- a/tests/models/dinov2/test_modeling_dinov2.py +++ b/tests/models/dinov2/test_modeling_dinov2.py @@ -217,7 +217,7 @@ class Dinov2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": Dinov2Model, "image-classification": Dinov2ForImageClassification} + {"image-feature-extraction": Dinov2Model, "image-classification": Dinov2ForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/donut/test_modeling_donut_swin.py b/tests/models/donut/test_modeling_donut_swin.py index e52e679e42e682..23b7094d9b743f 100644 --- a/tests/models/donut/test_modeling_donut_swin.py +++ b/tests/models/donut/test_modeling_donut_swin.py @@ -145,7 +145,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch class DonutSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DonutSwinModel,) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": DonutSwinModel} if is_torch_available() else {} + pipeline_model_mapping = {"image-feature-extraction": DonutSwinModel} if is_torch_available() else {} fx_compatible = True test_pruning = False diff --git a/tests/models/dpt/test_modeling_dpt.py b/tests/models/dpt/test_modeling_dpt.py index 0b398c923e686f..2c092062791f7d 100644 --- a/tests/models/dpt/test_modeling_dpt.py +++ b/tests/models/dpt/test_modeling_dpt.py @@ -163,7 +163,7 @@ class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_model_mapping = ( { "depth-estimation": DPTForDepthEstimation, - "feature-extraction": DPTModel, + "image-feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() diff --git a/tests/models/efficientformer/test_modeling_efficientformer.py b/tests/models/efficientformer/test_modeling_efficientformer.py index 73283fbbf60026..2d6176960a5c5f 100644 --- a/tests/models/efficientformer/test_modeling_efficientformer.py +++ b/tests/models/efficientformer/test_modeling_efficientformer.py @@ -190,7 +190,7 @@ class EfficientFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.T ) pipeline_model_mapping = ( { - "feature-extraction": EfficientFormerModel, + "image-feature-extraction": EfficientFormerModel, "image-classification": ( EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, diff --git a/tests/models/efficientnet/test_modeling_efficientnet.py b/tests/models/efficientnet/test_modeling_efficientnet.py index 32050e3d21a5e1..19d66aca95ae2b 100644 --- a/tests/models/efficientnet/test_modeling_efficientnet.py +++ b/tests/models/efficientnet/test_modeling_efficientnet.py @@ -130,7 +130,7 @@ class EfficientNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Test all_model_classes = (EfficientNetModel, EfficientNetForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": EfficientNetModel, "image-classification": EfficientNetForImageClassification} + {"image-feature-extraction": EfficientNetModel, "image-classification": EfficientNetForImageClassification} if is_torch_available() else {} ) @@ -216,6 +216,12 @@ def test_model_from_pretrained(self): model = EfficientNetModel.from_pretrained(model_name) self.assertIsNotNone(model) + 
@is_pipeline_test + @require_vision + @slow + def test_pipeline_image_feature_extraction(self): + super().test_pipeline_image_feature_extraction() + @is_pipeline_test @require_vision @slow diff --git a/tests/models/focalnet/test_modeling_focalnet.py b/tests/models/focalnet/test_modeling_focalnet.py index 6de095d975234d..2b6f8cf9ab1522 100644 --- a/tests/models/focalnet/test_modeling_focalnet.py +++ b/tests/models/focalnet/test_modeling_focalnet.py @@ -238,7 +238,7 @@ class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase else () ) pipeline_model_mapping = ( - {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} + {"image-feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/glpn/test_modeling_glpn.py b/tests/models/glpn/test_modeling_glpn.py index 138a8cf2832eef..90f8996984d32c 100644 --- a/tests/models/glpn/test_modeling_glpn.py +++ b/tests/models/glpn/test_modeling_glpn.py @@ -146,7 +146,9 @@ def prepare_config_and_inputs_for_common(self): class GLPNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GLPNModel, GLPNForDepthEstimation) if is_torch_available() else () pipeline_model_mapping = ( - {"depth-estimation": GLPNForDepthEstimation, "feature-extraction": GLPNModel} if is_torch_available() else {} + {"depth-estimation": GLPNForDepthEstimation, "image-feature-extraction": GLPNModel} + if is_torch_available() + else {} ) test_head_masking = False diff --git a/tests/models/imagegpt/test_modeling_imagegpt.py b/tests/models/imagegpt/test_modeling_imagegpt.py index ad8c8d290e6715..40ea7ce0f4f559 100644 --- a/tests/models/imagegpt/test_modeling_imagegpt.py +++ b/tests/models/imagegpt/test_modeling_imagegpt.py @@ -271,7 +271,7 @@ class ImageGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterM ) all_generative_model_classes = (ImageGPTForCausalImageModeling,) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": ImageGPTModel, "image-classification": ImageGPTForImageClassification} + {"image-feature-extraction": ImageGPTModel, "image-classification": ImageGPTForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/levit/test_modeling_levit.py b/tests/models/levit/test_modeling_levit.py index d569b2b5385235..b6d9832704a521 100644 --- a/tests/models/levit/test_modeling_levit.py +++ b/tests/models/levit/test_modeling_levit.py @@ -176,7 +176,7 @@ class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): ) pipeline_model_mapping = ( { - "feature-extraction": LevitModel, + "image-feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() diff --git a/tests/models/mask2former/test_modeling_mask2former.py b/tests/models/mask2former/test_modeling_mask2former.py index fd9a513ab03263..d4167cfffe644c 100644 --- a/tests/models/mask2former/test_modeling_mask2former.py +++ b/tests/models/mask2former/test_modeling_mask2former.py @@ -197,7 +197,7 @@ def comm_check_on_output(result): @require_torch class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {} + pipeline_model_mapping = 
{"image-feature-extraction": Mask2FormerModel} if is_torch_available() else {} is_encoder_decoder = False test_pruning = False diff --git a/tests/models/maskformer/test_modeling_maskformer.py b/tests/models/maskformer/test_modeling_maskformer.py index 16ff3caed47504..d376216040591e 100644 --- a/tests/models/maskformer/test_modeling_maskformer.py +++ b/tests/models/maskformer/test_modeling_maskformer.py @@ -197,7 +197,7 @@ def comm_check_on_output(result): class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} + {"image-feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) diff --git a/tests/models/mgp_str/test_modeling_mgp_str.py b/tests/models/mgp_str/test_modeling_mgp_str.py index a7fd95a1311c5c..b2c3cb1400e49d 100644 --- a/tests/models/mgp_str/test_modeling_mgp_str.py +++ b/tests/models/mgp_str/test_modeling_mgp_str.py @@ -31,7 +31,7 @@ import torch from torch import nn - from transformers import MgpstrForSceneTextRecognition + from transformers import MgpstrForSceneTextRecognition, MgpstrModel if is_vision_available(): @@ -118,7 +118,11 @@ def prepare_config_and_inputs_for_common(self): @require_torch class MgpstrModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MgpstrForSceneTextRecognition,) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": MgpstrForSceneTextRecognition} if is_torch_available() else {} + pipeline_model_mapping = ( + {"feature-extraction": MgpstrForSceneTextRecognition, "image-feature-extraction": MgpstrModel} + if is_torch_available() + else {} + ) fx_compatible = False test_pruning = False diff --git a/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py b/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py index 35848da3161d51..6262475b8d0c71 100644 --- a/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py +++ b/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py @@ -147,7 +147,7 @@ class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification} + {"image-feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py b/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py index bbd83408853ceb..75580bfdf2b232 100644 --- a/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py +++ b/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py @@ -195,7 +195,7 @@ class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC ) pipeline_model_mapping = ( { - "feature-extraction": MobileNetV2Model, + "image-feature-extraction": MobileNetV2Model, "image-classification": MobileNetV2ForImageClassification, "image-segmentation": MobileNetV2ForSemanticSegmentation, } diff --git a/tests/models/mobilevit/test_modeling_mobilevit.py b/tests/models/mobilevit/test_modeling_mobilevit.py index 563bee802322d0..fc2ea5eba38321 100644 --- 
a/tests/models/mobilevit/test_modeling_mobilevit.py +++ b/tests/models/mobilevit/test_modeling_mobilevit.py @@ -188,7 +188,7 @@ class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas ) pipeline_model_mapping = ( { - "feature-extraction": MobileViTModel, + "image-feature-extraction": MobileViTModel, "image-classification": MobileViTForImageClassification, "image-segmentation": MobileViTForSemanticSegmentation, } diff --git a/tests/models/mobilevitv2/test_modeling_mobilevitv2.py b/tests/models/mobilevitv2/test_modeling_mobilevitv2.py index 192cf3a9e1e896..1fb6be94a2400c 100644 --- a/tests/models/mobilevitv2/test_modeling_mobilevitv2.py +++ b/tests/models/mobilevitv2/test_modeling_mobilevitv2.py @@ -190,7 +190,7 @@ class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC pipeline_model_mapping = ( { - "feature-extraction": MobileViTV2Model, + "image-feature-extraction": MobileViTV2Model, "image-classification": MobileViTV2ForImageClassification, "image-segmentation": MobileViTV2ForSemanticSegmentation, } diff --git a/tests/models/nat/test_modeling_nat.py b/tests/models/nat/test_modeling_nat.py index 3ab49d2d9557fa..cbdbfc83c5e0ad 100644 --- a/tests/models/nat/test_modeling_nat.py +++ b/tests/models/nat/test_modeling_nat.py @@ -204,7 +204,7 @@ class NatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": NatModel, "image-classification": NatForImageClassification} + {"image-feature-extraction": NatModel, "image-classification": NatForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/owlv2/test_modeling_owlv2.py b/tests/models/owlv2/test_modeling_owlv2.py index 8dbf3fcde89bbc..3dbcab2c934eaa 100644 --- a/tests/models/owlv2/test_modeling_owlv2.py +++ b/tests/models/owlv2/test_modeling_owlv2.py @@ -433,7 +433,10 @@ def prepare_config_and_inputs_for_common(self): class Owlv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Owlv2Model,) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": Owlv2Model, "zero-shot-object-detection": Owlv2ForObjectDetection} + { + "feature-extraction": Owlv2Model, + "zero-shot-object-detection": Owlv2ForObjectDetection, + } if is_torch_available() else {} ) diff --git a/tests/models/owlvit/test_modeling_owlvit.py b/tests/models/owlvit/test_modeling_owlvit.py index 8edbf411f7b94e..e99eb736e8255d 100644 --- a/tests/models/owlvit/test_modeling_owlvit.py +++ b/tests/models/owlvit/test_modeling_owlvit.py @@ -428,7 +428,10 @@ def prepare_config_and_inputs_for_common(self): class OwlViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (OwlViTModel,) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": OwlViTModel, "zero-shot-object-detection": OwlViTForObjectDetection} + { + "feature-extraction": OwlViTModel, + "zero-shot-object-detection": OwlViTForObjectDetection, + } if is_torch_available() else {} ) diff --git a/tests/models/poolformer/test_modeling_poolformer.py b/tests/models/poolformer/test_modeling_poolformer.py index 070564e718bf1e..e387053f110ada 100644 --- a/tests/models/poolformer/test_modeling_poolformer.py +++ b/tests/models/poolformer/test_modeling_poolformer.py @@ -124,7 +124,7 @@ def prepare_config_and_inputs_for_common(self): class PoolFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (PoolFormerModel, 
PoolFormerForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": PoolFormerModel, "image-classification": PoolFormerForImageClassification} + {"image-feature-extraction": PoolFormerModel, "image-classification": PoolFormerForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/pvt/test_modeling_pvt.py b/tests/models/pvt/test_modeling_pvt.py index e174b67a07887c..d17041ecfaa55f 100644 --- a/tests/models/pvt/test_modeling_pvt.py +++ b/tests/models/pvt/test_modeling_pvt.py @@ -158,7 +158,7 @@ def prepare_img(): class PvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (PvtModel, PvtForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": PvtModel, "image-classification": PvtForImageClassification} + {"image-feature-extraction": PvtModel, "image-classification": PvtForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/regnet/test_modeling_regnet.py b/tests/models/regnet/test_modeling_regnet.py index 9840575f317ecd..420609bf0300f0 100644 --- a/tests/models/regnet/test_modeling_regnet.py +++ b/tests/models/regnet/test_modeling_regnet.py @@ -126,7 +126,7 @@ class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification} + {"image-feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/resnet/test_modeling_resnet.py b/tests/models/resnet/test_modeling_resnet.py index bae9eb6d24c8cb..543013bc41b063 100644 --- a/tests/models/resnet/test_modeling_resnet.py +++ b/tests/models/resnet/test_modeling_resnet.py @@ -170,7 +170,7 @@ class ResNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": ResNetModel, "image-classification": ResNetForImageClassification} + {"image-feature-extraction": ResNetModel, "image-classification": ResNetForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/segformer/test_modeling_segformer.py b/tests/models/segformer/test_modeling_segformer.py index d9a4dce9ffeb3c..8cb7cbad42f2d0 100644 --- a/tests/models/segformer/test_modeling_segformer.py +++ b/tests/models/segformer/test_modeling_segformer.py @@ -171,7 +171,7 @@ class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas ) pipeline_model_mapping = ( { - "feature-extraction": SegformerModel, + "image-feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } diff --git a/tests/models/swiftformer/test_modeling_swiftformer.py b/tests/models/swiftformer/test_modeling_swiftformer.py index 83b6aa3510d925..a1e6229d5a6e81 100644 --- a/tests/models/swiftformer/test_modeling_swiftformer.py +++ b/tests/models/swiftformer/test_modeling_swiftformer.py @@ -139,7 +139,7 @@ class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} + 
{"image-feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/swin/test_modeling_swin.py b/tests/models/swin/test_modeling_swin.py index e82c13f8db2744..cd0b99fdc986a2 100644 --- a/tests/models/swin/test_modeling_swin.py +++ b/tests/models/swin/test_modeling_swin.py @@ -232,7 +232,7 @@ class SwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": SwinModel, "image-classification": SwinForImageClassification} + {"image-feature-extraction": SwinModel, "image-classification": SwinForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/swin2sr/test_modeling_swin2sr.py b/tests/models/swin2sr/test_modeling_swin2sr.py index 581e8debc7e7d7..556b65a249a22f 100644 --- a/tests/models/swin2sr/test_modeling_swin2sr.py +++ b/tests/models/swin2sr/test_modeling_swin2sr.py @@ -162,7 +162,7 @@ def prepare_config_and_inputs_for_common(self): class Swin2SRModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Swin2SRModel, Swin2SRForImageSuperResolution) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": Swin2SRModel, "image-to-image": Swin2SRForImageSuperResolution} + {"image-feature-extraction": Swin2SRModel, "image-to-image": Swin2SRForImageSuperResolution} if is_torch_available() else {} ) diff --git a/tests/models/swinv2/test_modeling_swinv2.py b/tests/models/swinv2/test_modeling_swinv2.py index ebe05a9a71b4b5..73f731cd60abbb 100644 --- a/tests/models/swinv2/test_modeling_swinv2.py +++ b/tests/models/swinv2/test_modeling_swinv2.py @@ -217,7 +217,7 @@ class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification} + {"image-feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/table_transformer/test_modeling_table_transformer.py b/tests/models/table_transformer/test_modeling_table_transformer.py index bb869d9422bb5a..eb5e80c93886b9 100644 --- a/tests/models/table_transformer/test_modeling_table_transformer.py +++ b/tests/models/table_transformer/test_modeling_table_transformer.py @@ -200,7 +200,7 @@ class TableTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, Pipelin else () ) pipeline_model_mapping = ( - {"feature-extraction": TableTransformerModel, "object-detection": TableTransformerForObjectDetection} + {"image-feature-extraction": TableTransformerModel, "object-detection": TableTransformerForObjectDetection} if is_torch_available() else {} ) diff --git a/tests/models/vilt/test_modeling_vilt.py b/tests/models/vilt/test_modeling_vilt.py index 853701e3a8ea78..e17d6ce61b302f 100644 --- a/tests/models/vilt/test_modeling_vilt.py +++ b/tests/models/vilt/test_modeling_vilt.py @@ -228,7 +228,7 @@ class ViltModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": ViltModel, "visual-question-answering": ViltForQuestionAnswering} + {"image-feature-extraction": ViltModel, "visual-question-answering": ViltForQuestionAnswering} if is_torch_available() else {} ) diff --git a/tests/models/vit/test_modeling_vit.py b/tests/models/vit/test_modeling_vit.py index 2e9a632a3719d4..c8181d2c2b5a2e 100644 --- 
a/tests/models/vit/test_modeling_vit.py +++ b/tests/models/vit/test_modeling_vit.py @@ -193,7 +193,7 @@ class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification} + {"image-feature-extraction": ViTModel, "image-classification": ViTForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/vit_hybrid/test_modeling_vit_hybrid.py b/tests/models/vit_hybrid/test_modeling_vit_hybrid.py index 567394c97942f4..2a8b5087f3966b 100644 --- a/tests/models/vit_hybrid/test_modeling_vit_hybrid.py +++ b/tests/models/vit_hybrid/test_modeling_vit_hybrid.py @@ -156,7 +156,7 @@ class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} + {"image-feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/vit_mae/test_modeling_vit_mae.py b/tests/models/vit_mae/test_modeling_vit_mae.py index 21a66b8a6d92a2..c1afc9694df561 100644 --- a/tests/models/vit_mae/test_modeling_vit_mae.py +++ b/tests/models/vit_mae/test_modeling_vit_mae.py @@ -164,7 +164,7 @@ class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {} + pipeline_model_mapping = {"image-feature-extraction": ViTMAEModel} if is_torch_available() else {} test_pruning = False test_torchscript = False diff --git a/tests/models/vit_msn/test_modeling_vit_msn.py b/tests/models/vit_msn/test_modeling_vit_msn.py index 96e107e7950ecc..a4cc370ec21c7a 100644 --- a/tests/models/vit_msn/test_modeling_vit_msn.py +++ b/tests/models/vit_msn/test_modeling_vit_msn.py @@ -152,7 +152,7 @@ class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} + {"image-feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/yolos/test_modeling_yolos.py b/tests/models/yolos/test_modeling_yolos.py index 390a54ebc99c3a..4b2aff30948767 100644 --- a/tests/models/yolos/test_modeling_yolos.py +++ b/tests/models/yolos/test_modeling_yolos.py @@ -168,7 +168,9 @@ class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} + {"image-feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} + if is_torch_available() + else {} ) test_pruning = False diff --git a/tests/pipelines/test_pipelines_image_feature_extraction.py b/tests/pipelines/test_pipelines_image_feature_extraction.py new file mode 100644 index 00000000000000..a9c99ad50bc604 --- /dev/null +++ 
b/tests/pipelines/test_pipelines_image_feature_extraction.py @@ -0,0 +1,157 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import pytest + +from transformers import ( + MODEL_MAPPING, + TF_MODEL_MAPPING, + TOKENIZER_MAPPING, + ImageFeatureExtractionPipeline, + is_tf_available, + is_torch_available, + is_vision_available, + pipeline, +) +from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch + + +if is_torch_available(): + import torch + +if is_tf_available(): + import tensorflow as tf + +if is_vision_available(): + from PIL import Image + + +# We will verify our results on an image of cute cats +def prepare_img(): + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + return image + + +@is_pipeline_test +class ImageFeatureExtractionPipelineTests(unittest.TestCase): + model_mapping = MODEL_MAPPING + tf_model_mapping = TF_MODEL_MAPPING + + @require_torch + def test_small_model_pt(self): + feature_extractor = pipeline( + task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt" + ) + img = prepare_img() + outputs = feature_extractor(img) + self.assertEqual( + nested_simplify(outputs[0][0]), + [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip + + @require_tf + def test_small_model_tf(self): + feature_extractor = pipeline( + task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="tf" + ) + img = prepare_img() + outputs = feature_extractor(img) + self.assertEqual( + nested_simplify(outputs[0][0]), + [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip + + @require_torch + def test_image_processing_small_model_pt(self): + feature_extractor = pipeline( + task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt" + ) + + # test with image processor parameters + image_processor_kwargs = {"size": {"height": 300, "width": 300}} + img = prepare_img() + with pytest.raises(ValueError): + # Image doesn't match model input size + feature_extractor(img, image_processor_kwargs=image_processor_kwargs) + + image_processor_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]} + img = prepare_img() + outputs = feature_extractor(img, image_processor_kwargs=image_processor_kwargs) + self.assertEqual(np.squeeze(outputs).shape, (226, 32)) + + @require_tf + def test_image_processing_small_model_tf(self): + feature_extractor = pipeline( + task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="tf" + ) + + # test with 
image processor parameters
+        image_processor_kwargs = {"size": {"height": 300, "width": 300}}
+        img = prepare_img()
+        with pytest.raises(ValueError):
+            # Image doesn't match model input size
+            feature_extractor(img, image_processor_kwargs=image_processor_kwargs)
+
+        image_processor_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]}
+        img = prepare_img()
+        outputs = feature_extractor(img, image_processor_kwargs=image_processor_kwargs)
+        self.assertEqual(np.squeeze(outputs).shape, (226, 32))
+
+    @require_torch
+    def test_return_tensors_pt(self):
+        feature_extractor = pipeline(
+            task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt"
+        )
+        img = prepare_img()
+        outputs = feature_extractor(img, return_tensors=True)
+        self.assertTrue(torch.is_tensor(outputs))
+
+    @require_tf
+    def test_return_tensors_tf(self):
+        feature_extractor = pipeline(
+            task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="tf"
+        )
+        img = prepare_img()
+        outputs = feature_extractor(img, return_tensors=True)
+        self.assertTrue(tf.is_tensor(outputs))
+
+    def get_test_pipeline(self, model, tokenizer, processor):
+        if processor is None:
+            self.skipTest("No image processor")
+
+        elif type(model.config) in TOKENIZER_MAPPING:
+            self.skipTest("This is a bimodal model, we need to find a more consistent way to switch on those models.")
+
+        elif model.config.is_encoder_decoder:
+            self.skipTest(
+                """encoder_decoder models are trickier for this pipeline.
+                Do we want encoder + decoder inputs to get some features?
+                Do we want encoder-only features?
+                For now ignore those.
+                """
+            )
+
+        feature_extractor = ImageFeatureExtractionPipeline(model=model, image_processor=processor)
+        img = prepare_img()
+        return feature_extractor, [img, img]
+
+    def run_pipeline_test(self, feature_extractor, examples):
+        imgs = examples
+        outputs = feature_extractor(imgs[0])
+
+        self.assertEqual(len(outputs), 1)
+
+        outputs = feature_extractor(imgs)
+        self.assertEqual(len(outputs), 2)
diff --git a/tests/test_pipeline_mixin.py b/tests/test_pipeline_mixin.py
index bd4b9eb39343a2..dbd783e9dc1a9e 100644
--- a/tests/test_pipeline_mixin.py
+++ b/tests/test_pipeline_mixin.py
@@ -39,6 +39,7 @@
 from .pipelines.test_pipelines_feature_extraction import FeatureExtractionPipelineTests
 from .pipelines.test_pipelines_fill_mask import FillMaskPipelineTests
 from .pipelines.test_pipelines_image_classification import ImageClassificationPipelineTests
+from .pipelines.test_pipelines_image_feature_extraction import ImageFeatureExtractionPipelineTests
 from .pipelines.test_pipelines_image_segmentation import ImageSegmentationPipelineTests
 from .pipelines.test_pipelines_image_to_image import ImageToImagePipelineTests
 from .pipelines.test_pipelines_image_to_text import ImageToTextPipelineTests
@@ -70,6 +71,7 @@
     "feature-extraction": {"test": FeatureExtractionPipelineTests},
     "fill-mask": {"test": FillMaskPipelineTests},
     "image-classification": {"test": ImageClassificationPipelineTests},
+    "image-feature-extraction": {"test": ImageFeatureExtractionPipelineTests},
     "image-segmentation": {"test": ImageSegmentationPipelineTests},
     "image-to-image": {"test": ImageToImagePipelineTests},
     "image-to-text": {"test": ImageToTextPipelineTests},
@@ -374,6 +376,13 @@ def test_pipeline_image_segmentation(self):
     def test_pipeline_image_to_text(self):
         self.run_task_tests(task="image-to-text")
 
+    @is_pipeline_test
+    @require_timm
+    @require_vision
+    @require_torch
+    def test_pipeline_image_feature_extraction(self):
self.run_task_tests(task="image-feature-extraction") + @unittest.skip(reason="`run_pipeline_test` is currently not implemented.") @is_pipeline_test @require_vision diff --git a/utils/check_docstrings.py b/utils/check_docstrings.py index 8a4394f6afb384..7c895163d95988 100644 --- a/utils/check_docstrings.py +++ b/utils/check_docstrings.py @@ -324,6 +324,7 @@ "IdeficsConfig", "IdeficsProcessor", "ImageClassificationPipeline", + "ImageFeatureExtractionPipeline", "ImageGPTConfig", "ImageSegmentationPipeline", "ImageToImagePipeline",
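Taken together, the pieces above define a small but complete pipeline. Below is a minimal usage sketch of the new `image-feature-extraction` task: the task identifier, the default `google/vit-base-patch16-224` checkpoint, and the `return_tensors` / `image_processor_kwargs` arguments all come from this patch, while the local file name `cats.png` and the mean-pooling step are illustrative assumptions, not part of the pipeline itself.

```python
from transformers import pipeline

# Load the pipeline under its new task identifier; "google/vit-base-patch16-224"
# is the default checkpoint registered for this task in the patch above.
extractor = pipeline(task="image-feature-extraction", model="google/vit-base-patch16-224", framework="pt")

# A single input may be an http link, a local path, or a PIL.Image.
# "cats.png" is a hypothetical local file used for illustration.
features = extractor("cats.png")  # nested list of floats

# With return_tensors=True, postprocess() returns the raw hidden states:
# shape [1, 197, 768] for ViT-base (1 CLS token + 196 patch tokens).
hidden = extractor("cats.png", return_tensors=True)

# One common way to get a single embedding per image (not done by the
# pipeline itself) is to mean-pool over the sequence dimension.
embedding = hidden.mean(dim=1)  # shape [1, 768]

# A batch must be passed as a list; all items must share one format.
batch_features = extractor(["cats.png", "cats.png"])

# Preprocessing can be customized per call via image_processor_kwargs,
# mirroring test_image_processing_small_model_pt above.
custom = extractor("cats.png", image_processor_kwargs={"image_mean": [0, 0, 0], "image_std": [1, 1, 1]})
```

Note that `postprocess` returns the first available output tensor (`logits` or `last_hidden_state`), so what "features" means depends on the architecture the checkpoint loads.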