From 12a4da139f6fa8da3bc79b283d01ab8bd9f5caef Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Fri, 22 Dec 2023 21:21:11 +0000 Subject: [PATCH 01/25] Draft pipeline --- docs/source/en/main_classes/pipelines.md | 6 + docs/source/ja/main_classes/pipelines.md | 8 +- docs/source/zh/main_classes/pipelines.md | 8 +- src/transformers/__init__.py | 2 + src/transformers/pipelines/__init__.py | 15 ++ .../pipelines/image_feature_extraction.py | 96 ++++++++++ ...test_pipelines_image_feature_extraction.py | 177 ++++++++++++++++++ 7 files changed, 310 insertions(+), 2 deletions(-) create mode 100644 src/transformers/pipelines/image_feature_extraction.py create mode 100644 tests/pipelines/test_pipelines_image_feature_extraction.py diff --git a/docs/source/en/main_classes/pipelines.md b/docs/source/en/main_classes/pipelines.md index 3cd0fc5bb97913..61bdf3729a7e0a 100644 --- a/docs/source/en/main_classes/pipelines.md +++ b/docs/source/en/main_classes/pipelines.md @@ -469,6 +469,12 @@ Pipelines available for multimodal tasks include the following. - __call__ - all +### ImageFeatureExtractionPipeline + +[[autodoc]] ImageFeatureExtractionPipeline + - __call__ + - all + ### ImageToTextPipeline [[autodoc]] ImageToTextPipeline diff --git a/docs/source/ja/main_classes/pipelines.md b/docs/source/ja/main_classes/pipelines.md index 321659de95ba6e..90eb17c0c44387 100644 --- a/docs/source/ja/main_classes/pipelines.md +++ b/docs/source/ja/main_classes/pipelines.md @@ -25,7 +25,7 @@ Recognition、Masked Language Modeling、Sentiment Analysis、Feature Extraction パイプラインの抽象化には2つのカテゴリーがある: - [`pipeline`] は、他のすべてのパイプラインをカプセル化する最も強力なオブジェクトです。 -- タスク固有のパイプラインは、[オーディオ](#audio)、[コンピューター ビジョン](#computer-vision)、[自然言語処理](#natural-language-processing)、および [マルチモーダル](#multimodal) タスクで使用できます。 +- タスク固有のパイプラインは、[オーディオ](#audio)、[コンピューター ビジョン](#computer-vision)、[自然言語処理](#natural-language-processing)、および [マルチモーダル](#multimodal) タスクで使用できます。 ## The pipeline abstraction @@ -477,6 +477,12 @@ my_pipeline = pipeline(model="xxxx", pipeline_class=MyPipeline) - __call__ - all +### ImageFeatureExtractionPipeline + +[[autodoc]] ImageFeatureExtractionPipeline + - __call__ + - all + ### ImageToTextPipeline [[autodoc]] ImageToTextPipeline diff --git a/docs/source/zh/main_classes/pipelines.md b/docs/source/zh/main_classes/pipelines.md index 4d2f1f0f9386a3..82d6de8e7161a4 100644 --- a/docs/source/zh/main_classes/pipelines.md +++ b/docs/source/zh/main_classes/pipelines.md @@ -435,7 +435,7 @@ See [`TokenClassificationPipeline`] for all details. - __call__ - all -## 多模态 +## 多模态 可用于多模态任务的pipeline包括以下几种。 @@ -451,6 +451,12 @@ See [`TokenClassificationPipeline`] for all details. 
- __call__ - all +### ImageFeatureExtractionPipeline + +[[autodoc]] ImageFeatureExtractionPipeline + - __call__ + - all + ### ImageToTextPipeline [[autodoc]] ImageToTextPipeline diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 415c880d6352f0..5e2b87089aba9e 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -973,6 +973,7 @@ "FeatureExtractionPipeline", "FillMaskPipeline", "ImageClassificationPipeline", + "ImageFeatureExtractionPipeline", "ImageSegmentationPipeline", "ImageToImagePipeline", "ImageToTextPipeline", @@ -5709,6 +5710,7 @@ FeatureExtractionPipeline, FillMaskPipeline, ImageClassificationPipeline, + ImageFeatureExtractionPipeline, ImageSegmentationPipeline, ImageToImagePipeline, ImageToTextPipeline, diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index 5a8525a358b816..16842293549291 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -66,6 +66,7 @@ from .feature_extraction import FeatureExtractionPipeline from .fill_mask import FillMaskPipeline from .image_classification import ImageClassificationPipeline +from .image_feature_extraction import ImageFeatureExtractionPipeline from .image_segmentation import ImageSegmentationPipeline from .image_to_image import ImageToImagePipeline from .image_to_text import ImageToTextPipeline @@ -362,6 +363,18 @@ }, "type": "image", }, + "image-feature-extraction": { + "impl": ImageFeatureExtractionPipeline, + "tf": (TFAutoModel,) if is_tf_available() else (), + "pt": (AutoModel,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("google/vit-base-patch16-224", "29e7a1e183"), + "tf": ("google/vit-base-patch16-224", "29e7a1e183"), + } + }, + "type": "image", + }, "image-segmentation": { "impl": ImageSegmentationPipeline, "tf": (), @@ -500,6 +513,7 @@ def check_task(task: str) -> Tuple[str, Dict, Any]: - `"feature-extraction"` - `"fill-mask"` - `"image-classification"` + - `"image-feature-extraction"` - `"image-segmentation"` - `"image-to-text"` - `"image-to-image"` @@ -586,6 +600,7 @@ def pipeline( - `"feature-extraction"`: will return a [`FeatureExtractionPipeline`]. - `"fill-mask"`: will return a [`FillMaskPipeline`]:. - `"image-classification"`: will return a [`ImageClassificationPipeline`]. + - `"image-feature-extraction"`: will return an [`ImageFeatureExtractionPipeline`]. - `"image-segmentation"`: will return a [`ImageSegmentationPipeline`]. - `"image-to-image"`: will return a [`ImageToImagePipeline`]. - `"image-to-text"`: will return a [`ImageToTextPipeline`]. diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py new file mode 100644 index 00000000000000..2ff3ae83a84d53 --- /dev/null +++ b/src/transformers/pipelines/image_feature_extraction.py @@ -0,0 +1,96 @@ +from typing import Dict + +from .base import GenericTensor, Pipeline + + +# Can't use @add_end_docstrings(PIPELINE_INIT_ARGS) here because this one does not accept `binary_output` +class ImageFeatureExtractionPipeline(Pipeline): + """ + Image feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base + transformer, which can be used as features in downstream tasks. 
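The `SUPPORTED_TASKS` entry above pins `google/vit-base-patch16-224` as the default checkpoint for both frameworks, so the task can be exercised without naming a model. A minimal sketch of a call against this draft (the fixture path is the one the new tests below use; the draft `preprocess` hands its input directly to the image processor, so a `PIL.Image` is the safe input type at this stage):

```python
from PIL import Image
from transformers import pipeline

# "image-feature-extraction" resolves to the pinned ViT default registered above,
# so no model argument is required.
extractor = pipeline("image-feature-extraction")

# The draft preprocess() feeds the image straight to the image processor;
# postprocess() returns last_hidden_state as nested lists: [batch, tokens, hidden].
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
features = extractor(image)
print(len(features[0]), len(features[0][0]))  # 197 tokens, 768 hidden dims for ViT-base
```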
+
+    Example:
+
+    ```python
+    >>> from transformers import pipeline
+
+    >>> extractor = pipeline(model="bert-base-uncased", task="image-feature-extraction")
+    >>> result = extractor("This is a simple test.", return_tensors=True)
+    >>> result.shape # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input string.
+    torch.Size([1, 8, 768])
+    ```
+
+    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
+
+    This image feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
+    `"image-feature-extraction"`.
+
+    All vision models may be used for this pipeline. See a list of all models, including community-contributed models on
+    [huggingface.co/models](https://huggingface.co/models).
+
+    Arguments:
+        model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
+            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
+            [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow.
+        tokenizer (`Optional`, *optional*):
+        feature_extractor (`Optional`, *optional*):
+        image_processor ([`PreTrainedImageProcessor`], *optional*):
+            The image processor that will be used by the pipeline to encode data for the model. This object inherits from
+            [`PreTrainedImageProcessor`].
+        modelcard (`str` or [`ModelCard`], *optional*):
+            Model card attributed to the model for this pipeline.
+        framework (`str`, *optional*):
+            The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
+            installed.
+
+            If no framework is specified, will default to the one currently installed. If no framework is specified and
+            both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
+            provided.
+        task (`str`, *optional*, defaults to `""`):
+            A task-identifier for the pipeline.
+        args_parser ([`~pipelines.ArgumentHandler`], *optional*):
+            Reference to the object in charge of parsing supplied pipeline parameters.
+        device (`int`, *optional*, defaults to -1):
+            Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on
+            the associated CUDA device id.
+        torch_dtype (`Union`, *optional*):
+        binary_output (`bool`, *optional*, defaults to `False`):
+    """
+
+    def _sanitize_parameters(self, return_tensors=None, **kwargs):
+        preprocess_params = {}
+        postprocess_params = {}
+
+        if return_tensors is not None:
+            postprocess_params["return_tensors"] = return_tensors
+
+        return preprocess_params, {}, postprocess_params
+
+    def preprocess(self, image, **preprocess_kwargs) -> Dict[str, GenericTensor]:
+        model_inputs = self.image_processor(image, return_tensors=self.framework, **preprocess_kwargs)
+        return model_inputs
+
+    def _forward(self, model_inputs):
+        model_outputs = self.model(**model_inputs)
+        return model_outputs
+
+    def postprocess(self, model_outputs, return_tensors=False):
+        # [0] is the first available tensor, logits or last_hidden_state.
+        if return_tensors:
+            return model_outputs[0]
+        if self.framework == "pt":
+            return model_outputs[0].tolist()
+        elif self.framework == "tf":
+            return model_outputs[0].numpy().tolist()
+
+    def __call__(self, *args, **kwargs):
+        """
+        Extract the features of the input(s).
+
+        Args:
+            args (`str`, `List[str]`, `PIL.Image.Image` or `List[PIL.Image.Image]`): One or several images (or one
+                list of images) to get the features of.
+
+        Return:
+            A nested list of `float`: The features computed by the model.
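Because the return value is token-level, a downstream consumer typically pools it into a single vector per image. A sketch under the same API (mean pooling and the rotation comparison are illustrative choices, not something the pipeline does for you):

```python
import numpy as np
from PIL import Image
from transformers import pipeline

extractor = pipeline("image-feature-extraction")
img_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
img_b = img_a.rotate(90)

# Mean-pool each [1, tokens, hidden] output over the token axis, then compare
# the two images with cosine similarity.
emb_a, emb_b = (np.array(extractor(im))[0].mean(axis=0) for im in (img_a, img_b))
cosine = float(emb_a @ emb_b / (np.linalg.norm(emb_a) * np.linalg.norm(emb_b)))
print(f"cosine similarity: {cosine:.3f}")
```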
+ """ + return super().__call__(*args, **kwargs) diff --git a/tests/pipelines/test_pipelines_image_feature_extraction.py b/tests/pipelines/test_pipelines_image_feature_extraction.py new file mode 100644 index 00000000000000..4d8c2c6bb183a1 --- /dev/null +++ b/tests/pipelines/test_pipelines_image_feature_extraction.py @@ -0,0 +1,177 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +from PIL import Image + +from transformers import ( + TOKENIZER_MAPPING, + MODEL_MAPPING, + TF_MODEL_MAPPING, + ImageFeatureExtractionPipeline, + is_tf_available, + is_torch_available, + pipeline, +) +from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch + + +if is_torch_available(): + import torch + +if is_tf_available(): + import tensorflow as tf + + +# We will verify our results on an image of cute cats +def prepare_img(): + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + return image + + + +@is_pipeline_test +class FeatureExtractionPipelineTests(unittest.TestCase): + model_mapping = MODEL_MAPPING + tf_model_mapping = TF_MODEL_MAPPING + + @require_torch + def test_small_model_pt(self): + feature_extractor = pipeline( + task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt" + ) + img = prepare_img() + outputs = feature_extractor(img) + self.assertEqual( + nested_simplify(outputs[0][0]), + [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip + + @require_tf + def test_small_model_tf(self): + feature_extractor = pipeline( + task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="tf" + ) + img = prepare_img() + outputs = feature_extractor(img) + self.assertEqual( + nested_simplify(outputs[0][0]), + [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip + + @require_torch + def test_image_processing_small_model_pt(self): + feature_extractor = pipeline( + task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt" + ) + # test with empty parameters + img = prepare_img() + outputs = feature_extractor(img) + self.assertEqual( + nested_simplify(outputs[0][0]), + [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip + + # test with various tokenizer parameters + preprocess_kwargs = {"size": {"height": 300, "width": 300}} + img = prepare_img() + outputs = 
feature_extractor(img, preprocess_kwargs=preprocess_kwargs)
+        self.assertEqual(np.squeeze(outputs).shape, (226, 32))
+
+    @require_tf
+    def test_image_processing_small_model_tf(self):
+        feature_extractor = pipeline(
+            task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="tf"
+        )
+        # test with empty parameters
+        img = prepare_img()
+        outputs = feature_extractor(img)
+        self.assertEqual(
+            nested_simplify(outputs[0][0]),
+            [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip
+
+        # test with various tokenizer parameters
+        preprocess_kwargs = {"size": {"height": 300, "width": 300}}
+        img = prepare_img()
+        outputs = feature_extractor(img, preprocess_kwargs=preprocess_kwargs)
+        self.assertEqual(np.squeeze(outputs).shape, (226, 32))
+
+    @require_torch
+    def test_return_tensors_pt(self):
+        feature_extractor = pipeline(
+            task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt"
+        )
+        img = prepare_img()
+        outputs = feature_extractor(img, return_tensors=True)
+        self.assertTrue(torch.is_tensor(outputs))
+
+    @require_tf
+    def test_return_tensors_tf(self):
+        feature_extractor = pipeline(
+            task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="tf"
+        )
+        img = prepare_img()
+        outputs = feature_extractor(img, return_tensors=True)
+        self.assertTrue(tf.is_tensor(outputs))
+
+    def get_shape(self, input_, shape=None):
+        if shape is None:
+            shape = []
+        if isinstance(input_, list):
+            subshapes = [self.get_shape(in_, shape) for in_ in input_]
+            if all(s == 0 for s in subshapes):
+                shape.append(len(input_))
+            else:
+                subshape = subshapes[0]
+                shape = [len(input_), *subshape]
+        elif isinstance(input_, float):
+            return 0
+        else:
+            raise ValueError("We expect lists of floats, nothing else")
+        return shape
+
+    def get_test_pipeline(self, model, image_processor):
+        if image_processor is None:
+            self.skipTest("No image processor")
+
+        elif type(model.config) in TOKENIZER_MAPPING:
+            self.skipTest("This is a bimodal model, we need to find a more consistent way to switch on those models.")
+
+        elif model.config.is_encoder_decoder:
+            self.skipTest(
+                """encoder_decoder models are trickier for this pipeline.
+                Do we want encoder + decoder inputs to get some features?
+                Do we want encoder-only features?
+                For now ignore those.
+ """ + ) + + feature_extractor = ImageFeatureExtractionPipeline(model=model, image_processor=image_processor) + img = prepare_img() + return feature_extractor, [img, img] + + def run_pipeline_test(self, feature_extractor, examples): + img = prepare_img() + outputs = feature_extractor(img) + + shape = self.get_shape(outputs) + self.assertEqual(shape[0], 1) + + img = prepare_img() + outputs = feature_extractor([img, img]) + shape = self.get_shape(outputs) + self.assertEqual(shape[0], 2) + + outputs = feature_extractor(img.reshape(1000, 200), truncation=True) + shape = self.get_shape(outputs) + self.assertEqual(shape[0], 1) From ec40e7dfbe43e78c244a66a1c7111ee1e15d1c63 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Tue, 2 Jan 2024 17:44:07 +0000 Subject: [PATCH 02/25] Fixup --- .../pipelines/image_feature_extraction.py | 15 +++++++++++---- .../test_pipelines_image_feature_extraction.py | 3 +-- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py index 2ff3ae83a84d53..b770ef4c3fb1cc 100644 --- a/src/transformers/pipelines/image_feature_extraction.py +++ b/src/transformers/pipelines/image_feature_extraction.py @@ -32,8 +32,12 @@ class ImageFeatureExtractionPipeline(Pipeline): model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow. - tokenizer (`Optional`, *optional*): - feature_extractor (`Optional`, *optional*): + tokenizer (`Optional`, *optional*): + The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from + [`PreTrainedTokenizer`]. + feature_extractor (`Optional`, *optional*): + The feature extractor that will be used by the pipeline to extract features from the model. This object + inherits from [`PreTrainedFeatureExtractor`]. image_processor ([`PreTrainedImageProcessor`], *optional*): The image processor that will be used by the pipeline to encode data for the model. This object inherits from [`PreTrainedImageProcessor`]. @@ -53,8 +57,11 @@ class ImageFeatureExtractionPipeline(Pipeline): device (`int`, *optional*, defaults to -1): Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id. - torch_dtype (`Union`, *optional*): - binary_output (`bool`, *optional*, defaults to `False`): + torch_dtype (`str` or `torch.dtype`, *optional*): + Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model + (`torch.float16`, `torch.bfloat16`, ... or `"auto"`). + binary_output (`bool`, *optional*, defaults to `False`): + Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text. 
""" def _sanitize_parameters(self, return_tensors=None, **kwargs): diff --git a/tests/pipelines/test_pipelines_image_feature_extraction.py b/tests/pipelines/test_pipelines_image_feature_extraction.py index 4d8c2c6bb183a1..1379ddbde60ce0 100644 --- a/tests/pipelines/test_pipelines_image_feature_extraction.py +++ b/tests/pipelines/test_pipelines_image_feature_extraction.py @@ -18,9 +18,9 @@ from PIL import Image from transformers import ( - TOKENIZER_MAPPING, MODEL_MAPPING, TF_MODEL_MAPPING, + TOKENIZER_MAPPING, ImageFeatureExtractionPipeline, is_tf_available, is_torch_available, @@ -42,7 +42,6 @@ def prepare_img(): return image - @is_pipeline_test class FeatureExtractionPipelineTests(unittest.TestCase): model_mapping = MODEL_MAPPING From 26467fccb5d8bab5b0ac22c921448e58601ab11a Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Tue, 2 Jan 2024 17:57:56 +0000 Subject: [PATCH 03/25] Fix docstrings --- src/transformers/pipelines/image_feature_extraction.py | 8 -------- utils/check_docstrings.py | 1 + 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py index b770ef4c3fb1cc..bff442ad3c86e9 100644 --- a/src/transformers/pipelines/image_feature_extraction.py +++ b/src/transformers/pipelines/image_feature_extraction.py @@ -32,12 +32,6 @@ class ImageFeatureExtractionPipeline(Pipeline): model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow. - tokenizer (`Optional`, *optional*): - The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from - [`PreTrainedTokenizer`]. - feature_extractor (`Optional`, *optional*): - The feature extractor that will be used by the pipeline to extract features from the model. This object - inherits from [`PreTrainedFeatureExtractor`]. image_processor ([`PreTrainedImageProcessor`], *optional*): The image processor that will be used by the pipeline to encode data for the model. This object inherits from [`PreTrainedImageProcessor`]. @@ -60,8 +54,6 @@ class ImageFeatureExtractionPipeline(Pipeline): torch_dtype (`str` or `torch.dtype`, *optional*): Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model (`torch.float16`, `torch.bfloat16`, ... or `"auto"`). - binary_output (`bool`, *optional*, defaults to `False`): - Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text. 
""" def _sanitize_parameters(self, return_tensors=None, **kwargs): diff --git a/utils/check_docstrings.py b/utils/check_docstrings.py index 8a4394f6afb384..7c895163d95988 100644 --- a/utils/check_docstrings.py +++ b/utils/check_docstrings.py @@ -324,6 +324,7 @@ "IdeficsConfig", "IdeficsProcessor", "ImageClassificationPipeline", + "ImageFeatureExtractionPipeline", "ImageGPTConfig", "ImageSegmentationPipeline", "ImageToImagePipeline", From 1d0a6dd58d745c97695bdac2644d12b4cf8cfbce Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Tue, 2 Jan 2024 19:08:54 +0000 Subject: [PATCH 04/25] Update doctest --- .../pipelines/image_feature_extraction.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py index bff442ad3c86e9..cbf27f9a2c6503 100644 --- a/src/transformers/pipelines/image_feature_extraction.py +++ b/src/transformers/pipelines/image_feature_extraction.py @@ -1,8 +1,13 @@ from typing import Dict +from ..utils import is_vision_available from .base import GenericTensor, Pipeline +if is_vision_available(): + from ..image_utils import load_image + + # Can't use @add_end_docstrings(PIPELINE_INIT_ARGS) here because this one does not accept `binary_output` class ImageFeatureExtractionPipeline(Pipeline): """ @@ -14,10 +19,10 @@ class ImageFeatureExtractionPipeline(Pipeline): ```python >>> from transformers import pipeline - >>> extractor = pipeline(model="bert-base-uncased", task="image-feature-extraction") - >>> result = extractor("This is a simple test.", return_tensors=True) + >>> extractor = pipeline(model="google/vit-base-patch16-224", task="image-feature-extraction") + >>> result = extractor("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", return_tensors=True) >>> result.shape # This is a tensor of shape [1, sequence_lenth, hidden_dimension] representing the input string. 
- torch.Size([1, 8, 768]) + torch.Size([1, 197, 768]) ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) @@ -65,7 +70,8 @@ def _sanitize_parameters(self, return_tensors=None, **kwargs): return preprocess_params, {}, postprocess_params - def preprocess(self, image, **preprocess_kwargs) -> Dict[str, GenericTensor]: + def preprocess(self, image, timeout=None, **preprocess_kwargs) -> Dict[str, GenericTensor]: + image = load_image(image, timeout=timeout) model_inputs = self.image_processor(image, return_tensors=self.framework, **preprocess_kwargs) return model_inputs From 81303dc7538e699beff0dcab22b0f1392fe729f3 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Wed, 3 Jan 2024 12:40:38 +0000 Subject: [PATCH 05/25] Update pipeline_model_mapping --- tests/models/align/test_modeling_align.py | 6 +++++- tests/models/beit/test_modeling_beit.py | 2 +- tests/models/bit/test_modeling_bit.py | 2 +- tests/models/blip/test_modeling_blip.py | 6 +++++- tests/models/blip_2/test_modeling_blip_2.py | 1 + tests/models/chinese_clip/test_modeling_chinese_clip.py | 6 +++++- tests/models/clip/test_modeling_clip.py | 4 +++- tests/models/clipseg/test_modeling_clipseg.py | 6 +++++- .../conditional_detr/test_modeling_conditional_detr.py | 2 +- tests/models/convnext/test_modeling_convnext.py | 2 +- tests/models/convnextv2/test_modeling_convnextv2.py | 2 +- tests/models/cvt/test_modeling_cvt.py | 2 +- tests/models/data2vec/test_modeling_data2vec_vision.py | 2 +- .../deformable_detr/test_modeling_deformable_detr.py | 2 +- tests/models/deit/test_modeling_deit.py | 2 +- tests/models/deta/test_modeling_deta.py | 2 +- tests/models/detr/test_modeling_detr.py | 2 +- tests/models/dinat/test_modeling_dinat.py | 2 +- tests/models/dinov2/test_modeling_dinov2.py | 2 +- tests/models/donut/test_modeling_donut_swin.py | 2 +- tests/models/dpt/test_modeling_dpt.py | 2 +- .../efficientformer/test_modeling_efficientformer.py | 2 +- tests/models/efficientnet/test_modeling_efficientnet.py | 2 +- tests/models/flava/test_modeling_flava.py | 2 +- tests/models/focalnet/test_modeling_focalnet.py | 2 +- tests/models/git/test_modeling_git.py | 7 ++++++- tests/models/glpn/test_modeling_glpn.py | 4 +++- tests/models/groupvit/test_modeling_groupvit.py | 6 +++++- tests/models/imagegpt/test_modeling_imagegpt.py | 2 +- tests/models/levit/test_modeling_levit.py | 2 +- tests/models/mask2former/test_modeling_mask2former.py | 2 +- tests/models/maskformer/test_modeling_maskformer.py | 2 +- tests/models/mgp_str/test_modeling_mgp_str.py | 8 ++++++-- tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py | 2 +- tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py | 2 +- tests/models/mobilevit/test_modeling_mobilevit.py | 2 +- tests/models/mobilevitv2/test_modeling_mobilevitv2.py | 2 +- tests/models/nat/test_modeling_nat.py | 2 +- tests/models/owlv2/test_modeling_owlv2.py | 6 +++++- tests/models/owlvit/test_modeling_owlvit.py | 6 +++++- tests/models/poolformer/test_modeling_poolformer.py | 2 +- tests/models/pvt/test_modeling_pvt.py | 2 +- tests/models/regnet/test_modeling_regnet.py | 2 +- tests/models/resnet/test_modeling_resnet.py | 2 +- tests/models/segformer/test_modeling_segformer.py | 2 +- tests/models/swiftformer/test_modeling_swiftformer.py | 2 +- tests/models/swin/test_modeling_swin.py | 2 +- tests/models/swin2sr/test_modeling_swin2sr.py | 2 +- tests/models/swinv2/test_modeling_swinv2.py | 2 +- .../table_transformer/test_modeling_table_transformer.py | 2 
+- tests/models/vilt/test_modeling_vilt.py | 2 +- tests/models/vit/test_modeling_vit.py | 2 +- tests/models/vit_hybrid/test_modeling_vit_hybrid.py | 2 +- tests/models/vit_mae/test_modeling_vit_mae.py | 2 +- tests/models/vit_msn/test_modeling_vit_msn.py | 2 +- tests/models/x_clip/test_modeling_x_clip.py | 6 +++++- tests/models/yolos/test_modeling_yolos.py | 4 +++- 57 files changed, 105 insertions(+), 57 deletions(-) diff --git a/tests/models/align/test_modeling_align.py b/tests/models/align/test_modeling_align.py index 99daeb816d2ddb..fb6ac35a0d3f51 100644 --- a/tests/models/align/test_modeling_align.py +++ b/tests/models/align/test_modeling_align.py @@ -447,7 +447,11 @@ def prepare_config_and_inputs_for_common(self): @require_torch class AlignModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (AlignModel,) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": AlignModel} if is_torch_available() else {} + pipeline_model_mapping = ( + {"feature-extraction": AlignModel, "image-feature-extraction": AlignVisionModel} + if is_torch_available() + else {} + ) fx_compatible = False test_head_masking = False test_pruning = False diff --git a/tests/models/beit/test_modeling_beit.py b/tests/models/beit/test_modeling_beit.py index fdf4607d62693f..40b0d6aa0bd38d 100644 --- a/tests/models/beit/test_modeling_beit.py +++ b/tests/models/beit/test_modeling_beit.py @@ -242,7 +242,7 @@ class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): ) pipeline_model_mapping = ( { - "feature-extraction": BeitModel, + "image-feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } diff --git a/tests/models/bit/test_modeling_bit.py b/tests/models/bit/test_modeling_bit.py index 03e2bd1095191d..1705aad976c091 100644 --- a/tests/models/bit/test_modeling_bit.py +++ b/tests/models/bit/test_modeling_bit.py @@ -162,7 +162,7 @@ class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": BitModel, "image-classification": BitForImageClassification} + {"image-feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/blip/test_modeling_blip.py b/tests/models/blip/test_modeling_blip.py index 4792757f9118f3..65fad78c14c5e3 100644 --- a/tests/models/blip/test_modeling_blip.py +++ b/tests/models/blip/test_modeling_blip.py @@ -429,7 +429,11 @@ def prepare_config_and_inputs_for_common(self): class BlipModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (BlipModel,) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": BlipModel, "image-to-text": BlipForConditionalGeneration} + { + "feature-extraction": BlipModel, + "image-feature-extraction": BlipVisionModel, + "image-to-text": BlipForConditionalGeneration, + } if is_torch_available() else {} ) diff --git a/tests/models/blip_2/test_modeling_blip_2.py b/tests/models/blip_2/test_modeling_blip_2.py index dd87961372d262..91bbbe16f2b9f3 100644 --- a/tests/models/blip_2/test_modeling_blip_2.py +++ b/tests/models/blip_2/test_modeling_blip_2.py @@ -686,6 +686,7 @@ class Blip2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_model_mapping = ( { "feature-extraction": Blip2Model, + 
"image-feature-extraction": Blip2VisionModel, "image-to-text": Blip2ForConditionalGeneration, "visual-question-answering": Blip2ForConditionalGeneration, } diff --git a/tests/models/chinese_clip/test_modeling_chinese_clip.py b/tests/models/chinese_clip/test_modeling_chinese_clip.py index 8d0eb131e2385b..ba376c1e480121 100644 --- a/tests/models/chinese_clip/test_modeling_chinese_clip.py +++ b/tests/models/chinese_clip/test_modeling_chinese_clip.py @@ -560,7 +560,11 @@ def prepare_config_and_inputs_for_common(self): @require_torch class ChineseCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ChineseCLIPModel,) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": ChineseCLIPModel} if is_torch_available() else {} + pipeline_model_mapping = ( + {"feature-extraction": ChineseCLIPModel, "image-feature-extraction": ChineseCLIPVisionModel} + if is_torch_available() + else {} + ) fx_compatible = False test_head_masking = False test_pruning = False diff --git a/tests/models/clip/test_modeling_clip.py b/tests/models/clip/test_modeling_clip.py index b96edcc56da76c..e3b87d966427b1 100644 --- a/tests/models/clip/test_modeling_clip.py +++ b/tests/models/clip/test_modeling_clip.py @@ -477,7 +477,9 @@ def prepare_config_and_inputs_for_common(self): @require_torch class CLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CLIPModel,) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": CLIPModel} if is_torch_available() else {} + pipeline_model_mapping = ( + {"feature-extraction": CLIPModel, "image-feature-extraction": CLIPVisionModel} if is_torch_available() else {} + ) fx_compatible = True test_head_masking = False test_pruning = False diff --git a/tests/models/clipseg/test_modeling_clipseg.py b/tests/models/clipseg/test_modeling_clipseg.py index 0ebf08da89f9a5..69e1a74a55b6d8 100644 --- a/tests/models/clipseg/test_modeling_clipseg.py +++ b/tests/models/clipseg/test_modeling_clipseg.py @@ -449,7 +449,11 @@ def prepare_config_and_inputs_for_common(self): @require_torch class CLIPSegModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CLIPSegModel, CLIPSegForImageSegmentation) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": CLIPSegModel} if is_torch_available() else {} + pipeline_model_mapping = ( + {"feature-extraction": CLIPSegModel, "image-feature-extraction": CLIPSegVisionModel} + if is_torch_available() + else {} + ) fx_compatible = False test_head_masking = False test_pruning = False diff --git a/tests/models/conditional_detr/test_modeling_conditional_detr.py b/tests/models/conditional_detr/test_modeling_conditional_detr.py index aa0318f241aa92..f297634a2e7553 100644 --- a/tests/models/conditional_detr/test_modeling_conditional_detr.py +++ b/tests/models/conditional_detr/test_modeling_conditional_detr.py @@ -185,7 +185,7 @@ class ConditionalDetrModelTest(ModelTesterMixin, GenerationTesterMixin, Pipeline else () ) pipeline_model_mapping = ( - {"feature-extraction": ConditionalDetrModel, "object-detection": ConditionalDetrForObjectDetection} + {"image-feature-extraction": ConditionalDetrModel, "object-detection": ConditionalDetrForObjectDetection} if is_torch_available() else {} ) diff --git a/tests/models/convnext/test_modeling_convnext.py b/tests/models/convnext/test_modeling_convnext.py index ac2b6f927c8dfc..a56c38e3876b50 100644 --- a/tests/models/convnext/test_modeling_convnext.py +++ 
b/tests/models/convnext/test_modeling_convnext.py @@ -172,7 +172,7 @@ class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase else () ) pipeline_model_mapping = ( - {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification} + {"image-feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/convnextv2/test_modeling_convnextv2.py b/tests/models/convnextv2/test_modeling_convnextv2.py index 694901a1846994..b13028dba8045d 100644 --- a/tests/models/convnextv2/test_modeling_convnextv2.py +++ b/tests/models/convnextv2/test_modeling_convnextv2.py @@ -180,7 +180,7 @@ class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa else () ) pipeline_model_mapping = ( - {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification} + {"image-feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/cvt/test_modeling_cvt.py b/tests/models/cvt/test_modeling_cvt.py index 4abeb5571c7b58..aef8108e1766c4 100644 --- a/tests/models/cvt/test_modeling_cvt.py +++ b/tests/models/cvt/test_modeling_cvt.py @@ -151,7 +151,7 @@ class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification} + {"image-feature-extraction": CvtModel, "image-classification": CvtForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/data2vec/test_modeling_data2vec_vision.py b/tests/models/data2vec/test_modeling_data2vec_vision.py index bdb95588ac5cb7..20733cb2e428f6 100644 --- a/tests/models/data2vec/test_modeling_data2vec_vision.py +++ b/tests/models/data2vec/test_modeling_data2vec_vision.py @@ -178,7 +178,7 @@ class Data2VecVisionModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Te ) pipeline_model_mapping = ( { - "feature-extraction": Data2VecVisionModel, + "image-feature-extraction": Data2VecVisionModel, "image-classification": Data2VecVisionForImageClassification, "image-segmentation": Data2VecVisionForSemanticSegmentation, } diff --git a/tests/models/deformable_detr/test_modeling_deformable_detr.py b/tests/models/deformable_detr/test_modeling_deformable_detr.py index 38c42c55c34298..336f2437c4e7ae 100644 --- a/tests/models/deformable_detr/test_modeling_deformable_detr.py +++ b/tests/models/deformable_detr/test_modeling_deformable_detr.py @@ -191,7 +191,7 @@ def create_and_check_deformable_detr_object_detection_head_model(self, config, p class DeformableDetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DeformableDetrModel, DeformableDetrForObjectDetection) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": DeformableDetrModel, "object-detection": DeformableDetrForObjectDetection} + {"image-feature-extraction": DeformableDetrModel, "object-detection": DeformableDetrForObjectDetection} if is_torch_available() else {} ) diff --git a/tests/models/deit/test_modeling_deit.py b/tests/models/deit/test_modeling_deit.py index 9cd5be8fd3752c..87ac1690966003 100644 --- a/tests/models/deit/test_modeling_deit.py +++ b/tests/models/deit/test_modeling_deit.py @@ -206,7 +206,7 @@ class 
DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): ) pipeline_model_mapping = ( { - "feature-extraction": DeiTModel, + "image-feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() diff --git a/tests/models/deta/test_modeling_deta.py b/tests/models/deta/test_modeling_deta.py index d8e16fca4982e6..3a3a957dd012e2 100644 --- a/tests/models/deta/test_modeling_deta.py +++ b/tests/models/deta/test_modeling_deta.py @@ -217,7 +217,7 @@ def create_and_check_deta_object_detection_head_model(self, config, pixel_values class DetaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DetaModel, DetaForObjectDetection) if is_torchvision_available() else () pipeline_model_mapping = ( - {"feature-extraction": DetaModel, "object-detection": DetaForObjectDetection} + {"image-feature-extraction": DetaModel, "object-detection": DetaForObjectDetection} if is_torchvision_available() else {} ) diff --git a/tests/models/detr/test_modeling_detr.py b/tests/models/detr/test_modeling_detr.py index de30d9db9b1409..02159795e823cf 100644 --- a/tests/models/detr/test_modeling_detr.py +++ b/tests/models/detr/test_modeling_detr.py @@ -182,7 +182,7 @@ class DetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin ) pipeline_model_mapping = ( { - "feature-extraction": DetrModel, + "image-feature-extraction": DetrModel, "image-segmentation": DetrForSegmentation, "object-detection": DetrForObjectDetection, } diff --git a/tests/models/dinat/test_modeling_dinat.py b/tests/models/dinat/test_modeling_dinat.py index c824060cf816b2..c29339881eb495 100644 --- a/tests/models/dinat/test_modeling_dinat.py +++ b/tests/models/dinat/test_modeling_dinat.py @@ -207,7 +207,7 @@ class DinatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": DinatModel, "image-classification": DinatForImageClassification} + {"image-feature-extraction": DinatModel, "image-classification": DinatForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/dinov2/test_modeling_dinov2.py b/tests/models/dinov2/test_modeling_dinov2.py index 8e68165754b0ed..f0365cac2a59ee 100644 --- a/tests/models/dinov2/test_modeling_dinov2.py +++ b/tests/models/dinov2/test_modeling_dinov2.py @@ -217,7 +217,7 @@ class Dinov2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": Dinov2Model, "image-classification": Dinov2ForImageClassification} + {"image-feature-extraction": Dinov2Model, "image-classification": Dinov2ForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/donut/test_modeling_donut_swin.py b/tests/models/donut/test_modeling_donut_swin.py index e52e679e42e682..23b7094d9b743f 100644 --- a/tests/models/donut/test_modeling_donut_swin.py +++ b/tests/models/donut/test_modeling_donut_swin.py @@ -145,7 +145,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch class DonutSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DonutSwinModel,) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": DonutSwinModel} if is_torch_available() else {} + pipeline_model_mapping = {"image-feature-extraction": DonutSwinModel} if is_torch_available() else {} fx_compatible = True test_pruning = False diff --git 
a/tests/models/dpt/test_modeling_dpt.py b/tests/models/dpt/test_modeling_dpt.py index 0b398c923e686f..2c092062791f7d 100644 --- a/tests/models/dpt/test_modeling_dpt.py +++ b/tests/models/dpt/test_modeling_dpt.py @@ -163,7 +163,7 @@ class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_model_mapping = ( { "depth-estimation": DPTForDepthEstimation, - "feature-extraction": DPTModel, + "image-feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() diff --git a/tests/models/efficientformer/test_modeling_efficientformer.py b/tests/models/efficientformer/test_modeling_efficientformer.py index 73283fbbf60026..2d6176960a5c5f 100644 --- a/tests/models/efficientformer/test_modeling_efficientformer.py +++ b/tests/models/efficientformer/test_modeling_efficientformer.py @@ -190,7 +190,7 @@ class EfficientFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.T ) pipeline_model_mapping = ( { - "feature-extraction": EfficientFormerModel, + "image-feature-extraction": EfficientFormerModel, "image-classification": ( EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, diff --git a/tests/models/efficientnet/test_modeling_efficientnet.py b/tests/models/efficientnet/test_modeling_efficientnet.py index 32050e3d21a5e1..bd16566daed5f5 100644 --- a/tests/models/efficientnet/test_modeling_efficientnet.py +++ b/tests/models/efficientnet/test_modeling_efficientnet.py @@ -130,7 +130,7 @@ class EfficientNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Test all_model_classes = (EfficientNetModel, EfficientNetForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": EfficientNetModel, "image-classification": EfficientNetForImageClassification} + {"image-feature-extraction": EfficientNetModel, "image-classification": EfficientNetForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/flava/test_modeling_flava.py b/tests/models/flava/test_modeling_flava.py index 48a070d9fe3137..a7ea933a97817e 100644 --- a/tests/models/flava/test_modeling_flava.py +++ b/tests/models/flava/test_modeling_flava.py @@ -921,7 +921,7 @@ def _test_model(self, config, inputs, test_image=False, test_text=False): @require_torch class FlavaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FlavaModel,) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": FlavaModel} if is_torch_available() else {} + pipeline_model_mapping = {"image-feature-extraction": FlavaModel} if is_torch_available() else {} class_for_tester = FlavaModelTester test_head_masking = False test_pruning = False diff --git a/tests/models/focalnet/test_modeling_focalnet.py b/tests/models/focalnet/test_modeling_focalnet.py index 6de095d975234d..2b6f8cf9ab1522 100644 --- a/tests/models/focalnet/test_modeling_focalnet.py +++ b/tests/models/focalnet/test_modeling_focalnet.py @@ -238,7 +238,7 @@ class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase else () ) pipeline_model_mapping = ( - {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} + {"image-feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/git/test_modeling_git.py b/tests/models/git/test_modeling_git.py index c503abfb89db1a..7df3a010b890cf 100644 --- 
a/tests/models/git/test_modeling_git.py +++ b/tests/models/git/test_modeling_git.py @@ -399,7 +399,12 @@ class GitModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, all_model_classes = (GitModel, GitForCausalLM) if is_torch_available() else () all_generative_model_classes = (GitForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": GitModel, "image-to-text": GitForCausalLM, "text-generation": GitForCausalLM} + { + "feature-extraction": GitModel, + "image-to-text": GitForCausalLM, + "text-generation": GitForCausalLM, + "image-feature-extraction": GitVisionModel, + } if is_torch_available() else {} ) diff --git a/tests/models/glpn/test_modeling_glpn.py b/tests/models/glpn/test_modeling_glpn.py index 138a8cf2832eef..90f8996984d32c 100644 --- a/tests/models/glpn/test_modeling_glpn.py +++ b/tests/models/glpn/test_modeling_glpn.py @@ -146,7 +146,9 @@ def prepare_config_and_inputs_for_common(self): class GLPNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GLPNModel, GLPNForDepthEstimation) if is_torch_available() else () pipeline_model_mapping = ( - {"depth-estimation": GLPNForDepthEstimation, "feature-extraction": GLPNModel} if is_torch_available() else {} + {"depth-estimation": GLPNForDepthEstimation, "image-feature-extraction": GLPNModel} + if is_torch_available() + else {} ) test_head_masking = False diff --git a/tests/models/groupvit/test_modeling_groupvit.py b/tests/models/groupvit/test_modeling_groupvit.py index 3d7f50ae6eb62f..5aead0c3141fcc 100644 --- a/tests/models/groupvit/test_modeling_groupvit.py +++ b/tests/models/groupvit/test_modeling_groupvit.py @@ -548,7 +548,11 @@ def prepare_config_and_inputs_for_common(self): @require_torch class GroupViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GroupViTModel,) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": GroupViTModel} if is_torch_available() else {} + pipeline_model_mapping = ( + {"feature-extraction": GroupViTModel, "image-feature-extraction": GroupViTVisionModel} + if is_torch_available() + else {} + ) test_head_masking = False test_pruning = False test_resize_embeddings = False diff --git a/tests/models/imagegpt/test_modeling_imagegpt.py b/tests/models/imagegpt/test_modeling_imagegpt.py index ad8c8d290e6715..40ea7ce0f4f559 100644 --- a/tests/models/imagegpt/test_modeling_imagegpt.py +++ b/tests/models/imagegpt/test_modeling_imagegpt.py @@ -271,7 +271,7 @@ class ImageGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterM ) all_generative_model_classes = (ImageGPTForCausalImageModeling,) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": ImageGPTModel, "image-classification": ImageGPTForImageClassification} + {"image-feature-extraction": ImageGPTModel, "image-classification": ImageGPTForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/levit/test_modeling_levit.py b/tests/models/levit/test_modeling_levit.py index d569b2b5385235..b6d9832704a521 100644 --- a/tests/models/levit/test_modeling_levit.py +++ b/tests/models/levit/test_modeling_levit.py @@ -176,7 +176,7 @@ class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): ) pipeline_model_mapping = ( { - "feature-extraction": LevitModel, + "image-feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() diff 
--git a/tests/models/mask2former/test_modeling_mask2former.py b/tests/models/mask2former/test_modeling_mask2former.py index fd9a513ab03263..d4167cfffe644c 100644 --- a/tests/models/mask2former/test_modeling_mask2former.py +++ b/tests/models/mask2former/test_modeling_mask2former.py @@ -197,7 +197,7 @@ def comm_check_on_output(result): @require_torch class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {} + pipeline_model_mapping = {"image-feature-extraction": Mask2FormerModel} if is_torch_available() else {} is_encoder_decoder = False test_pruning = False diff --git a/tests/models/maskformer/test_modeling_maskformer.py b/tests/models/maskformer/test_modeling_maskformer.py index 16ff3caed47504..d376216040591e 100644 --- a/tests/models/maskformer/test_modeling_maskformer.py +++ b/tests/models/maskformer/test_modeling_maskformer.py @@ -197,7 +197,7 @@ def comm_check_on_output(result): class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} + {"image-feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) diff --git a/tests/models/mgp_str/test_modeling_mgp_str.py b/tests/models/mgp_str/test_modeling_mgp_str.py index a7fd95a1311c5c..b2c3cb1400e49d 100644 --- a/tests/models/mgp_str/test_modeling_mgp_str.py +++ b/tests/models/mgp_str/test_modeling_mgp_str.py @@ -31,7 +31,7 @@ import torch from torch import nn - from transformers import MgpstrForSceneTextRecognition + from transformers import MgpstrForSceneTextRecognition, MgpstrModel if is_vision_available(): @@ -118,7 +118,11 @@ def prepare_config_and_inputs_for_common(self): @require_torch class MgpstrModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MgpstrForSceneTextRecognition,) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": MgpstrForSceneTextRecognition} if is_torch_available() else {} + pipeline_model_mapping = ( + {"feature-extraction": MgpstrForSceneTextRecognition, "image-feature-extraction": MgpstrModel} + if is_torch_available() + else {} + ) fx_compatible = False test_pruning = False diff --git a/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py b/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py index 35848da3161d51..6262475b8d0c71 100644 --- a/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py +++ b/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py @@ -147,7 +147,7 @@ class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification} + {"image-feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py b/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py index 
bbd83408853ceb..75580bfdf2b232 100644 --- a/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py +++ b/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py @@ -195,7 +195,7 @@ class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC ) pipeline_model_mapping = ( { - "feature-extraction": MobileNetV2Model, + "image-feature-extraction": MobileNetV2Model, "image-classification": MobileNetV2ForImageClassification, "image-segmentation": MobileNetV2ForSemanticSegmentation, } diff --git a/tests/models/mobilevit/test_modeling_mobilevit.py b/tests/models/mobilevit/test_modeling_mobilevit.py index 563bee802322d0..fc2ea5eba38321 100644 --- a/tests/models/mobilevit/test_modeling_mobilevit.py +++ b/tests/models/mobilevit/test_modeling_mobilevit.py @@ -188,7 +188,7 @@ class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas ) pipeline_model_mapping = ( { - "feature-extraction": MobileViTModel, + "image-feature-extraction": MobileViTModel, "image-classification": MobileViTForImageClassification, "image-segmentation": MobileViTForSemanticSegmentation, } diff --git a/tests/models/mobilevitv2/test_modeling_mobilevitv2.py b/tests/models/mobilevitv2/test_modeling_mobilevitv2.py index 192cf3a9e1e896..1fb6be94a2400c 100644 --- a/tests/models/mobilevitv2/test_modeling_mobilevitv2.py +++ b/tests/models/mobilevitv2/test_modeling_mobilevitv2.py @@ -190,7 +190,7 @@ class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC pipeline_model_mapping = ( { - "feature-extraction": MobileViTV2Model, + "image-feature-extraction": MobileViTV2Model, "image-classification": MobileViTV2ForImageClassification, "image-segmentation": MobileViTV2ForSemanticSegmentation, } diff --git a/tests/models/nat/test_modeling_nat.py b/tests/models/nat/test_modeling_nat.py index 3ab49d2d9557fa..cbdbfc83c5e0ad 100644 --- a/tests/models/nat/test_modeling_nat.py +++ b/tests/models/nat/test_modeling_nat.py @@ -204,7 +204,7 @@ class NatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": NatModel, "image-classification": NatForImageClassification} + {"image-feature-extraction": NatModel, "image-classification": NatForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/owlv2/test_modeling_owlv2.py b/tests/models/owlv2/test_modeling_owlv2.py index 8dbf3fcde89bbc..ddc634421f2cba 100644 --- a/tests/models/owlv2/test_modeling_owlv2.py +++ b/tests/models/owlv2/test_modeling_owlv2.py @@ -433,7 +433,11 @@ def prepare_config_and_inputs_for_common(self): class Owlv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Owlv2Model,) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": Owlv2Model, "zero-shot-object-detection": Owlv2ForObjectDetection} + { + "feature-extraction": Owlv2Model, + "zero-shot-object-detection": Owlv2ForObjectDetection, + "image-feature-extraction": Owlv2VisionModel, + } if is_torch_available() else {} ) diff --git a/tests/models/owlvit/test_modeling_owlvit.py b/tests/models/owlvit/test_modeling_owlvit.py index 8edbf411f7b94e..2c8577cad44bfa 100644 --- a/tests/models/owlvit/test_modeling_owlvit.py +++ b/tests/models/owlvit/test_modeling_owlvit.py @@ -428,7 +428,11 @@ def prepare_config_and_inputs_for_common(self): class OwlViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (OwlViTModel,) if is_torch_available() else () pipeline_model_mapping = ( - 
{"feature-extraction": OwlViTModel, "zero-shot-object-detection": OwlViTForObjectDetection} + { + "feature-extraction": OwlViTModel, + "zero-shot-object-detection": OwlViTForObjectDetection, + "image-feature-extraction": OwlViTVisionModel, + } if is_torch_available() else {} ) diff --git a/tests/models/poolformer/test_modeling_poolformer.py b/tests/models/poolformer/test_modeling_poolformer.py index 070564e718bf1e..e387053f110ada 100644 --- a/tests/models/poolformer/test_modeling_poolformer.py +++ b/tests/models/poolformer/test_modeling_poolformer.py @@ -124,7 +124,7 @@ def prepare_config_and_inputs_for_common(self): class PoolFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (PoolFormerModel, PoolFormerForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": PoolFormerModel, "image-classification": PoolFormerForImageClassification} + {"image-feature-extraction": PoolFormerModel, "image-classification": PoolFormerForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/pvt/test_modeling_pvt.py b/tests/models/pvt/test_modeling_pvt.py index e174b67a07887c..d17041ecfaa55f 100644 --- a/tests/models/pvt/test_modeling_pvt.py +++ b/tests/models/pvt/test_modeling_pvt.py @@ -158,7 +158,7 @@ def prepare_img(): class PvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (PvtModel, PvtForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": PvtModel, "image-classification": PvtForImageClassification} + {"image-feature-extraction": PvtModel, "image-classification": PvtForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/regnet/test_modeling_regnet.py b/tests/models/regnet/test_modeling_regnet.py index 9840575f317ecd..420609bf0300f0 100644 --- a/tests/models/regnet/test_modeling_regnet.py +++ b/tests/models/regnet/test_modeling_regnet.py @@ -126,7 +126,7 @@ class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification} + {"image-feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/resnet/test_modeling_resnet.py b/tests/models/resnet/test_modeling_resnet.py index bae9eb6d24c8cb..543013bc41b063 100644 --- a/tests/models/resnet/test_modeling_resnet.py +++ b/tests/models/resnet/test_modeling_resnet.py @@ -170,7 +170,7 @@ class ResNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": ResNetModel, "image-classification": ResNetForImageClassification} + {"image-feature-extraction": ResNetModel, "image-classification": ResNetForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/segformer/test_modeling_segformer.py b/tests/models/segformer/test_modeling_segformer.py index d9a4dce9ffeb3c..8cb7cbad42f2d0 100644 --- a/tests/models/segformer/test_modeling_segformer.py +++ b/tests/models/segformer/test_modeling_segformer.py @@ -171,7 +171,7 @@ class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas ) pipeline_model_mapping = ( { - "feature-extraction": SegformerModel, + "image-feature-extraction": SegformerModel, 
"image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } diff --git a/tests/models/swiftformer/test_modeling_swiftformer.py b/tests/models/swiftformer/test_modeling_swiftformer.py index 83b6aa3510d925..a1e6229d5a6e81 100644 --- a/tests/models/swiftformer/test_modeling_swiftformer.py +++ b/tests/models/swiftformer/test_modeling_swiftformer.py @@ -139,7 +139,7 @@ class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} + {"image-feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/swin/test_modeling_swin.py b/tests/models/swin/test_modeling_swin.py index e82c13f8db2744..cd0b99fdc986a2 100644 --- a/tests/models/swin/test_modeling_swin.py +++ b/tests/models/swin/test_modeling_swin.py @@ -232,7 +232,7 @@ class SwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": SwinModel, "image-classification": SwinForImageClassification} + {"image-feature-extraction": SwinModel, "image-classification": SwinForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/swin2sr/test_modeling_swin2sr.py b/tests/models/swin2sr/test_modeling_swin2sr.py index 581e8debc7e7d7..556b65a249a22f 100644 --- a/tests/models/swin2sr/test_modeling_swin2sr.py +++ b/tests/models/swin2sr/test_modeling_swin2sr.py @@ -162,7 +162,7 @@ def prepare_config_and_inputs_for_common(self): class Swin2SRModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Swin2SRModel, Swin2SRForImageSuperResolution) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": Swin2SRModel, "image-to-image": Swin2SRForImageSuperResolution} + {"image-feature-extraction": Swin2SRModel, "image-to-image": Swin2SRForImageSuperResolution} if is_torch_available() else {} ) diff --git a/tests/models/swinv2/test_modeling_swinv2.py b/tests/models/swinv2/test_modeling_swinv2.py index ebe05a9a71b4b5..73f731cd60abbb 100644 --- a/tests/models/swinv2/test_modeling_swinv2.py +++ b/tests/models/swinv2/test_modeling_swinv2.py @@ -217,7 +217,7 @@ class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification} + {"image-feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/table_transformer/test_modeling_table_transformer.py b/tests/models/table_transformer/test_modeling_table_transformer.py index bb869d9422bb5a..eb5e80c93886b9 100644 --- a/tests/models/table_transformer/test_modeling_table_transformer.py +++ b/tests/models/table_transformer/test_modeling_table_transformer.py @@ -200,7 +200,7 @@ class TableTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, Pipelin else () ) pipeline_model_mapping = ( - {"feature-extraction": TableTransformerModel, "object-detection": TableTransformerForObjectDetection} + {"image-feature-extraction": TableTransformerModel, "object-detection": TableTransformerForObjectDetection} if is_torch_available() else {} ) diff --git 
a/tests/models/vilt/test_modeling_vilt.py b/tests/models/vilt/test_modeling_vilt.py index 853701e3a8ea78..e17d6ce61b302f 100644 --- a/tests/models/vilt/test_modeling_vilt.py +++ b/tests/models/vilt/test_modeling_vilt.py @@ -228,7 +228,7 @@ class ViltModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": ViltModel, "visual-question-answering": ViltForQuestionAnswering} + {"image-feature-extraction": ViltModel, "visual-question-answering": ViltForQuestionAnswering} if is_torch_available() else {} ) diff --git a/tests/models/vit/test_modeling_vit.py b/tests/models/vit/test_modeling_vit.py index 2e9a632a3719d4..c8181d2c2b5a2e 100644 --- a/tests/models/vit/test_modeling_vit.py +++ b/tests/models/vit/test_modeling_vit.py @@ -193,7 +193,7 @@ class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else () ) pipeline_model_mapping = ( - {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification} + {"image-feature-extraction": ViTModel, "image-classification": ViTForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/vit_hybrid/test_modeling_vit_hybrid.py b/tests/models/vit_hybrid/test_modeling_vit_hybrid.py index 567394c97942f4..2a8b5087f3966b 100644 --- a/tests/models/vit_hybrid/test_modeling_vit_hybrid.py +++ b/tests/models/vit_hybrid/test_modeling_vit_hybrid.py @@ -156,7 +156,7 @@ class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} + {"image-feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/vit_mae/test_modeling_vit_mae.py b/tests/models/vit_mae/test_modeling_vit_mae.py index 21a66b8a6d92a2..c1afc9694df561 100644 --- a/tests/models/vit_mae/test_modeling_vit_mae.py +++ b/tests/models/vit_mae/test_modeling_vit_mae.py @@ -164,7 +164,7 @@ class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {} + pipeline_model_mapping = {"image-feature-extraction": ViTMAEModel} if is_torch_available() else {} test_pruning = False test_torchscript = False diff --git a/tests/models/vit_msn/test_modeling_vit_msn.py b/tests/models/vit_msn/test_modeling_vit_msn.py index 96e107e7950ecc..a4cc370ec21c7a 100644 --- a/tests/models/vit_msn/test_modeling_vit_msn.py +++ b/tests/models/vit_msn/test_modeling_vit_msn.py @@ -152,7 +152,7 @@ class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} + {"image-feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} if is_torch_available() else {} ) diff --git a/tests/models/x_clip/test_modeling_x_clip.py b/tests/models/x_clip/test_modeling_x_clip.py index db28b41c0b39a3..6b43718e44f4ae 100644 --- a/tests/models/x_clip/test_modeling_x_clip.py +++ b/tests/models/x_clip/test_modeling_x_clip.py @@ -533,7 +533,11 @@ 
def prepare_config_and_inputs_for_common(self): @require_torch class XCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (XCLIPModel,) if is_torch_available() else () - pipeline_model_mapping = {"feature-extraction": XCLIPModel} if is_torch_available() else {} + pipeline_model_mapping = ( + {"feature-extraction": XCLIPModel, "image-feature-extraction": XCLIPVisionModel} + if is_torch_available() + else {} + ) fx_compatible = False test_head_masking = False test_pruning = False diff --git a/tests/models/yolos/test_modeling_yolos.py b/tests/models/yolos/test_modeling_yolos.py index 390a54ebc99c3a..4b2aff30948767 100644 --- a/tests/models/yolos/test_modeling_yolos.py +++ b/tests/models/yolos/test_modeling_yolos.py @@ -168,7 +168,9 @@ class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} + {"image-feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} + if is_torch_available() + else {} ) test_pruning = False From ba549d570cbf7eac0f6ae9055e4676a729fd3492 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Mon, 8 Jan 2024 17:08:46 +0000 Subject: [PATCH 06/25] Update docstring --- src/transformers/pipelines/image_feature_extraction.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py index cbf27f9a2c6503..55e5800cba27e8 100644 --- a/src/transformers/pipelines/image_feature_extraction.py +++ b/src/transformers/pipelines/image_feature_extraction.py @@ -8,7 +8,6 @@ from ..image_utils import load_image -# Can't use @add_end_docstrings(PIPELINE_INIT_ARGS) here because this one does not accept `binary_output` class ImageFeatureExtractionPipeline(Pipeline): """ Image feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base @@ -27,10 +26,10 @@ class ImageFeatureExtractionPipeline(Pipeline): Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) - This feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier: - `"feature-extraction"`. + This image feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier: + `"image-feature-extraction"`. - All models may be used for this pipeline. See a list of all models, including community-contributed models on + All vision models may be used for this pipeline. See a list of all models, including community-contributed models on [huggingface.co/models](https://huggingface.co/models). 
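In its final form, the docstring above maps to the following usage. A minimal sketch, assuming the default `google/vit-base-patch16-224` checkpoint registered for the `"image-feature-extraction"` task earlier in this series; exact feature values will vary by model:

```python
from transformers import pipeline

# Load by the new task identifier; the checkpoint is the task default
# registered in the pipeline registry patch of this series.
extractor = pipeline(task="image-feature-extraction", model="google/vit-base-patch16-224")

# Output is a nested list of floats shaped [batch, sequence_length, hidden_dim];
# for ViT-base that is 1 x 197 patch tokens x 768 hidden dimensions per image.
features = extractor("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
print(len(features[0]), len(features[0][0]))  # 197 768
```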
Arguments: From 1468c54544fef36fd7f6a09bef2b6a646d0c2808 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Mon, 8 Jan 2024 17:38:48 +0000 Subject: [PATCH 07/25] Update tests --- .../pipelines/image_feature_extraction.py | 4 ++-- ...test_pipelines_image_feature_extraction.py | 22 ++++++++++++++----- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py index 55e5800cba27e8..48674c8590066e 100644 --- a/src/transformers/pipelines/image_feature_extraction.py +++ b/src/transformers/pipelines/image_feature_extraction.py @@ -61,8 +61,8 @@ class ImageFeatureExtractionPipeline(Pipeline): """ def _sanitize_parameters(self, return_tensors=None, **kwargs): - preprocess_params = {} - postprocess_params = {} + preprocess_params = kwargs.pop("preprocess_kwargs", {}) + postprocess_params = kwargs.pop("postprocess_kwargs", {}) if return_tensors is not None: postprocess_params["return_tensors"] = return_tensors diff --git a/tests/pipelines/test_pipelines_image_feature_extraction.py b/tests/pipelines/test_pipelines_image_feature_extraction.py index 1379ddbde60ce0..95f2a20a5d8567 100644 --- a/tests/pipelines/test_pipelines_image_feature_extraction.py +++ b/tests/pipelines/test_pipelines_image_feature_extraction.py @@ -15,6 +15,7 @@ import unittest import numpy as np +import pytest from PIL import Image from transformers import ( @@ -74,17 +75,22 @@ def test_image_processing_small_model_pt(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt" ) - # test with empty parameters img = prepare_img() outputs = feature_extractor(img) self.assertEqual( nested_simplify(outputs[0][0]), [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip - # test with various tokenizer parameters + # test with image processor parameters preprocess_kwargs = {"size": {"height": 300, "width": 300}} img = prepare_img() - outputs = feature_extractor(img, preprocess_kwargs=preprocess_kwargs) + with pytest.raises(ValueError): + # Image doesn't match model input size + feature_extractor(img, preprocess_kwargs=preprocess_kwargs) + + preprocess_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]} + img = prepare_img() + feature_extractor(img, preprocess_kwargs=preprocess_kwargs) self.assertEqual(np.squeeze(outputs).shape, (226, 32)) @require_tf @@ -99,10 +105,16 @@ def test_image_processing_small_model_tf(self): nested_simplify(outputs[0][0]), [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip - # test with various tokenizer parameters + # test with image processor parameters preprocess_kwargs = {"size": {"height": 300, "width": 300}} img = prepare_img() - outputs = feature_extractor(img, preprocess_kwargs=preprocess_kwargs) + with pytest.raises(ValueError): + # Image doesn't match model input size + feature_extractor(img, preprocess_kwargs=preprocess_kwargs) + + preprocess_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]} + img = prepare_img() + feature_extractor(img, 
preprocess_kwargs=preprocess_kwargs) self.assertEqual(np.squeeze(outputs).shape, (226, 32)) @require_torch From 102422b813923afbedccb534f02516e25ee6e51f Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Wed, 10 Jan 2024 13:25:56 +0000 Subject: [PATCH 08/25] Update src/transformers/pipelines/image_feature_extraction.py Co-authored-by: Omar Sanseviero --- src/transformers/pipelines/image_feature_extraction.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py index 48674c8590066e..1a7bb70bad5611 100644 --- a/src/transformers/pipelines/image_feature_extraction.py +++ b/src/transformers/pipelines/image_feature_extraction.py @@ -64,6 +64,8 @@ def _sanitize_parameters(self, return_tensors=None, **kwargs): preprocess_params = kwargs.pop("preprocess_kwargs", {}) postprocess_params = kwargs.pop("postprocess_kwargs", {}) + if "timeout" in kwargs: + preprocess_params["timeout"] = kwargs["timeout"] if return_tensors is not None: postprocess_params["return_tensors"] = return_tensors From 466ce4e50779bdad634cd045b2ff33200afd0d2e Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Wed, 10 Jan 2024 16:13:09 +0000 Subject: [PATCH 09/25] Fix docstrings - review comments --- .../pipelines/image_feature_extraction.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py index 1a7bb70bad5611..3ad2b2e9af2b6e 100644 --- a/src/transformers/pipelines/image_feature_extraction.py +++ b/src/transformers/pipelines/image_feature_extraction.py @@ -48,8 +48,6 @@ class ImageFeatureExtractionPipeline(Pipeline): If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is provided. - task (`str`, defaults to `""`, *optional*, defaults to `""`): - A task-identifier for the pipeline. args_parser ([`~pipelines.ArgumentHandler`], *optional*): Reference to the object in charge of parsing supplied pipeline parameters. device (`int`, *optional*, defaults to -1): @@ -94,8 +92,19 @@ def __call__(self, *args, **kwargs): Extract the features of the input(s). Args: - args (`str` or `List[str]`): One or several texts (or one list of texts) to get the features of. - + images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): + The pipeline handles three types of images: + + - A string containing a http link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images, which must then be passed as a string. + Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL + images. + timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and + the call may block forever. Return: A nested list of `float`: The features computed by the model. 
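To make the argument handling described above concrete, a short sketch of the accepted input types; the repeated URL and the local file name are placeholders for illustration:

```python
from PIL import Image
from transformers import pipeline

extractor = pipeline(task="image-feature-extraction", model="google/vit-base-patch16-224")

# A batch must be homogeneous: all http links, all local paths, or all PIL images.
urls = [
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
]
features = extractor(urls, timeout=10.0)  # timeout only applies when fetching over http
assert len(features) == 2  # one feature set per input image

# A single PIL image works the same way; "cats.png" is a hypothetical local file.
pil_features = extractor(Image.open("cats.png"))
```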
""" From 1923eb16ff3619e979aa1ade8a6ce39b36f67af9 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 11 Jan 2024 13:10:13 +0000 Subject: [PATCH 10/25] Remove pipeline mapping for composite vision models --- tests/models/align/test_modeling_align.py | 6 +----- tests/models/blip/test_modeling_blip.py | 1 - tests/models/blip_2/test_modeling_blip_2.py | 1 - tests/models/chinese_clip/test_modeling_chinese_clip.py | 6 +----- tests/models/clipseg/test_modeling_clipseg.py | 6 +----- tests/models/git/test_modeling_git.py | 1 - tests/models/groupvit/test_modeling_groupvit.py | 6 +----- tests/models/owlv2/test_modeling_owlv2.py | 1 - tests/models/owlvit/test_modeling_owlvit.py | 1 - tests/models/vilt/test_modeling_vilt.py | 6 +----- tests/models/x_clip/test_modeling_x_clip.py | 6 +----- tests/pipelines/test_pipelines_image_feature_extraction.py | 2 +- 12 files changed, 7 insertions(+), 36 deletions(-) diff --git a/tests/models/align/test_modeling_align.py b/tests/models/align/test_modeling_align.py index fb6ac35a0d3f51..99daeb816d2ddb 100644 --- a/tests/models/align/test_modeling_align.py +++ b/tests/models/align/test_modeling_align.py @@ -447,11 +447,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch class AlignModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (AlignModel,) if is_torch_available() else () - pipeline_model_mapping = ( - {"feature-extraction": AlignModel, "image-feature-extraction": AlignVisionModel} - if is_torch_available() - else {} - ) + pipeline_model_mapping = {"feature-extraction": AlignModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False diff --git a/tests/models/blip/test_modeling_blip.py b/tests/models/blip/test_modeling_blip.py index 65fad78c14c5e3..54512596b01c96 100644 --- a/tests/models/blip/test_modeling_blip.py +++ b/tests/models/blip/test_modeling_blip.py @@ -431,7 +431,6 @@ class BlipModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_model_mapping = ( { "feature-extraction": BlipModel, - "image-feature-extraction": BlipVisionModel, "image-to-text": BlipForConditionalGeneration, } if is_torch_available() diff --git a/tests/models/blip_2/test_modeling_blip_2.py b/tests/models/blip_2/test_modeling_blip_2.py index 91bbbe16f2b9f3..dd87961372d262 100644 --- a/tests/models/blip_2/test_modeling_blip_2.py +++ b/tests/models/blip_2/test_modeling_blip_2.py @@ -686,7 +686,6 @@ class Blip2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_model_mapping = ( { "feature-extraction": Blip2Model, - "image-feature-extraction": Blip2VisionModel, "image-to-text": Blip2ForConditionalGeneration, "visual-question-answering": Blip2ForConditionalGeneration, } diff --git a/tests/models/chinese_clip/test_modeling_chinese_clip.py b/tests/models/chinese_clip/test_modeling_chinese_clip.py index ba376c1e480121..8d0eb131e2385b 100644 --- a/tests/models/chinese_clip/test_modeling_chinese_clip.py +++ b/tests/models/chinese_clip/test_modeling_chinese_clip.py @@ -560,11 +560,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch class ChineseCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ChineseCLIPModel,) if is_torch_available() else () - pipeline_model_mapping = ( - {"feature-extraction": ChineseCLIPModel, "image-feature-extraction": ChineseCLIPVisionModel} - if is_torch_available() - else {} - ) + pipeline_model_mapping = 
{"feature-extraction": ChineseCLIPModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False diff --git a/tests/models/clipseg/test_modeling_clipseg.py b/tests/models/clipseg/test_modeling_clipseg.py index 69e1a74a55b6d8..0ebf08da89f9a5 100644 --- a/tests/models/clipseg/test_modeling_clipseg.py +++ b/tests/models/clipseg/test_modeling_clipseg.py @@ -449,11 +449,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch class CLIPSegModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CLIPSegModel, CLIPSegForImageSegmentation) if is_torch_available() else () - pipeline_model_mapping = ( - {"feature-extraction": CLIPSegModel, "image-feature-extraction": CLIPSegVisionModel} - if is_torch_available() - else {} - ) + pipeline_model_mapping = {"feature-extraction": CLIPSegModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False diff --git a/tests/models/git/test_modeling_git.py b/tests/models/git/test_modeling_git.py index 7df3a010b890cf..7027290411ad57 100644 --- a/tests/models/git/test_modeling_git.py +++ b/tests/models/git/test_modeling_git.py @@ -403,7 +403,6 @@ class GitModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, "feature-extraction": GitModel, "image-to-text": GitForCausalLM, "text-generation": GitForCausalLM, - "image-feature-extraction": GitVisionModel, } if is_torch_available() else {} diff --git a/tests/models/groupvit/test_modeling_groupvit.py b/tests/models/groupvit/test_modeling_groupvit.py index 5aead0c3141fcc..3d7f50ae6eb62f 100644 --- a/tests/models/groupvit/test_modeling_groupvit.py +++ b/tests/models/groupvit/test_modeling_groupvit.py @@ -548,11 +548,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch class GroupViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GroupViTModel,) if is_torch_available() else () - pipeline_model_mapping = ( - {"feature-extraction": GroupViTModel, "image-feature-extraction": GroupViTVisionModel} - if is_torch_available() - else {} - ) + pipeline_model_mapping = {"feature-extraction": GroupViTModel} if is_torch_available() else {} test_head_masking = False test_pruning = False test_resize_embeddings = False diff --git a/tests/models/owlv2/test_modeling_owlv2.py b/tests/models/owlv2/test_modeling_owlv2.py index ddc634421f2cba..3dbcab2c934eaa 100644 --- a/tests/models/owlv2/test_modeling_owlv2.py +++ b/tests/models/owlv2/test_modeling_owlv2.py @@ -436,7 +436,6 @@ class Owlv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): { "feature-extraction": Owlv2Model, "zero-shot-object-detection": Owlv2ForObjectDetection, - "image-feature-extraction": Owlv2VisionModel, } if is_torch_available() else {} diff --git a/tests/models/owlvit/test_modeling_owlvit.py b/tests/models/owlvit/test_modeling_owlvit.py index 2c8577cad44bfa..e99eb736e8255d 100644 --- a/tests/models/owlvit/test_modeling_owlvit.py +++ b/tests/models/owlvit/test_modeling_owlvit.py @@ -431,7 +431,6 @@ class OwlViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): { "feature-extraction": OwlViTModel, "zero-shot-object-detection": OwlViTForObjectDetection, - "image-feature-extraction": OwlViTVisionModel, } if is_torch_available() else {} diff --git a/tests/models/vilt/test_modeling_vilt.py b/tests/models/vilt/test_modeling_vilt.py index e17d6ce61b302f..fd93d9ab5821ac 100644 --- a/tests/models/vilt/test_modeling_vilt.py +++ 
b/tests/models/vilt/test_modeling_vilt.py @@ -227,11 +227,7 @@ class ViltModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): if is_torch_available() else () ) - pipeline_model_mapping = ( - {"image-feature-extraction": ViltModel, "visual-question-answering": ViltForQuestionAnswering} - if is_torch_available() - else {} - ) + pipeline_model_mapping = {"visual-question-answering": ViltForQuestionAnswering} if is_torch_available() else {} test_pruning = False test_headmasking = False test_torchscript = False diff --git a/tests/models/x_clip/test_modeling_x_clip.py b/tests/models/x_clip/test_modeling_x_clip.py index 6b43718e44f4ae..db28b41c0b39a3 100644 --- a/tests/models/x_clip/test_modeling_x_clip.py +++ b/tests/models/x_clip/test_modeling_x_clip.py @@ -533,11 +533,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch class XCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (XCLIPModel,) if is_torch_available() else () - pipeline_model_mapping = ( - {"feature-extraction": XCLIPModel, "image-feature-extraction": XCLIPVisionModel} - if is_torch_available() - else {} - ) + pipeline_model_mapping = {"feature-extraction": XCLIPModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False diff --git a/tests/pipelines/test_pipelines_image_feature_extraction.py b/tests/pipelines/test_pipelines_image_feature_extraction.py index 95f2a20a5d8567..fbbc356eefd85d 100644 --- a/tests/pipelines/test_pipelines_image_feature_extraction.py +++ b/tests/pipelines/test_pipelines_image_feature_extraction.py @@ -44,7 +44,7 @@ def prepare_img(): @is_pipeline_test -class FeatureExtractionPipelineTests(unittest.TestCase): +class ImageFeatureExtractionPipelineTests(unittest.TestCase): model_mapping = MODEL_MAPPING tf_model_mapping = TF_MODEL_MAPPING From 9e598c9d30c40a5afa6cb5e8680a517090dd26e1 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 11 Jan 2024 13:43:19 +0000 Subject: [PATCH 11/25] Add to pipeline tests --- .../test_pipelines_image_feature_extraction.py | 17 ++++++----------- tests/test_pipeline_mixin.py | 7 +++++++ 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/tests/pipelines/test_pipelines_image_feature_extraction.py b/tests/pipelines/test_pipelines_image_feature_extraction.py index fbbc356eefd85d..f5efefaaa3da12 100644 --- a/tests/pipelines/test_pipelines_image_feature_extraction.py +++ b/tests/pipelines/test_pipelines_image_feature_extraction.py @@ -151,8 +151,8 @@ def get_shape(self, input_, shape=None): raise ValueError("We expect lists of floats, nothing else") return shape - def get_test_pipeline(self, model, image_processor): - if image_processor is None: + def get_test_pipeline(self, model, tokenizer, processor): + if processor is None: self.skipTest("No image processor") elif type(model.config) in TOKENIZER_MAPPING: @@ -167,22 +167,17 @@ def get_test_pipeline(self, model, image_processor): """ ) - feature_extractor = ImageFeatureExtractionPipeline(model=model, image_processor=image_processor) + feature_extractor = ImageFeatureExtractionPipeline(model=model, image_processor=processor) img = prepare_img() return feature_extractor, [img, img] def run_pipeline_test(self, feature_extractor, examples): - img = prepare_img() - outputs = feature_extractor(img) + imgs = examples + outputs = feature_extractor(imgs[0]) shape = self.get_shape(outputs) self.assertEqual(shape[0], 1) - img = prepare_img() - outputs = 
feature_extractor([img, img]) + outputs = feature_extractor(imgs) shape = self.get_shape(outputs) self.assertEqual(shape[0], 2) - - outputs = feature_extractor(img.reshape(1000, 200), truncation=True) - shape = self.get_shape(outputs) - self.assertEqual(shape[0], 1) diff --git a/tests/test_pipeline_mixin.py b/tests/test_pipeline_mixin.py index bd4b9eb39343a2..c5446fc8628c09 100644 --- a/tests/test_pipeline_mixin.py +++ b/tests/test_pipeline_mixin.py @@ -39,6 +39,7 @@ from .pipelines.test_pipelines_feature_extraction import FeatureExtractionPipelineTests from .pipelines.test_pipelines_fill_mask import FillMaskPipelineTests from .pipelines.test_pipelines_image_classification import ImageClassificationPipelineTests +from .pipelines.test_pipelines_image_feature_extraction import ImageFeatureExtractionPipelineTests from .pipelines.test_pipelines_image_segmentation import ImageSegmentationPipelineTests from .pipelines.test_pipelines_image_to_image import ImageToImagePipelineTests from .pipelines.test_pipelines_image_to_text import ImageToTextPipelineTests @@ -70,6 +71,7 @@ "feature-extraction": {"test": FeatureExtractionPipelineTests}, "fill-mask": {"test": FillMaskPipelineTests}, "image-classification": {"test": ImageClassificationPipelineTests}, + "image-feature-extraction": {"test": ImageFeatureExtractionPipelineTests}, "image-segmentation": {"test": ImageSegmentationPipelineTests}, "image-to-image": {"test": ImageToImagePipelineTests}, "image-to-text": {"test": ImageToTextPipelineTests}, @@ -374,6 +376,11 @@ def test_pipeline_image_segmentation(self): def test_pipeline_image_to_text(self): self.run_task_tests(task="image-to-text") + @is_pipeline_test + @require_vision + def test_pipeline_image_feature_extraction(self): + self.run_task_tests(task="image-feature-extraction") + @unittest.skip(reason="`run_pipeline_test` is currently not implemented.") @is_pipeline_test @require_vision From f990de98e328316815194b17a656965ff4d0e3f9 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 11 Jan 2024 16:47:26 +0000 Subject: [PATCH 12/25] Remove for flava (multimodal) --- tests/models/flava/test_modeling_flava.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/models/flava/test_modeling_flava.py b/tests/models/flava/test_modeling_flava.py index a7ea933a97817e..48a070d9fe3137 100644 --- a/tests/models/flava/test_modeling_flava.py +++ b/tests/models/flava/test_modeling_flava.py @@ -921,7 +921,7 @@ def _test_model(self, config, inputs, test_image=False, test_text=False): @require_torch class FlavaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FlavaModel,) if is_torch_available() else () - pipeline_model_mapping = {"image-feature-extraction": FlavaModel} if is_torch_available() else {} + pipeline_model_mapping = {"feature-extraction": FlavaModel} if is_torch_available() else {} class_for_tester = FlavaModelTester test_head_masking = False test_pruning = False From 999e1e16dc4083e32c4b5d188a7779c934bec674 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 11 Jan 2024 17:04:46 +0000 Subject: [PATCH 13/25] safe pil import --- tests/pipelines/test_pipelines_image_feature_extraction.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/pipelines/test_pipelines_image_feature_extraction.py b/tests/pipelines/test_pipelines_image_feature_extraction.py index f5efefaaa3da12..e8a5d346a9bbd4 100644 --- 
a/tests/pipelines/test_pipelines_image_feature_extraction.py +++ b/tests/pipelines/test_pipelines_image_feature_extraction.py @@ -16,7 +16,6 @@ import numpy as np import pytest -from PIL import Image from transformers import ( MODEL_MAPPING, @@ -25,6 +24,7 @@ ImageFeatureExtractionPipeline, is_tf_available, is_torch_available, + is_vision_available, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch @@ -36,6 +36,9 @@ if is_tf_available(): import tensorflow as tf +if is_vision_available(): + from PIL import Image + # We will verify our results on an image of cute cats def prepare_img(): From 3f85b3799f645fe89e86090a257864332c9ec111 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Fri, 12 Jan 2024 11:58:29 +0000 Subject: [PATCH 14/25] Add requirements for pipeline run --- tests/test_pipeline_mixin.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_pipeline_mixin.py b/tests/test_pipeline_mixin.py index c5446fc8628c09..dbd783e9dc1a9e 100644 --- a/tests/test_pipeline_mixin.py +++ b/tests/test_pipeline_mixin.py @@ -377,7 +377,9 @@ def test_pipeline_image_to_text(self): self.run_task_tests(task="image-to-text") @is_pipeline_test + @require_timm @require_vision + @require_torch def test_pipeline_image_feature_extraction(self): self.run_task_tests(task="image-feature-extraction") From 960b74a45981dcef171d45a2830839f4f0dafe59 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Fri, 12 Jan 2024 13:30:59 +0000 Subject: [PATCH 15/25] Account for super slow efficientnet --- tests/models/efficientnet/test_modeling_efficientnet.py | 6 ++++++ tests/pipelines/test_pipelines_image_feature_extraction.py | 6 ++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/models/efficientnet/test_modeling_efficientnet.py b/tests/models/efficientnet/test_modeling_efficientnet.py index bd16566daed5f5..19d66aca95ae2b 100644 --- a/tests/models/efficientnet/test_modeling_efficientnet.py +++ b/tests/models/efficientnet/test_modeling_efficientnet.py @@ -216,6 +216,12 @@ def test_model_from_pretrained(self): model = EfficientNetModel.from_pretrained(model_name) self.assertIsNotNone(model) + @is_pipeline_test + @require_vision + @slow + def test_pipeline_image_feature_extraction(self): + super().test_pipeline_image_feature_extraction() + @is_pipeline_test @require_vision @slow diff --git a/tests/pipelines/test_pipelines_image_feature_extraction.py b/tests/pipelines/test_pipelines_image_feature_extraction.py index e8a5d346a9bbd4..eca1a38ca4cd8d 100644 --- a/tests/pipelines/test_pipelines_image_feature_extraction.py +++ b/tests/pipelines/test_pipelines_image_feature_extraction.py @@ -178,9 +178,7 @@ def run_pipeline_test(self, feature_extractor, examples): imgs = examples outputs = feature_extractor(imgs[0]) - shape = self.get_shape(outputs) - self.assertEqual(shape[0], 1) + self.assertEqual(len(outputs), 1) outputs = feature_extractor(imgs) - shape = self.get_shape(outputs) - self.assertEqual(shape[0], 2) + self.assertEqual(len(outputs), 2) From e4098cb777bd8955093405c0aaa0ca80abd0ea38 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Wed, 24 Jan 2024 21:09:58 +0000 Subject: [PATCH 16/25] Review comments --- .../pipelines/feature_extraction.py | 2 +- .../pipelines/image_feature_extraction.py | 14 ++++----- ...test_pipelines_image_feature_extraction.py | 31 ++----------------- 3 files changed, 9 
insertions(+), 38 deletions(-) diff --git a/src/transformers/pipelines/feature_extraction.py b/src/transformers/pipelines/feature_extraction.py index d704345db03df9..118baeccd0d6a2 100644 --- a/src/transformers/pipelines/feature_extraction.py +++ b/src/transformers/pipelines/feature_extraction.py @@ -14,7 +14,7 @@ ) class FeatureExtractionPipeline(Pipeline): """ - Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base + Feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base transformer, which can be used as features in downstream tasks. Example: diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py index 3ad2b2e9af2b6e..6dfc0747cecdf0 100644 --- a/src/transformers/pipelines/image_feature_extraction.py +++ b/src/transformers/pipelines/image_feature_extraction.py @@ -10,7 +10,7 @@ class ImageFeatureExtractionPipeline(Pipeline): """ - Image feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base + Image feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base transformer, which can be used as features in downstream tasks. Example: @@ -20,7 +20,7 @@ class ImageFeatureExtractionPipeline(Pipeline): >>> extractor = pipeline(model="google/vit-base-patch16-224", task="image-feature-extraction") >>> result = extractor("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", return_tensors=True) - >>> result.shape # This is a tensor of shape [1, sequence_lenth, hidden_dimension] representing the input string. + >>> result.shape # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input image. 
torch.Size([1, 197, 768]) ``` @@ -59,19 +59,17 @@ class ImageFeatureExtractionPipeline(Pipeline): """ def _sanitize_parameters(self, return_tensors=None, **kwargs): - preprocess_params = kwargs.pop("preprocess_kwargs", {}) - postprocess_params = kwargs.pop("postprocess_kwargs", {}) + preprocess_params = kwargs.pop("image_processor_kwargs", {}) + postprocess_params = {"return_tensors": return_tensors} if return_tensors is not None else {} if "timeout" in kwargs: preprocess_params["timeout"] = kwargs["timeout"] - if return_tensors is not None: - postprocess_params["return_tensors"] = return_tensors return preprocess_params, {}, postprocess_params - def preprocess(self, image, timeout=None, **preprocess_kwargs) -> Dict[str, GenericTensor]: + def preprocess(self, image, timeout=None, **image_processor_kwargs) -> Dict[str, GenericTensor]: image = load_image(image, timeout=timeout) - model_inputs = self.image_processor(image, return_tensors=self.framework, **preprocess_kwargs) + model_inputs = self.image_processor(image, return_tensors=self.framework, **image_processor_kwargs) return model_inputs def _forward(self, model_inputs): diff --git a/tests/pipelines/test_pipelines_image_feature_extraction.py b/tests/pipelines/test_pipelines_image_feature_extraction.py index eca1a38ca4cd8d..cbdb412adff9dd 100644 --- a/tests/pipelines/test_pipelines_image_feature_extraction.py +++ b/tests/pipelines/test_pipelines_image_feature_extraction.py @@ -78,11 +78,6 @@ def test_image_processing_small_model_pt(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt" ) - img = prepare_img() - outputs = feature_extractor(img) - self.assertEqual( - nested_simplify(outputs[0][0]), - [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip # test with image processor parameters preprocess_kwargs = {"size": {"height": 300, "width": 300}} @@ -93,7 +88,7 @@ def test_image_processing_small_model_pt(self): preprocess_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]} img = prepare_img() - feature_extractor(img, preprocess_kwargs=preprocess_kwargs) + outputs = feature_extractor(img, preprocess_kwargs=preprocess_kwargs) self.assertEqual(np.squeeze(outputs).shape, (226, 32)) @require_tf @@ -101,12 +96,6 @@ def test_image_processing_small_model_tf(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="tf" ) - # test with empty parameters - img = prepare_img() - outputs = feature_extractor(img) - self.assertEqual( - nested_simplify(outputs[0][0]), - [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip # test with image processor parameters preprocess_kwargs = {"size": {"height": 300, "width": 300}} @@ -117,7 +106,7 @@ def test_image_processing_small_model_tf(self): preprocess_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]} img = prepare_img() - feature_extractor(img, preprocess_kwargs=preprocess_kwargs) + outputs = feature_extractor(img, preprocess_kwargs=preprocess_kwargs) self.assertEqual(np.squeeze(outputs).shape, (226, 32)) @require_torch @@ -138,22 +127,6 @@ def 
test_return_tensors_tf(self): outputs = feature_extractor(img, return_tensors=True) self.assertTrue(tf.is_tensor(outputs)) - def get_shape(self, input_, shape=None): - if shape is None: - shape = [] - if isinstance(input_, list): - subshapes = [self.get_shape(in_, shape) for in_ in input_] - if all(s == 0 for s in subshapes): - shape.append(len(input_)) - else: - subshape = subshapes[0] - shape = [len(input_), *subshape] - elif isinstance(input_, float): - return 0 - else: - raise ValueError("We expect lists of floats, nothing else") - return shape - def get_test_pipeline(self, model, tokenizer, processor): if processor is None: self.skipTest("No image processor") From 50c0f631dfd0d2bc31a1be469150941437bcc70d Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Wed, 24 Jan 2024 21:40:52 +0000 Subject: [PATCH 17/25] Fix tests --- .../pipelines/image_feature_extraction.py | 4 ++-- .../test_pipelines_image_feature_extraction.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py index 6dfc0747cecdf0..372b7b9d1f1207 100644 --- a/src/transformers/pipelines/image_feature_extraction.py +++ b/src/transformers/pipelines/image_feature_extraction.py @@ -58,8 +58,8 @@ class ImageFeatureExtractionPipeline(Pipeline): (`torch.float16`, `torch.bfloat16`, ... or `"auto"`). """ - def _sanitize_parameters(self, return_tensors=None, **kwargs): - preprocess_params = kwargs.pop("image_processor_kwargs", {}) + def _sanitize_parameters(self, return_tensors=None, image_processor_kwargs=None, **kwargs): + preprocess_params = {} if image_processor_kwargs is None else image_processor_kwargs postprocess_params = {"return_tensors": return_tensors} if return_tensors is not None else {} if "timeout" in kwargs: diff --git a/tests/pipelines/test_pipelines_image_feature_extraction.py b/tests/pipelines/test_pipelines_image_feature_extraction.py index cbdb412adff9dd..21f153e2ca1dfb 100644 --- a/tests/pipelines/test_pipelines_image_feature_extraction.py +++ b/tests/pipelines/test_pipelines_image_feature_extraction.py @@ -80,15 +80,15 @@ def test_image_processing_small_model_pt(self): ) # test with image processor parameters - preprocess_kwargs = {"size": {"height": 300, "width": 300}} + image_processor_kwargs = {"size": {"height": 300, "width": 300}} img = prepare_img() with pytest.raises(ValueError): # Image doesn't match model input size - feature_extractor(img, preprocess_kwargs=preprocess_kwargs) + feature_extractor(img, image_processor_kwargs=image_processor_kwargs) - preprocess_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]} + image_processor_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]} img = prepare_img() - outputs = feature_extractor(img, preprocess_kwargs=preprocess_kwargs) + outputs = feature_extractor(img, image_processor_kwargs=image_processor_kwargs) self.assertEqual(np.squeeze(outputs).shape, (226, 32)) @require_tf @@ -98,15 +98,15 @@ def test_image_processing_small_model_tf(self): ) # test with image processor parameters - preprocess_kwargs = {"size": {"height": 300, "width": 300}} + image_processor_kwargs = {"size": {"height": 300, "width": 300}} img = prepare_img() with pytest.raises(ValueError): # Image doesn't match model input size - feature_extractor(img, preprocess_kwargs=preprocess_kwargs) + feature_extractor(img, image_processor_kwargs=image_processor_kwargs) - preprocess_kwargs = 
{"image_mean": [0, 0, 0], "image_std": [1, 1, 1]} + image_processor_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]} img = prepare_img() - outputs = feature_extractor(img, preprocess_kwargs=preprocess_kwargs) + outputs = feature_extractor(img, image_processor_kwargs=image_processor_kwargs) self.assertEqual(np.squeeze(outputs).shape, (226, 32)) @require_torch From 42853ce1ad6676600507dff290f797f11fe95252 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 25 Jan 2024 19:16:56 +0000 Subject: [PATCH 18/25] Swap order of kwargs --- src/transformers/pipelines/image_feature_extraction.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py index 372b7b9d1f1207..a9ee8c14c560aa 100644 --- a/src/transformers/pipelines/image_feature_extraction.py +++ b/src/transformers/pipelines/image_feature_extraction.py @@ -58,7 +58,7 @@ class ImageFeatureExtractionPipeline(Pipeline): (`torch.float16`, `torch.bfloat16`, ... or `"auto"`). """ - def _sanitize_parameters(self, return_tensors=None, image_processor_kwargs=None, **kwargs): + def _sanitize_parameters(self, image_processor_kwargs=None, return_tensors=None, **kwargs): preprocess_params = {} if image_processor_kwargs is None else image_processor_kwargs postprocess_params = {"return_tensors": return_tensors} if return_tensors is not None else {} From 4b059f03cc40191b504ec3c9a4df281f3e8a1b7f Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Wed, 31 Jan 2024 19:56:10 +0000 Subject: [PATCH 19/25] Use build_pipeline_init_args --- .../pipelines/image_feature_extraction.py | 30 ++----------------- tests/models/git/test_modeling_git.py | 6 +--- 2 files changed, 4 insertions(+), 32 deletions(-) diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py index a9ee8c14c560aa..d7003693128ffd 100644 --- a/src/transformers/pipelines/image_feature_extraction.py +++ b/src/transformers/pipelines/image_feature_extraction.py @@ -1,13 +1,14 @@ from typing import Dict -from ..utils import is_vision_available -from .base import GenericTensor, Pipeline +from ..utils import add_end_docstrings, is_vision_available +from .base import GenericTensor, Pipeline, build_pipeline_init_args if is_vision_available(): from ..image_utils import load_image +@add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) class ImageFeatureExtractionPipeline(Pipeline): """ Image feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base @@ -31,31 +32,6 @@ class ImageFeatureExtractionPipeline(Pipeline): All vision models may be used for this pipeline. See a list of all models, including community-contributed models on [huggingface.co/models](https://huggingface.co/models). - - Arguments: - model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): - The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from - [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow. - image_processor ([`PreTrainedImageProcessor`], *optional*): - The image processor that will be used by the pipeline to encode data for the model. This object inherits from - [`PreTrainedImageProcessor`]. - modelcard (`str` or [`ModelCard`], *optional*): - Model card attributed to the model for this pipeline. 
- framework (`str`, *optional*): - The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be - installed. - - If no framework is specified, will default to the one currently installed. If no framework is specified and - both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is - provided. - args_parser ([`~pipelines.ArgumentHandler`], *optional*): - Reference to the object in charge of parsing supplied pipeline parameters. - device (`int`, *optional*, defaults to -1): - Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on - the associated CUDA device id. - torch_dtype (`str` or `torch.dtype`, *optional*): - Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model - (`torch.float16`, `torch.bfloat16`, ... or `"auto"`). """ def _sanitize_parameters(self, image_processor_kwargs=None, return_tensors=None, **kwargs): diff --git a/tests/models/git/test_modeling_git.py b/tests/models/git/test_modeling_git.py index 7027290411ad57..c503abfb89db1a 100644 --- a/tests/models/git/test_modeling_git.py +++ b/tests/models/git/test_modeling_git.py @@ -399,11 +399,7 @@ class GitModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, all_model_classes = (GitModel, GitForCausalLM) if is_torch_available() else () all_generative_model_classes = (GitForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( - { - "feature-extraction": GitModel, - "image-to-text": GitForCausalLM, - "text-generation": GitForCausalLM, - } + {"feature-extraction": GitModel, "image-to-text": GitForCausalLM, "text-generation": GitForCausalLM} if is_torch_available() else {} ) From 2c5c2fb2b8478243f32ae1ecd4a5f30dda6dab5f Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Wed, 31 Jan 2024 20:02:18 +0000 Subject: [PATCH 20/25] Add back FE pipeline for Vilt --- tests/models/vilt/test_modeling_vilt.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/models/vilt/test_modeling_vilt.py b/tests/models/vilt/test_modeling_vilt.py index fd93d9ab5821ac..e17d6ce61b302f 100644 --- a/tests/models/vilt/test_modeling_vilt.py +++ b/tests/models/vilt/test_modeling_vilt.py @@ -227,7 +227,11 @@ class ViltModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): if is_torch_available() else () ) - pipeline_model_mapping = {"visual-question-answering": ViltForQuestionAnswering} if is_torch_available() else {} + pipeline_model_mapping = ( + {"image-feature-extraction": ViltModel, "visual-question-answering": ViltForQuestionAnswering} + if is_torch_available() + else {} + ) test_pruning = False test_headmasking = False test_torchscript = False From 95f9c585b27226d4e90d523f8e95227892b63740 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Wed, 31 Jan 2024 20:17:38 +0000 Subject: [PATCH 21/25] Include image_processor_kwargs in docstring --- src/transformers/pipelines/image_feature_extraction.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/transformers/pipelines/image_feature_extraction.py b/src/transformers/pipelines/image_feature_extraction.py index d7003693128ffd..ccfe7c40d7e76d 100644 --- a/src/transformers/pipelines/image_feature_extraction.py +++ b/src/transformers/pipelines/image_feature_extraction.py @@ -8,7 +8,14 @@ from ..image_utils import load_image 
-@add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) +@add_end_docstrings( + build_pipeline_init_args(has_image_processor=True), + """ + image_processor_kwargs (`dict`, *optional*): + Additional dictionary of keyword arguments passed along to the image processor e.g. + {"size": {"height": 100, "width": 100}} + """, +) class ImageFeatureExtractionPipeline(Pipeline): """ Image feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base From 94945ec27a4779482f3c26b7e36325823936300c Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 1 Feb 2024 18:24:18 +0000 Subject: [PATCH 22/25] Mark test as flaky --- .../test_modeling_vision_encoder_decoder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py index 7c3925b30293ba..a3ee223b38fd22 100644 --- a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py +++ b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py @@ -324,7 +324,7 @@ def test_encoder_decoder_model_output_attentions(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**input_ids_dict) - # FIXME @gante: flaky test + # FIXME @ydshieh: flaky test @is_flaky( description="Fails on distributed runs e.g.: https://app.circleci.com/pipelines/github/huggingface/transformers/83611/workflows/666b01c9-1be8-4daa-b85d-189e670fc168/jobs/1078635/tests#failed-test-0" ) From 0a839a0a6cf53aece12c3342f85c76d14f39d00e Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Fri, 2 Feb 2024 11:32:15 +0000 Subject: [PATCH 23/25] Update TODO --- .../test_modeling_vision_encoder_decoder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py index a3ee223b38fd22..7c3925b30293ba 100644 --- a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py +++ b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py @@ -324,7 +324,7 @@ def test_encoder_decoder_model_output_attentions(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_output_attentions(**input_ids_dict) - # FIXME @ydshieh: flaky test + # FIXME @gante: flaky test @is_flaky( description="Fails on distributed runs e.g.: https://app.circleci.com/pipelines/github/huggingface/transformers/83611/workflows/666b01c9-1be8-4daa-b85d-189e670fc168/jobs/1078635/tests#failed-test-0" ) From 3fd0797d355ba27e8e31462f9e7b15a82dd2d737 Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Mon, 5 Feb 2024 13:09:32 +0000 Subject: [PATCH 24/25] Update tests/pipelines/test_pipelines_image_feature_extraction.py Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> --- tests/pipelines/test_pipelines_image_feature_extraction.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pipelines/test_pipelines_image_feature_extraction.py b/tests/pipelines/test_pipelines_image_feature_extraction.py index 21f153e2ca1dfb..a9c99ad50bc604 100644 --- a/tests/pipelines/test_pipelines_image_feature_extraction.py +++ 
b/tests/pipelines/test_pipelines_image_feature_extraction.py @@ -1,4 +1,4 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. From 04c109c3a43375ed6c11e785492904d61ea1913c Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Mon, 5 Feb 2024 13:12:48 +0000 Subject: [PATCH 25/25] Add license header --- src/transformers/pipelines/image_to_text.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/transformers/pipelines/image_to_text.py b/src/transformers/pipelines/image_to_text.py index ec1d07e0228253..26698ecf0cebc0 100644 --- a/src/transformers/pipelines/image_to_text.py +++ b/src/transformers/pipelines/image_to_text.py @@ -1,3 +1,18 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from typing import List, Union from ..utils import (
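Taken together, the series settles the user-facing surface of the new pipeline: `image_processor_kwargs` for per-call preprocessing overrides, `timeout` for remote images, and `return_tensors` to skip the list conversion. A closing sketch of that final API, using the tiny test checkpoint exercised in the test suite above:

```python
import torch
from transformers import pipeline

extractor = pipeline(
    task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt"
)
url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"

# Per-call image processor overrides, matching what the tests above exercise.
features = extractor(url, image_processor_kwargs={"image_mean": [0, 0, 0], "image_std": [1, 1, 1]})

# return_tensors=True returns the framework tensor instead of nested lists.
tensor = extractor(url, return_tensors=True)
assert torch.is_tensor(tensor)
```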