rename all test_processing_*.py to test_processor_*.py (#33878)
* rename all test_processing_*.py to test_processor_*.py and fix duplicate test processor paligemma

* fix copies

* fix broken tests

* fix-copies

* fix test processor bridgetower
yonigozlan authored Oct 2, 2024
1 parent 2f25ab9 commit 62e8c75
Showing 14 changed files with 46 additions and 170 deletions.
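
The sweep itself is mechanical. A minimal sketch of how such a rename can be scripted with pathlib (a hypothetical helper, not part of this commit; note the shared mixin module tests/test_processing_common.py keeps its name):

    from pathlib import Path

    # Rename every tests/models/**/test_processing_*.py to test_processor_*.py.
    # Assumes the working directory is the repository root.
    for old in Path("tests/models").rglob("test_processing_*.py"):
        new = old.with_name(old.name.replace("test_processing_", "test_processor_", 1))
        print(f"{old} -> {new}")  # dry-run trace before the actual move
        old.rename(new)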
7 changes: 5 additions & 2 deletions src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
@@ -21,7 +21,7 @@
 from dataclasses import dataclass
 from functools import lru_cache
 from pathlib import Path
-from typing import Optional, Tuple, Union
+from typing import List, Optional, Tuple, Union
 
 import torch
 import torch.nn.functional as F
@@ -208,7 +208,10 @@ def load_cuda_kernels():
 
 # Copied from transformers.models.deformable_detr.modeling_deformable_detr.multi_scale_deformable_attention
 def multi_scale_deformable_attention(
-    value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor
+    value: Tensor,
+    value_spatial_shapes: Union[Tensor, List[Tuple]],
+    sampling_locations: Tensor,
+    attention_weights: Tensor,
 ) -> Tensor:
     batch_size, _, num_heads, hidden_dim = value.shape
     _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape
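
The widened value_spatial_shapes annotation means callers may pass the per-level shapes either as a tensor or as a plain list of (height, width) tuples. A hedged sketch of a call in the list form; every shape below is an illustrative assumption, not a value from this commit:

    import torch

    from transformers.models.omdet_turbo.modeling_omdet_turbo import multi_scale_deformable_attention

    batch, heads, head_dim, queries, points = 1, 2, 8, 4, 3
    shapes = [(8, 8), (4, 4)]              # List[Tuple] form; torch.tensor(shapes) is the Tensor form
    total = sum(h * w for h, w in shapes)  # 80 flattened positions across both levels
    value = torch.rand(batch, total, heads, head_dim)
    sampling_locations = torch.rand(batch, queries, heads, len(shapes), points, 2)
    attention_weights = torch.rand(batch, queries, heads, len(shapes), points)

    out = multi_scale_deformable_attention(value, shapes, sampling_locations, attention_weights)
    print(out.shape)  # expected (batch, queries, heads * head_dim), i.e. (1, 4, 16)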
2 changes: 1 addition & 1 deletion tests/models/blip/test_processor_blip.py
@@ -152,7 +152,7 @@ def test_unstructured_kwargs_batched(self):
         self.skip_processor_without_typed_kwargs(processor)
 
         input_str = ["lower newer", "upper older longer string"]
-        image_input = self.prepare_image_inputs() * 2
+        image_input = self.prepare_image_inputs(batch_size=2)
         inputs = processor(
             text=input_str,
             images=image_input,
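
The switch from list multiplication to prepare_image_inputs(batch_size=2) recurs throughout this commit: the mixin helper now builds the batch itself, yielding two independent images rather than the same object twice. A rough sketch of such a helper, modeled on the prepare_image_inputs override that the BridgeTower test deletes below (the actual mixin implementation may differ):

    import numpy as np
    from PIL import Image

    def prepare_image_inputs(batch_size=1):
        """Return `batch_size` small random PIL images (sketch of the mixin helper)."""
        arrays = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8) for _ in range(batch_size)]
        return [Image.fromarray(np.moveaxis(a, 0, -1)) for a in arrays]

    images = prepare_image_inputs(batch_size=2)  # two distinct images, not one image repeated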
29 changes: 1 addition & 28 deletions tests/models/blip_2/test_processor_blip_2.py
@@ -17,7 +17,7 @@
 
 import pytest
 
-from transformers.testing_utils import require_torch, require_vision
+from transformers.testing_utils import require_vision
 from transformers.utils import is_vision_available
 
 from ...test_processing_common import ProcessorTesterMixin
@@ -139,30 +139,3 @@ def test_model_input_names(self):
 
         # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
         self.assertCountEqual(list(inputs.keys()), ["input_ids", "pixel_values", "attention_mask"])
-
-    @require_torch
-    @require_vision
-    def test_unstructured_kwargs_batched(self):
-        if "image_processor" not in self.processor_class.attributes:
-            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
-        image_processor = self.get_component("image_processor")
-        tokenizer = self.get_component("tokenizer")
-        if not tokenizer.pad_token:
-            tokenizer.pad_token = "[TEST_PAD]"
-        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
-        self.skip_processor_without_typed_kwargs(processor)
-
-        input_str = ["lower newer", "upper older longer string"]
-        image_input = self.prepare_image_inputs() * 2
-        inputs = processor(
-            text=input_str,
-            images=image_input,
-            return_tensors="pt",
-            crop_size={"height": 214, "width": 214},
-            size={"height": 214, "width": 214},
-            padding="longest",
-            max_length=76,
-        )
-        self.assertEqual(inputs["pixel_values"].shape[2], 214)
-
-        self.assertEqual(len(inputs["input_ids"][0]), 11)
tests/models/bridgetower/test_processor_bridgetower.py
@@ -15,17 +15,13 @@
 import tempfile
 import unittest
 
-import numpy as np
-
 from transformers.testing_utils import require_torch, require_vision
 from transformers.utils import is_vision_available
 
 from ...test_processing_common import ProcessorTesterMixin
 
 
 if is_vision_available():
-    from PIL import Image
-
     from transformers import (
         AutoProcessor,
         BridgeTowerImageProcessor,
@@ -35,7 +31,7 @@
 
 
 @require_vision
-class Blip2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
+class BridgeTowerProcessorTest(ProcessorTesterMixin, unittest.TestCase):
     processor_class = BridgeTowerProcessor
 
     def setUp(self):
@@ -57,17 +53,6 @@ def get_image_processor(self, **kwargs):
     def tearDown(self):
         shutil.rmtree(self.tmpdirname)
 
-    def prepare_image_inputs(self):
-        """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
-        or a list of PyTorch tensors if one specifies torchify=True.
-        """
-
-        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
-
-        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
-
-        return image_inputs
-
     # Some kwargs tests are overriden from common tests to handle shortest_edge
     # and size_divisor behaviour
 
@@ -149,7 +134,7 @@ def test_unstructured_kwargs_batched(self):
         self.skip_processor_without_typed_kwargs(processor)
 
         input_str = ["lower newer", "upper older longer string"]
-        image_input = self.prepare_image_inputs() * 2
+        image_input = self.prepare_image_inputs(batch_size=2)
         inputs = processor(
             text=input_str,
             images=image_input,
tests/models/donut/test_processor_donut.py
@@ -18,10 +18,6 @@
 import unittest
 
 from transformers import DonutImageProcessor, DonutProcessor, XLMRobertaTokenizerFast
-from transformers.testing_utils import (
-    require_torch,
-    require_vision,
-)
 
 from ...test_processing_common import ProcessorTesterMixin
 
@@ -65,30 +61,3 @@ def test_token2json(self):
         actual_json = self.processor.token2json(sequence)
 
         self.assertDictEqual(actual_json, expected_json)
-
-    @require_torch
-    @require_vision
-    def test_unstructured_kwargs_batched(self):
-        if "image_processor" not in self.processor_class.attributes:
-            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
-        image_processor = self.get_component("image_processor")
-        tokenizer = self.get_component("tokenizer")
-        if not tokenizer.pad_token:
-            tokenizer.pad_token = "[TEST_PAD]"
-        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
-        self.skip_processor_without_typed_kwargs(processor)
-
-        input_str = ["lower newer", "upper older longer string"]
-        image_input = self.prepare_image_inputs() * 2
-        inputs = processor(
-            text=input_str,
-            images=image_input,
-            return_tensors="pt",
-            crop_size={"height": 214, "width": 214},
-            size={"height": 214, "width": 214},
-            padding="longest",
-            max_length=76,
-        )
-        self.assertEqual(inputs["pixel_values"].shape[2], 214)
-
-        self.assertEqual(len(inputs["input_ids"][0]), 7)
File renamed without changes.
tests/models/musicgen_melody/test_processor_musicgen_melody.py
@@ -50,7 +50,7 @@ def floats_list(shape, scale=1.0, rng=None, name=None):
 @require_torch
 @require_sentencepiece
 @require_torchaudio
-# Copied from tests.models.musicgen.test_processing_musicgen.MusicgenProcessorTest with Musicgen->MusicgenMelody, Encodec->MusicgenMelody, padding_mask->attention_mask, input_values->input_features
+# Copied from tests.models.musicgen.test_processor_musicgen.MusicgenProcessorTest with Musicgen->MusicgenMelody, Encodec->MusicgenMelody, padding_mask->attention_mask, input_values->input_features
 class MusicgenMelodyProcessorTest(unittest.TestCase):
     def setUp(self):
         # Ignore copy
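
This one-line edit matters because `# Copied from` markers are machine-checked: the repository consistency script re-derives the annotated code from the referenced source after applying the listed `A->B` substitutions, so the marker must track the renamed test module. A toy sketch of the substitution idea only; the real check lives in utils/check_copies.py and is considerably more involved:

    # Each `old->new` pair from the marker above, applied in order to the copied source.
    replacements = [
        ("Musicgen", "MusicgenMelody"),
        ("Encodec", "MusicgenMelody"),
        ("padding_mask", "attention_mask"),
        ("input_values", "input_features"),
    ]

    def apply_replacements(source: str) -> str:
        for old, new in replacements:
            source = source.replace(old, new)
        return source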
84 changes: 0 additions & 84 deletions tests/models/paligemma/test_processing_paligemma.py

This file was deleted.

42 changes: 36 additions & 6 deletions tests/models/paligemma/test_processor_paligemma.py
@@ -16,19 +16,15 @@
 import tempfile
 import unittest
 
-from transformers import GemmaTokenizer
+from transformers import GemmaTokenizer, PaliGemmaProcessor
 from transformers.testing_utils import get_tests_dir, require_torch, require_vision
 from transformers.utils import is_vision_available
 
 from ...test_processing_common import ProcessorTesterMixin
 
 
 if is_vision_available():
-    from transformers import (
-        PaliGemmaProcessor,
-        SiglipImageProcessor,
-        is_vision_available,
-    )
+    from transformers import SiglipImageProcessor
 
 SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
 
@@ -61,3 +57,37 @@ def test_image_seq_length(self):
             text=input_str, images=image_input, return_tensors="pt", max_length=112, padding="max_length"
         )
         self.assertEqual(len(inputs["input_ids"][0]), 112 + 14)
+
+    def test_text_with_image_tokens(self):
+        image_processor = self.get_component("image_processor")
+        tokenizer = self.get_component("tokenizer")
+
+        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
+        text_multi_images = "<image><image><bos>Dummy text!"
+        text_single_image = "<image><bos>Dummy text!"
+        text_no_image = "Dummy text!"
+
+        image = self.prepare_image_inputs()
+
+        out_noimage = processor(text=text_no_image, images=image, return_tensors="np")
+        out_singlimage = processor(text=text_single_image, images=image, return_tensors="np")
+        for k in out_noimage:
+            self.assertTrue(out_noimage[k].tolist() == out_singlimage[k].tolist())
+
+        out_multiimages = processor(text=text_multi_images, images=[image, image], return_tensors="np")
+        out_noimage = processor(text=text_no_image, images=[[image, image]], return_tensors="np")
+
+        # We can't be sure of the user's intention: "one text + two images", or a forgotten second text
+        with self.assertRaises(ValueError):
+            out_noimage = processor(text=text_no_image, images=[image, image], return_tensors="np")
+
+        for k in out_noimage:
+            self.assertTrue(out_noimage[k].tolist() == out_multiimages[k].tolist())
+
+        text_batched = ["Dummy text!", "Dummy text!"]
+        text_batched_with_image = ["<image><bos>Dummy text!", "<image><bos>Dummy text!"]
+        out_images = processor(text=text_batched_with_image, images=[image, image], return_tensors="np")
+        out_noimage_nested = processor(text=text_batched, images=[[image], [image]], return_tensors="np")
+        out_noimage = processor(text=text_batched, images=[image, image], return_tensors="np")
+        for k in out_noimage:
+            self.assertTrue(out_noimage[k].tolist() == out_images[k].tolist() == out_noimage_nested[k].tolist())
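
The new test pins down the pairing convention: with batched text a flat image list means one image per prompt, a nested list groups several images with one prompt, and several unnested images against a single prompt raise a ValueError instead of guessing. A short illustration of the accepted shapes, assuming `processor` and `image` as constructed in the test above:

    processor(text="Dummy text!", images=image)              # one prompt, one image
    processor(text="Dummy text!", images=[[image, image]])   # one prompt, two images (nested list)
    processor(text=["a", "b"], images=[image, image])        # two prompts, one image each (flat list)
    # processor(text="Dummy text!", images=[image, image])   # ambiguous pairing -> ValueError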
