diff --git a/.github/workflows/build-past-ci-docker-images.yml b/.github/workflows/build-past-ci-docker-images.yml
index 21028568c963eb..302386b6851b18 100644
--- a/.github/workflows/build-past-ci-docker-images.yml
+++ b/.github/workflows/build-past-ci-docker-images.yml
@@ -15,7 +15,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        version: ["1.13", "1.12", "1.11", "1.10"]
+        version: ["1.13", "1.12", "1.11"]
     runs-on: ubuntu-22.04
     steps:
       -
diff --git a/.github/workflows/self-nightly-past-ci-caller.yml b/.github/workflows/self-nightly-past-ci-caller.yml
index dfc258e5be856a..32ebc2be597c91 100644
--- a/.github/workflows/self-nightly-past-ci-caller.yml
+++ b/.github/workflows/self-nightly-past-ci-caller.yml
@@ -56,17 +56,6 @@ jobs:
       sha: ${{ github.sha }}
     secrets: inherit

-  run_past_ci_pytorch_1-10:
-    name: PyTorch 1.10
-    if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
-    needs: [run_past_ci_pytorch_1-11]
-    uses: ./.github/workflows/self-past.yml
-    with:
-      framework: pytorch
-      version: "1.10"
-      sha: ${{ github.sha }}
-    secrets: inherit
-
   run_past_ci_tensorflow_2-11:
     name: TensorFlow 2.11
     if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
diff --git a/README.md b/README.md
index dd46c1dee6e53f..e33ce15962c777 100644
--- a/README.md
+++ b/README.md
@@ -250,7 +250,7 @@ The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/sta

 ### With pip

-This repository is tested on Python 3.8+, Flax 0.4.1+, PyTorch 1.10+, and TensorFlow 2.6+.
+This repository is tested on Python 3.8+, Flax 0.4.1+, PyTorch 1.11+, and TensorFlow 2.6+.

 You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
diff --git a/README_es.md b/README_es.md
index 3cc52152e5bcd5..b344279e2d5631 100644
--- a/README_es.md
+++ b/README_es.md
@@ -225,7 +225,7 @@ El modelo en si es un [Pytorch `nn.Module`](https://pytorch.org/docs/stable/nn.h

 ### Con pip

-Este repositorio está probado en Python 3.8+, Flax 0.4.1+, PyTorch 1.10+ y TensorFlow 2.6+.
+Este repositorio está probado en Python 3.8+, Flax 0.4.1+, PyTorch 1.11+ y TensorFlow 2.6+.

 Deberías instalar 🤗 Transformers en un [ambiente virtual](https://docs.python.org/3/library/venv.html). Si no estas familiarizado con los entornos virtuales de Python, consulta la [guía de usuario](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
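The READMEs now advertise PyTorch 1.11 as the tested floor. As a quick sanity check — a minimal, hypothetical helper that is not part of this PR — an environment can be verified against the new constraint (`torch>=1.11,!=1.12.0`, matching the `setup.py` change below) before running the test suite:

```python
# Hypothetical helper, not part of this PR: verify the installed torch
# satisfies the new requirement string from setup.py.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

import torch


def check_torch_requirement(spec: str = ">=1.11,!=1.12.0") -> None:
    # Compare against the base version so local builds like "2.1.0+cu118" pass.
    installed = Version(Version(torch.__version__).base_version)
    if installed not in SpecifierSet(spec):
        raise RuntimeError(f"torch=={installed} does not satisfy '{spec}'")


check_torch_requirement()
```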
diff --git a/README_hd.md b/README_hd.md
index 20a62f5c05e516..03108f894580c2 100644
--- a/README_hd.md
+++ b/README_hd.md
@@ -201,7 +201,7 @@ checkpoint: जाँच बिंदु

 ### पिप का उपयोग करना

-इस रिपॉजिटरी का परीक्षण Python 3.8+, Flax 0.4.1+, PyTorch 1.10+ और TensorFlow 2.6+ के तहत किया गया है।
+इस रिपॉजिटरी का परीक्षण Python 3.8+, Flax 0.4.1+, PyTorch 1.11+ और TensorFlow 2.6+ के तहत किया गया है।

 आप [वर्चुअल एनवायरनमेंट](https://docs.python.org/3/library/venv.html) में 🤗 ट्रांसफॉर्मर इंस्टॉल कर सकते हैं। यदि आप अभी तक पायथन के वर्चुअल एनवायरनमेंट से परिचित नहीं हैं, तो कृपया इसे [उपयोगकर्ता निर्देश](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/) पढ़ें।
diff --git a/README_ja.md b/README_ja.md
index b70719a2cd84f7..bbf1f70d7746b3 100644
--- a/README_ja.md
+++ b/README_ja.md
@@ -259,7 +259,7 @@ Hugging Faceチームによって作られた **[トランスフォーマーを

 ### pipにて

-このリポジトリは、Python 3.8+, Flax 0.4.1+, PyTorch 1.10+, TensorFlow 2.6+ でテストされています。
+このリポジトリは、Python 3.8+, Flax 0.4.1+, PyTorch 1.11+, TensorFlow 2.6+ でテストされています。

 🤗Transformersは[仮想環境](https://docs.python.org/3/library/venv.html)にインストールする必要があります。Pythonの仮想環境に慣れていない場合は、[ユーザーガイド](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)を確認してください。
diff --git a/README_ko.md b/README_ko.md
index 48ced9d0122a06..e8a04ed9806495 100644
--- a/README_ko.md
+++ b/README_ko.md
@@ -176,7 +176,7 @@ limitations under the License.

 ### pip로 설치하기

-이 저장소는 Python 3.8+, Flax 0.4.1+, PyTorch 1.10+, TensorFlow 2.6+에서 테스트 되었습니다.
+이 저장소는 Python 3.8+, Flax 0.4.1+, PyTorch 1.11+, TensorFlow 2.6+에서 테스트 되었습니다.

 [가상 환경](https://docs.python.org/3/library/venv.html)에 🤗 Transformers를 설치하세요. Python 가상 환경에 익숙하지 않다면, [사용자 가이드](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)를 확인하세요.
diff --git a/README_pt-br.md b/README_pt-br.md
index fc4208de2e2d25..788d0c14cf3c28 100644
--- a/README_pt-br.md
+++ b/README_pt-br.md
@@ -258,7 +258,7 @@ O modelo em si é um [Pytorch `nn.Module`](https://pytorch.org/docs/stable/nn.ht

 ### Com pip

-Este repositório é testado no Python 3.8+, Flax 0.4.1+, PyTorch 1.10+ e TensorFlow 2.6+.
+Este repositório é testado no Python 3.8+, Flax 0.4.1+, PyTorch 1.11+ e TensorFlow 2.6+.

 Você deve instalar o 🤗 Transformers em um [ambiente virtual](https://docs.python.org/3/library/venv.html). Se você não está familiarizado com ambientes virtuais em Python, confira o [guia do usuário](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
diff --git a/README_ru.md b/README_ru.md
index ba3b2cbc9b4572..2701bd3bbc431c 100644
--- a/README_ru.md
+++ b/README_ru.md
@@ -248,7 +248,7 @@ Hugging Face Hub. Мы хотим, чтобы Transformers позволил ра

 ### С помощью pip

-Данный репозиторий протестирован на Python 3.8+, Flax 0.4.1+, PyTorch 1.10+ и TensorFlow 2.6+.
+Данный репозиторий протестирован на Python 3.8+, Flax 0.4.1+, PyTorch 1.11+ и TensorFlow 2.6+.

 Устанавливать 🤗 Transformers следует в [виртуальной среде](https://docs.python.org/3/library/venv.html). Если вы не знакомы с виртуальными средами Python, ознакомьтесь с [руководством пользователя](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
diff --git a/README_te.md b/README_te.md
index 28b390cf8a7a14..66ded8dd10781f 100644
--- a/README_te.md
+++ b/README_te.md
@@ -251,7 +251,7 @@ limitations under the License.

 ### పిప్ తో

-ఈ రిపోజిటరీ పైథాన్ 3.8+, ఫ్లాక్స్ 0.4.1+, PyTorch 1.10+ మరియు TensorFlow 2.6+లో పరీక్షించబడింది.
+ఈ రిపోజిటరీ పైథాన్ 3.8+, ఫ్లాక్స్ 0.4.1+, PyTorch 1.11+ మరియు TensorFlow 2.6+లో పరీక్షించబడింది.

 మీరు [వర్చువల్ వాతావరణం](https://docs.python.org/3/library/venv.html)లో 🤗 ట్రాన్స్‌ఫార్మర్‌లను ఇన్‌స్టాల్ చేయాలి. మీకు పైథాన్ వర్చువల్ పరిసరాల గురించి తెలియకుంటే, [యూజర్ గైడ్](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/) చూడండి.
diff --git a/README_zh-hans.md b/README_zh-hans.md
index f537c779cdddd0..578190784e04fa 100644
--- a/README_zh-hans.md
+++ b/README_zh-hans.md
@@ -201,7 +201,7 @@ checkpoint: 检查点

 ### 使用 pip

-这个仓库已在 Python 3.8+、Flax 0.4.1+、PyTorch 1.10+ 和 TensorFlow 2.6+ 下经过测试。
+这个仓库已在 Python 3.8+、Flax 0.4.1+、PyTorch 1.11+ 和 TensorFlow 2.6+ 下经过测试。

 你可以在[虚拟环境](https://docs.python.org/3/library/venv.html)中安装 🤗 Transformers。如果你还不熟悉 Python 的虚拟环境，请阅此[用户说明](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)。
diff --git a/README_zh-hant.md b/README_zh-hant.md
index 2f35e28089fca1..3e891341ce1dd2 100644
--- a/README_zh-hant.md
+++ b/README_zh-hant.md
@@ -213,7 +213,7 @@ Tokenizer 為所有的預訓練模型提供了預處理，並可以直接轉換

 ### 使用 pip

-這個 Repository 已在 Python 3.8+、Flax 0.4.1+、PyTorch 1.10+ 和 TensorFlow 2.6+ 下經過測試。
+這個 Repository 已在 Python 3.8+、Flax 0.4.1+、PyTorch 1.11+ 和 TensorFlow 2.6+ 下經過測試。

 你可以在[虛擬環境](https://docs.python.org/3/library/venv.html)中安裝 🤗 Transformers。如果你還不熟悉 Python 的虛擬環境，請閱此[使用者指引](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)。
diff --git a/setup.py b/setup.py
index aba442ff42ba25..f94063890467db 100644
--- a/setup.py
+++ b/setup.py
@@ -175,7 +175,7 @@
     "timeout-decorator",
     "timm",
     "tokenizers>=0.14,<0.19",
-    "torch>=1.10,!=1.12.0",
+    "torch>=1.11,!=1.12.0",
     "torchaudio",
     "torchvision",
     "pyctcdecode>=0.4.0",
diff --git a/src/transformers/convert_graph_to_onnx.py b/src/transformers/convert_graph_to_onnx.py
index 5449d98237ea64..4538f381f2eacd 100644
--- a/src/transformers/convert_graph_to_onnx.py
+++ b/src/transformers/convert_graph_to_onnx.py
@@ -273,40 +273,22 @@ def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format
     import torch
     from torch.onnx import export

-    from transformers.pytorch_utils import is_torch_less_than_1_11
-
     print(f"Using framework PyTorch: {torch.__version__}")

     with torch.no_grad():
         input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "pt")
         ordered_input_names, model_args = ensure_valid_input(nlp.model, tokens, input_names)

-        # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
-        # so we check the torch version for backwards compatibility
-        if is_torch_less_than_1_11:
-            export(
-                nlp.model,
-                model_args,
-                f=output.as_posix(),
-                input_names=ordered_input_names,
-                output_names=output_names,
-                dynamic_axes=dynamic_axes,
-                do_constant_folding=True,
-                use_external_data_format=use_external_format,
-                enable_onnx_checker=True,
-                opset_version=opset,
-            )
-        else:
-            export(
-                nlp.model,
-                model_args,
-                f=output.as_posix(),
-                input_names=ordered_input_names,
-                output_names=output_names,
-                dynamic_axes=dynamic_axes,
-                do_constant_folding=True,
-                opset_version=opset,
-            )
+        export(
+            nlp.model,
+            model_args,
+            f=output.as_posix(),
+            input_names=ordered_input_names,
+            output_names=output_names,
+            dynamic_axes=dynamic_axes,
+            do_constant_folding=True,
+            opset_version=opset,
+        )


 def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
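With torch 1.11 as the floor, only the modern `torch.onnx.export` signature remains (the `enable_onnx_checker` and `use_external_data_format` arguments were removed upstream in v1.11). A minimal, self-contained sketch of the same call pattern — the tiny model, file name, and input names here are illustrative, not from the PR:

```python
# Illustrative sketch of the single remaining export path; the model and
# names are made up for the example.
import torch

model = torch.nn.Linear(4, 2).eval()
dummy_input = torch.randn(1, 4)

with torch.no_grad():
    torch.onnx.export(
        model,
        (dummy_input,),
        f="linear.onnx",
        input_names=["input"],
        output_names=["output"],
        dynamic_axes={"input": {0: "batch"}, "output": {0: "batch"}},
        do_constant_folding=True,  # fold constant subgraphs at export time
        opset_version=14,
    )
```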
diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py
index fcace1826ac453..0d1620537f7121 100644
--- a/src/transformers/dependency_versions_table.py
+++ b/src/transformers/dependency_versions_table.py
@@ -80,7 +80,7 @@
     "timeout-decorator": "timeout-decorator",
     "timm": "timm",
     "tokenizers": "tokenizers>=0.14,<0.19",
-    "torch": "torch>=1.10,!=1.12.0",
+    "torch": "torch>=1.11,!=1.12.0",
     "torchaudio": "torchaudio",
     "torchvision": "torchvision",
     "pyctcdecode": "pyctcdecode>=0.4.0",
diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py
index 05d74d65425216..ceb8cf1d1d73d4 100644
--- a/src/transformers/modeling_utils.py
+++ b/src/transformers/modeling_utils.py
@@ -95,7 +95,6 @@
     is_torchdynamo_compiling,
 )
 from .utils.quantization_config import AwqConfig, BitsAndBytesConfig, GPTQConfig, QuantizationMethod
-from .utils.versions import require_version_core


 XLA_USE_BF16 = os.environ.get("XLA_USE_BF16", "0").upper()
@@ -2885,8 +2884,9 @@ def from_pretrained(

         if low_cpu_mem_usage:
             if device_map is not None:
-                # The max memory utils require PyTorch >= 1.10 to have torch.cuda.mem_get_info.
-                require_version_core("torch>=1.10")
+                # The max memory utils require PyTorch >= 1.10 to have torch.cuda.mem_get_info:
+                # `transformers` now requires it, so the check is always satisfied.
+                pass

             if is_deepspeed_zero3_enabled():
                 raise ValueError(
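For context, `torch.cuda.mem_get_info` (available since PyTorch 1.10, hence the dropped runtime check) is what the max-memory utilities rely on. A minimal sketch — not the library's implementation — of deriving a per-device memory map from it, assuming a CUDA build of torch:

```python
# Minimal sketch, assuming CUDA is available: build a max_memory-style
# dict from torch.cuda.mem_get_info; the helper name is hypothetical.
import torch


def free_memory_per_gpu() -> dict:
    memory = {}
    for device in range(torch.cuda.device_count()):
        free_bytes, total_bytes = torch.cuda.mem_get_info(device)
        memory[device] = free_bytes  # bytes currently free on this GPU
    return memory


if torch.cuda.is_available():
    print(free_memory_per_gpu())
```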
""" requires_backends(self.extract_flattened_patches, "torch") - _check_torch_version() # convert to torch image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format) diff --git a/src/transformers/onnx/convert.py b/src/transformers/onnx/convert.py index be46f7cd31064b..36cd3232d4ba45 100644 --- a/src/transformers/onnx/convert.py +++ b/src/transformers/onnx/convert.py @@ -33,7 +33,6 @@ if is_torch_available(): from ..modeling_utils import PreTrainedModel - from ..pytorch_utils import is_torch_less_than_1_11 if is_tf_available(): from ..modeling_tf_utils import TFPreTrainedModel @@ -167,49 +166,16 @@ def export_pytorch( config.patch_ops() - # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, - # so we check the torch version for backwards compatibility - if is_torch_less_than_1_11: - # export can work with named args but the dict containing named args - # has to be the last element of the args tuple. - try: - onnx_export( - model, - (model_inputs,), - f=output.as_posix(), - input_names=list(config.inputs.keys()), - output_names=onnx_outputs, - dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())), - do_constant_folding=True, - use_external_data_format=config.use_external_data_format(model.num_parameters()), - enable_onnx_checker=True, - opset_version=opset, - ) - except RuntimeError as err: - message = str(err) - if ( - message - == "Exporting model exceed maximum protobuf size of 2GB. Please call torch.onnx.export without" - " setting use_external_data_format parameter." - ): - message = ( - "Exporting model exceed maximum protobuf size of 2GB. Please call torch.onnx.export" - " without setting use_external_data_format parameter or try with torch 1.10+." - ) - raise RuntimeError(message) - else: - raise err - else: - onnx_export( - model, - (model_inputs,), - f=output.as_posix(), - input_names=list(config.inputs.keys()), - output_names=onnx_outputs, - dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())), - do_constant_folding=True, - opset_version=opset, - ) + onnx_export( + model, + (model_inputs,), + f=output.as_posix(), + input_names=list(config.inputs.keys()), + output_names=onnx_outputs, + dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())), + do_constant_folding=True, + opset_version=opset, + ) config.restore_ops() diff --git a/src/transformers/pytorch_utils.py b/src/transformers/pytorch_utils.py index 1a464b62a66513..eefd6707d45eeb 100644 --- a/src/transformers/pytorch_utils.py +++ b/src/transformers/pytorch_utils.py @@ -32,9 +32,6 @@ is_torch_greater_or_equal_than_2_0 = parsed_torch_version_base >= version.parse("2.0") is_torch_greater_or_equal_than_1_13 = parsed_torch_version_base >= version.parse("1.13") is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12") -is_torch_greater_or_equal_than_1_11 = parsed_torch_version_base >= version.parse("1.11") -is_torch_less_than_1_11 = parsed_torch_version_base < version.parse("1.11") -is_torch_1_8_0 = parsed_torch_version_base == version.parse("1.8.0") def softmax_backward_data(parent, grad_output, output, dim, self): @@ -45,10 +42,7 @@ def softmax_backward_data(parent, grad_output, output, dim, self): from torch import _softmax_backward_data - if is_torch_less_than_1_11: - return _softmax_backward_data(grad_output, output, parent.dim, self) - else: - return _softmax_backward_data(grad_output, output, parent.dim, self.dtype) + return _softmax_backward_data(grad_output, output, 
diff --git a/src/transformers/onnx/convert.py b/src/transformers/onnx/convert.py
index be46f7cd31064b..36cd3232d4ba45 100644
--- a/src/transformers/onnx/convert.py
+++ b/src/transformers/onnx/convert.py
@@ -33,7 +33,6 @@

 if is_torch_available():
     from ..modeling_utils import PreTrainedModel
-    from ..pytorch_utils import is_torch_less_than_1_11

 if is_tf_available():
     from ..modeling_tf_utils import TFPreTrainedModel
@@ -167,49 +166,16 @@ def export_pytorch(

         config.patch_ops()

-        # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
-        # so we check the torch version for backwards compatibility
-        if is_torch_less_than_1_11:
-            # export can work with named args but the dict containing named args
-            # has to be the last element of the args tuple.
-            try:
-                onnx_export(
-                    model,
-                    (model_inputs,),
-                    f=output.as_posix(),
-                    input_names=list(config.inputs.keys()),
-                    output_names=onnx_outputs,
-                    dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())),
-                    do_constant_folding=True,
-                    use_external_data_format=config.use_external_data_format(model.num_parameters()),
-                    enable_onnx_checker=True,
-                    opset_version=opset,
-                )
-            except RuntimeError as err:
-                message = str(err)
-                if (
-                    message
-                    == "Exporting model exceed maximum protobuf size of 2GB. Please call torch.onnx.export without"
-                    " setting use_external_data_format parameter."
-                ):
-                    message = (
-                        "Exporting model exceed maximum protobuf size of 2GB. Please call torch.onnx.export"
-                        " without setting use_external_data_format parameter or try with torch 1.10+."
-                    )
-                    raise RuntimeError(message)
-                else:
-                    raise err
-        else:
-            onnx_export(
-                model,
-                (model_inputs,),
-                f=output.as_posix(),
-                input_names=list(config.inputs.keys()),
-                output_names=onnx_outputs,
-                dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())),
-                do_constant_folding=True,
-                opset_version=opset,
-            )
+        onnx_export(
+            model,
+            (model_inputs,),
+            f=output.as_posix(),
+            input_names=list(config.inputs.keys()),
+            output_names=onnx_outputs,
+            dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())),
+            do_constant_folding=True,
+            opset_version=opset,
+        )

         config.restore_ops()
diff --git a/src/transformers/pytorch_utils.py b/src/transformers/pytorch_utils.py
index 1a464b62a66513..eefd6707d45eeb 100644
--- a/src/transformers/pytorch_utils.py
+++ b/src/transformers/pytorch_utils.py
@@ -32,9 +32,6 @@
 is_torch_greater_or_equal_than_2_0 = parsed_torch_version_base >= version.parse("2.0")
 is_torch_greater_or_equal_than_1_13 = parsed_torch_version_base >= version.parse("1.13")
 is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12")
-is_torch_greater_or_equal_than_1_11 = parsed_torch_version_base >= version.parse("1.11")
-is_torch_less_than_1_11 = parsed_torch_version_base < version.parse("1.11")
-is_torch_1_8_0 = parsed_torch_version_base == version.parse("1.8.0")


 def softmax_backward_data(parent, grad_output, output, dim, self):
@@ -45,10 +42,7 @@ def softmax_backward_data(parent, grad_output, output, dim, self):

     from torch import _softmax_backward_data

-    if is_torch_less_than_1_11:
-        return _softmax_backward_data(grad_output, output, parent.dim, self)
-    else:
-        return _softmax_backward_data(grad_output, output, parent.dim, self.dtype)
+    return _softmax_backward_data(grad_output, output, parent.dim, self.dtype)


 def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear:
diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py
index 2bac51fdf04910..3ac1195300c49a 100755
--- a/src/transformers/trainer.py
+++ b/src/transformers/trainer.py
@@ -64,7 +64,7 @@
 from .modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model
 from .models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES
 from .optimization import Adafactor, get_scheduler
-from .pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_less_than_1_11
+from .pytorch_utils import ALL_LAYERNORM_LAYERS
 from .tokenization_utils_base import PreTrainedTokenizerBase
 from .trainer_callback import (
     CallbackHandler,
@@ -1785,7 +1785,7 @@ def _inner_training_loop(
                 if version.parse(accelerate_version) > version.parse("0.23.0"):
                     sampler_kinds.append(SeedableRandomSampler)
                 is_random_sampler = isinstance(sampler, tuple(sampler_kinds))
-                if is_torch_less_than_1_11 or not is_random_sampler:
+                if not is_random_sampler:
                     # We just need to begin an iteration to create the randomization of the sampler.
                     for _ in train_dataloader:
                         break
diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index 9acb9d78d0a369..bf9fee0ce9272b 100644
--- a/src/transformers/training_args.py
+++ b/src/transformers/training_args.py
@@ -1441,13 +1441,8 @@ def __post_init__(self):
                     )
             elif is_torch_npu_available():
                 # npu
-                from .pytorch_utils import is_torch_greater_or_equal_than_1_11
-
-                if not is_torch_greater_or_equal_than_1_11:
-                    raise ValueError(
-                        "Your setup doesn't support bf16/npu. You need torch>=1.11, using Ascend NPU with "
-                        "`torch_npu` installed"
-                    )
+                # bf16/npu support needs `torch>=1.11`, which `transformers` now requires
+                pass
             elif not is_torch_xpu_available():
                 # xpu
                 from .pytorch_utils import is_torch_greater_or_equal_than_1_12
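The capability flags that remain in `pytorch_utils.py` all follow one pattern: parse the installed torch version once, then compare with `packaging.version`. A minimal sketch of that pattern, showing only the flags this change keeps:

```python
# Sketch of the version-flag pattern kept in pytorch_utils.py.
from packaging import version

import torch

# Strip local/pre-release suffixes (e.g. "2.1.0+cu118" -> "2.1.0").
parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version)

is_torch_greater_or_equal_than_2_0 = parsed_torch_version_base >= version.parse("2.0")
is_torch_greater_or_equal_than_1_13 = parsed_torch_version_base >= version.parse("1.13")
is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12")
```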
diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py
index eda34597a3964a..7a970b28ac5dc8 100644
--- a/src/transformers/utils/import_utils.py
+++ b/src/transformers/utils/import_utils.py
@@ -64,6 +64,7 @@ def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[

 FORCE_TF_AVAILABLE = os.environ.get("FORCE_TF_AVAILABLE", "AUTO").upper()

+# `transformers` requires `torch>=1.11` but this variable is exposed publicly, and we can't simply remove it.
 # This is the version of torch required to run torch.fx features and torch.onnx with dictionary inputs.
 TORCH_FX_REQUIRED_VERSION = version.parse("1.10")
diff --git a/tests/models/longt5/test_modeling_longt5.py b/tests/models/longt5/test_modeling_longt5.py
index b2d17dc0e67a23..5a8075c2dbe7ea 100644
--- a/tests/models/longt5/test_modeling_longt5.py
+++ b/tests/models/longt5/test_modeling_longt5.py
@@ -40,7 +40,6 @@
         LongT5Model,
     )
     from transformers.models.longt5.modeling_longt5 import LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST
-    from transformers.pytorch_utils import is_torch_less_than_1_11


 class LongT5ModelTester:
@@ -595,10 +594,6 @@ def test_model_from_pretrained(self):
             model = LongT5Model.from_pretrained(model_name)
             self.assertIsNotNone(model)

-    @unittest.skipIf(
-        not is_torch_available() or is_torch_less_than_1_11,
-        "Test failed with torch < 1.11 with an exception in a C++ file.",
-    )
     @slow
     def test_export_to_onnx(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
diff --git a/tests/models/pix2struct/test_image_processing_pix2struct.py b/tests/models/pix2struct/test_image_processing_pix2struct.py
index 4b06573fe61ff6..f0b94c4cf5a071 100644
--- a/tests/models/pix2struct/test_image_processing_pix2struct.py
+++ b/tests/models/pix2struct/test_image_processing_pix2struct.py
@@ -28,10 +28,6 @@
 if is_torch_available():
     import torch

-    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
-else:
-    is_torch_greater_or_equal_than_1_11 = False
-
 if is_vision_available():
     from PIL import Image

@@ -85,10 +81,6 @@ def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=F
     )


-@unittest.skipIf(
-    not is_torch_greater_or_equal_than_1_11,
-    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
-)
 @require_torch
 @require_vision
 class Pix2StructImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
@@ -290,10 +282,6 @@ def test_call_pytorch(self):
     )


-@unittest.skipIf(
-    not is_torch_greater_or_equal_than_1_11,
-    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
-)
 @require_torch
 @require_vision
 class Pix2StructImageProcessingTestFourChannels(ImageProcessingTestMixin, unittest.TestCase):
diff --git a/tests/models/pix2struct/test_modeling_pix2struct.py b/tests/models/pix2struct/test_modeling_pix2struct.py
index 002b287fd9f6b1..204f726a247b97 100644
--- a/tests/models/pix2struct/test_modeling_pix2struct.py
+++ b/tests/models/pix2struct/test_modeling_pix2struct.py
@@ -49,9 +49,6 @@
         Pix2StructVisionModel,
     )
     from transformers.models.pix2struct.modeling_pix2struct import PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST
-    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
-else:
-    is_torch_greater_or_equal_than_1_11 = False


 if is_vision_available():
@@ -746,10 +743,6 @@ def prepare_img():
     return im


-@unittest.skipIf(
-    not is_torch_greater_or_equal_than_1_11,
-    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
-)
 @require_vision
 @require_torch
 @slow
diff --git a/tests/models/pix2struct/test_processor_pix2struct.py b/tests/models/pix2struct/test_processor_pix2struct.py
index e0ee398b3a4dce..318e6f301f6eb8 100644
--- a/tests/models/pix2struct/test_processor_pix2struct.py
+++ b/tests/models/pix2struct/test_processor_pix2struct.py
@@ -19,14 +19,9 @@
 import pytest

 from transformers.testing_utils import require_torch, require_vision
-from transformers.utils import is_torch_available, is_vision_available
+from transformers.utils import is_vision_available


-if is_torch_available():
-    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
-else:
-    is_torch_greater_or_equal_than_1_11 = False
-
 if is_vision_available():
     from PIL import Image

@@ -39,10 +34,6 @@
     )


-@unittest.skipIf(
-    not is_torch_greater_or_equal_than_1_11,
-    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
-)
 @require_vision
 @require_torch
 class Pix2StructProcessorTest(unittest.TestCase):
diff --git a/tests/models/pop2piano/test_modeling_pop2piano.py b/tests/models/pop2piano/test_modeling_pop2piano.py
index d19ddc10e153fc..a99f713a7bd098 100644
--- a/tests/models/pop2piano/test_modeling_pop2piano.py
+++ b/tests/models/pop2piano/test_modeling_pop2piano.py
@@ -45,10 +45,6 @@
     from transformers import Pop2PianoForConditionalGeneration
     from transformers.models.pop2piano.modeling_pop2piano import POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST
-    from transformers.pytorch_utils import is_torch_1_8_0
-
-else:
-    is_torch_1_8_0 = False


 @require_torch
@@ -616,10 +612,6 @@ def test_model_from_pretrained(self):
         self.assertIsNotNone(model)

     @require_onnx
-    @unittest.skipIf(
-        is_torch_1_8_0,
-        reason="Test has a segmentation fault on torch 1.8.0",
-    )
     def test_export_to_onnx(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         model = Pop2PianoForConditionalGeneration(config_and_inputs[0]).to(torch_device)
diff --git a/tests/models/unispeech_sat/test_modeling_unispeech_sat.py b/tests/models/unispeech_sat/test_modeling_unispeech_sat.py
index 79fa54717378bb..3e71449357225a 100644
--- a/tests/models/unispeech_sat/test_modeling_unispeech_sat.py
+++ b/tests/models/unispeech_sat/test_modeling_unispeech_sat.py
@@ -906,7 +906,6 @@ def test_inference_diarization(self):
         )
         self.assertEqual(labels[0, :, 0].sum(), 270)
         self.assertEqual(labels[0, :, 1].sum(), 647)
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2))

     def test_inference_speaker_verification(self):
@@ -931,5 +930,4 @@ def test_inference_speaker_verification(self):

         # id10002 vs id10004
         self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).item(), 0.5616, 3)
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertAlmostEqual(outputs.loss.item(), 18.5925, 2)
diff --git a/tests/models/wav2vec2/test_modeling_wav2vec2.py b/tests/models/wav2vec2/test_modeling_wav2vec2.py
index 353606252c0dd4..a5757571a11a29 100644
--- a/tests/models/wav2vec2/test_modeling_wav2vec2.py
+++ b/tests/models/wav2vec2/test_modeling_wav2vec2.py
@@ -1928,7 +1928,6 @@ def test_inference_diarization(self):
         )
         self.assertEqual(labels[0, :, 0].sum(), 555)
         self.assertEqual(labels[0, :, 1].sum(), 299)
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2))

     def test_inference_speaker_verification(self):
@@ -1953,7 +1952,6 @@ def test_inference_speaker_verification(self):

         # id10002 vs id10004
         self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).numpy(), 0.7594, 3)
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertAlmostEqual(outputs.loss.item(), 17.7963, 2)

     @require_torchaudio
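The speaker-verification tests above compare utterance embeddings with cosine similarity. A small self-contained sketch of that check — the embedding size and decision threshold are illustrative, not taken from the tests:

```python
# Illustrative sketch of the cosine-similarity comparison used by the
# speaker-verification tests; the embeddings and threshold are made up.
import torch

cosine_sim = torch.nn.CosineSimilarity(dim=-1)

emb_a = torch.randn(512)  # embedding for utterance A
emb_b = torch.randn(512)  # embedding for utterance B

similarity = cosine_sim(emb_a, emb_b).item()
same_speaker = similarity > 0.75  # hypothetical decision threshold
print(f"cosine similarity: {similarity:.4f}, same speaker: {same_speaker}")
```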
diff --git a/tests/models/wavlm/test_modeling_wavlm.py b/tests/models/wavlm/test_modeling_wavlm.py
index 6db04e1841f16a..c0a8eed2096f35 100644
--- a/tests/models/wavlm/test_modeling_wavlm.py
+++ b/tests/models/wavlm/test_modeling_wavlm.py
@@ -515,7 +515,6 @@ def test_inference_base(self):
         EXPECTED_HIDDEN_STATES_SLICE = torch.tensor(
             [[[0.0577, 0.1161], [0.0579, 0.1165]], [[0.0199, 0.1237], [0.0059, 0.0605]]]
         )
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertTrue(torch.allclose(hidden_states_slice, EXPECTED_HIDDEN_STATES_SLICE, atol=5e-2))

     def test_inference_large(self):
@@ -567,7 +566,6 @@ def test_inference_diarization(self):
         )
         self.assertEqual(labels[0, :, 0].sum(), 258)
         self.assertEqual(labels[0, :, 1].sum(), 647)
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2))

     def test_inference_speaker_verification(self):
@@ -592,5 +590,4 @@ def test_inference_speaker_verification(self):

         # id10002 vs id10004
         self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).item(), 0.4780, 3)
-        # TODO: update the tolerance after the CI moves to torch 1.10
         self.assertAlmostEqual(outputs.loss.item(), 18.4154, 2)
diff --git a/tests/pipelines/test_pipelines_image_to_text.py b/tests/pipelines/test_pipelines_image_to_text.py
index 4f5b5780277bfc..b63589735d0777 100644
--- a/tests/pipelines/test_pipelines_image_to_text.py
+++ b/tests/pipelines/test_pipelines_image_to_text.py
@@ -20,7 +20,6 @@
 from transformers.pipelines import pipeline
 from transformers.testing_utils import (
     is_pipeline_test,
-    is_torch_available,
     require_tf,
     require_torch,
     require_vision,
@@ -30,12 +29,6 @@
 from .test_pipelines_common import ANY


-if is_torch_available():
-    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
-else:
-    is_torch_greater_or_equal_than_1_11 = False
-
-
 if is_vision_available():
     from PIL import Image
 else:
@@ -217,9 +210,6 @@ def test_conditional_generation_pt_git(self):
         with self.assertRaises(ValueError):
             outputs = pipe([image, image], prompt=[prompt, prompt])

-    @unittest.skipIf(
-        not is_torch_greater_or_equal_than_1_11, reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`."
-    )
     @slow
     @require_torch
     def test_conditional_generation_pt_pix2struct(self):
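All of the deleted decorators followed the same `unittest.skipIf` version-gating idiom, which remains in use for the newer flags that this change keeps. A minimal sketch of the pattern, using the `is_torch_greater_or_equal_than_1_12` flag that still exists in `pytorch_utils`; the test class and method are hypothetical:

```python
# Sketch of the version-gating pattern these deletions retire for torch 1.11;
# the same idiom still applies to the remaining flags (e.g. 1.12).
import unittest

from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_12


class ExampleTest(unittest.TestCase):
    @unittest.skipIf(
        not is_torch_greater_or_equal_than_1_12,
        reason="This hypothetical test needs `torch>=1.12`.",
    )
    def test_needs_recent_torch(self):
        self.assertTrue(True)
```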