From 64486191b066d8d5c21400e4b9f935127fd6b69f Mon Sep 17 00:00:00 2001
From: staghado
Date: Tue, 7 Nov 2023 10:10:28 +0100
Subject: [PATCH] remove check for overlap between cls token and pos embed

---
 src/transformers/models/vit/convert_vit_timm_to_pytorch.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/transformers/models/vit/convert_vit_timm_to_pytorch.py b/src/transformers/models/vit/convert_vit_timm_to_pytorch.py
index 15c383473eda0e..0ccd9b9f6685fe 100644
--- a/src/transformers/models/vit/convert_vit_timm_to_pytorch.py
+++ b/src/transformers/models/vit/convert_vit_timm_to_pytorch.py
@@ -173,8 +173,6 @@ def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
     if not isinstance(timm_model.patch_embed, timm.layers.PatchEmbed):
         raise ValueError(f"{vit_name} is not supported in transformers because it is a hybrid ResNet-ViT.")
 
-    # non-overlapping position and class token embedding (to be added)
-
     # get patch size and image size from the patch embedding submodule
     config.patch_size = timm_model.patch_embed.patch_size[0]
     config.image_size = timm_model.patch_embed.img_size[0]
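
For context, the surviving hunk context reads the patch and image size directly from timm's PatchEmbed submodule instead of relying on the removed placeholder check. Below is a minimal standalone sketch of that pattern, not part of the patch itself; it assumes timm >= 0.9 and transformers are installed, and "vit_base_patch16_224" is only an illustrative model name.

    # sketch only: mirrors how the converter derives config fields from timm's PatchEmbed
    import timm
    from transformers import ViTConfig

    timm_model = timm.create_model("vit_base_patch16_224", pretrained=False)

    # hybrid ResNet-ViT backbones use a different patch embedding class and are rejected
    assert isinstance(timm_model.patch_embed, timm.layers.PatchEmbed)

    config = ViTConfig()
    # PatchEmbed stores patch_size and img_size as (height, width) tuples;
    # square inputs are assumed, so the first element is enough
    config.patch_size = timm_model.patch_embed.patch_size[0]  # e.g. 16
    config.image_size = timm_model.patch_embed.img_size[0]    # e.g. 224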