diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 6e325e499f342d..ae910c1f5da793 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -643,6 +643,8 @@ title: DiNAT - local: model_doc/dinov2 title: DINOV2 + - local: model_doc/dinov2_with_registers + title: Dinov2WithRegisters - local: model_doc/dit title: DiT - local: model_doc/dpt
diff --git a/docs/source/en/index.md b/docs/source/en/index.md index 181ec8b10fb1fc..971ab3f15f74a5 100644 --- a/docs/source/en/index.md +++ b/docs/source/en/index.md @@ -124,6 +124,7 @@ Flax), PyTorch, and/or TensorFlow. | [DialoGPT](model_doc/dialogpt) | ✅ | ✅ | ✅ | | [DiNAT](model_doc/dinat) | ✅ | ❌ | ❌ | | [DINOv2](model_doc/dinov2) | ✅ | ❌ | ✅ | +| [Dinov2WithRegisters](model_doc/dinov2_with_registers) | ✅ | ❌ | ❌ | | [DistilBERT](model_doc/distilbert) | ✅ | ✅ | ✅ | | [DiT](model_doc/dit) | ✅ | ❌ | ✅ | | [DonutSwin](model_doc/donut) | ✅ | ❌ | ❌ |
diff --git a/docs/source/en/model_doc/dinov2_with_registers.md b/docs/source/en/model_doc/dinov2_with_registers.md new file mode 100644 index 00000000000000..f52b03d15bbd4d --- /dev/null +++ b/docs/source/en/model_doc/dinov2_with_registers.md @@ -0,0 +1,42 @@ + + +# Dinov2WithRegisters + +## Overview + +The DINOv2 with Registers model was proposed in [Vision Transformers Need Registers](https://arxiv.org/abs/2309.16588) by Timothée Darcet, Maxime Oquab, Julien Mairal, Piotr Bojanowski. + +The paper shows that adding extra tokens (registers) to the input sequence of a Vision Transformer, which the model uses for internal computations, removes artifacts from the feature maps and improves performance. + +The abstract from the paper is the following: + +*Transformers have recently emerged as a powerful tool for learning visual representations. In this paper, we identify and characterize artifacts in feature maps of both supervised and self-supervised ViT networks. The artifacts correspond to high-norm tokens appearing during inference primarily in low-informative background areas of images, that are repurposed for internal computations. We propose a simple yet effective solution based on providing additional tokens to the input sequence of the Vision Transformer to fill that role. We show that this solution fixes that problem entirely for both supervised and self-supervised models, sets a new state of the art for self-supervised visual models on dense visual prediction tasks, enables object discovery methods with larger models, and most importantly leads to smoother feature maps and attention maps for downstream visual processing.* + +Tips: + +- Usage of DINOv2 with registers is identical to plain DINOv2; you simply get better performance (a minimal usage sketch is included below). + +This model was contributed by [nielsr](https://huggingface.co/nielsr). +The original code can be found [here](https://github.com/facebookresearch/dinov2). + + +## Dinov2WithRegistersConfig + +[[autodoc]] Dinov2WithRegistersConfig + +## Dinov2WithRegistersModel + +[[autodoc]] Dinov2WithRegistersModel + - forward + +## Dinov2WithRegistersForImageClassification + +[[autodoc]] Dinov2WithRegistersForImageClassification + - forward \ No newline at end of file
diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 2eaec8f1def96e..ae6aa85cce7e15 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -18,7 +18,7 @@ # to defer the actual importing for when the objects are requested. This way `import transformers` provides the names # in the namespace without actually importing anything (and especially none of the backends).
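The tip in the new doc page says usage matches plain DINOv2, so a feature-extraction call looks the same as in the existing DINOv2 docs. Here is a minimal sketch; the checkpoint name is a placeholder (no checkpoint is named in this diff), and it assumes an image processor is registered for the `dinov2_with_registers` model type:

```python
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, Dinov2WithRegistersModel

# Placeholder checkpoint name -- substitute whichever Dinov2WithRegisters
# checkpoint actually gets published on the Hub.
checkpoint = "facebook/dinov2-with-registers-base"

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained(checkpoint)
model = Dinov2WithRegistersModel.from_pretrained(checkpoint)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# The hidden states cover the CLS token, the register tokens, and the patch tokens.
print(outputs.last_hidden_state.shape)
```

Swapping `Dinov2WithRegistersModel` for `Dinov2WithRegistersForImageClassification` (with a fine-tuned head) follows the same pattern.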
-__version__ = "4.48.0.dev0" +__version__ = "4.45.0.dev0" from typing import TYPE_CHECKING @@ -57,8 +57,7 @@ "agents": [ "Agent", "CodeAgent", - "HfApiEngine", - "ManagedAgent", + "HfEngine", "PipelineTool", "ReactAgent", "ReactCodeAgent", @@ -66,11 +65,9 @@ "Tool", "Toolbox", "ToolCollection", - "TransformersEngine", "launch_gradio_demo", "load_tool", "stream_to_gradio", - "tool", ], "audio_utils": [], "benchmark": [], @@ -115,6 +112,7 @@ "data.metrics": [], "data.processors": [], "debug_utils": [], + "deepspeed": [], "dependency_versions_check": [], "dependency_versions_table": [], "dynamic_module_utils": [], @@ -122,7 +120,6 @@ "feature_extraction_utils": ["BatchFeature", "FeatureExtractionMixin"], "file_utils": [], "generation": [ - "CompileConfig", "GenerationConfig", "TextIteratorStreamer", "TextStreamer", @@ -143,9 +140,7 @@ "is_tensorboard_available", "is_wandb_available", ], - "loss": [], "modelcard": ["ModelCard"], - # Losses "modeling_tf_pytorch_utils": [ "convert_tf_weight_name_to_pt_weight_name", "load_pytorch_checkpoint_in_tf2_model", @@ -170,11 +165,6 @@ "AltCLIPTextConfig", "AltCLIPVisionConfig", ], - "models.aria": [ - "AriaConfig", - "AriaProcessor", - "AriaTextConfig", - ], "models.audio_spectrogram_transformer": [ "ASTConfig", "ASTFeatureExtractor", @@ -397,6 +387,7 @@ "models.dialogpt": [], "models.dinat": ["DinatConfig"], "models.dinov2": ["Dinov2Config"], + "models.dinov2_with_registers": ["Dinov2WithRegistersConfig"], "models.distilbert": [ "DistilBertConfig", "DistilBertTokenizer", @@ -460,7 +451,6 @@ "GitProcessor", "GitVisionConfig", ], - "models.glm": ["GlmConfig"], "models.glpn": ["GLPNConfig"], "models.gpt2": [ "GPT2Config", @@ -472,8 +462,6 @@ "models.gpt_neox_japanese": ["GPTNeoXJapaneseConfig"], "models.gpt_sw3": [], "models.gptj": ["GPTJConfig"], - "models.granite": ["GraniteConfig"], - "models.granitemoe": ["GraniteMoeConfig"], "models.grounding_dino": [ "GroundingDinoConfig", "GroundingDinoProcessor", @@ -489,8 +477,6 @@ "models.ibert": ["IBertConfig"], "models.idefics": ["IdeficsConfig"], "models.idefics2": ["Idefics2Config"], - "models.idefics3": ["Idefics3Config"], - "models.ijepa": ["IJepaConfig"], "models.imagegpt": ["ImageGPTConfig"], "models.informer": ["InformerConfig"], "models.instructblip": [ @@ -546,7 +532,6 @@ "LlavaNextVideoConfig", "LlavaNextVideoProcessor", ], - "models.llava_onevision": ["LlavaOnevisionConfig", "LlavaOnevisionProcessor"], "models.longformer": [ "LongformerConfig", "LongformerTokenizer", @@ -584,13 +569,8 @@ "MgpstrProcessor", "MgpstrTokenizer", ], - "models.mimi": ["MimiConfig"], "models.mistral": ["MistralConfig"], "models.mixtral": ["MixtralConfig"], - "models.mllama": [ - "MllamaConfig", - "MllamaProcessor", - ], "models.mluke": [], "models.mobilebert": [ "MobileBertConfig", @@ -600,10 +580,6 @@ "models.mobilenet_v2": ["MobileNetV2Config"], "models.mobilevit": ["MobileViTConfig"], "models.mobilevitv2": ["MobileViTV2Config"], - "models.moshi": [ - "MoshiConfig", - "MoshiDepthConfig", - ], "models.mpnet": [ "MPNetConfig", "MPNetTokenizer", @@ -620,19 +596,12 @@ "MusicgenMelodyDecoderConfig", ], "models.mvp": ["MvpConfig", "MvpTokenizer"], - "models.myt5": ["MyT5Tokenizer"], "models.nemotron": ["NemotronConfig"], "models.nllb": [], "models.nllb_moe": ["NllbMoeConfig"], "models.nougat": ["NougatProcessor"], "models.nystromformer": ["NystromformerConfig"], "models.olmo": ["OlmoConfig"], - "models.olmo2": ["Olmo2Config"], - "models.olmoe": ["OlmoeConfig"], - "models.omdet_turbo": [ - "OmDetTurboConfig", - 
"OmDetTurboProcessor", - ], "models.oneformer": [ "OneFormerConfig", "OneFormerProcessor", @@ -669,7 +638,6 @@ "models.persimmon": ["PersimmonConfig"], "models.phi": ["PhiConfig"], "models.phi3": ["Phi3Config"], - "models.phimoe": ["PhimoeConfig"], "models.phobert": ["PhobertTokenizer"], "models.pix2struct": [ "Pix2StructConfig", @@ -677,7 +645,6 @@ "Pix2StructTextConfig", "Pix2StructVisionConfig", ], - "models.pixtral": ["PixtralProcessor", "PixtralVisionConfig"], "models.plbart": ["PLBartConfig"], "models.poolformer": ["PoolFormerConfig"], "models.pop2piano": ["Pop2PianoConfig"], @@ -697,10 +664,6 @@ "Qwen2AudioProcessor", ], "models.qwen2_moe": ["Qwen2MoeConfig"], - "models.qwen2_vl": [ - "Qwen2VLConfig", - "Qwen2VLProcessor", - ], "models.rag": ["RagConfig", "RagRetriever", "RagTokenizer"], "models.recurrent_gemma": ["RecurrentGemmaConfig"], "models.reformer": ["ReformerConfig"], @@ -862,7 +825,6 @@ "models.xmod": ["XmodConfig"], "models.yolos": ["YolosConfig"], "models.yoso": ["YosoConfig"], - "models.zamba": ["ZambaConfig"], "models.zoedepth": ["ZoeDepthConfig"], "onnx": [], "pipelines": [ @@ -876,7 +838,6 @@ "ImageClassificationPipeline", "ImageFeatureExtractionPipeline", "ImageSegmentationPipeline", - "ImageTextToTextPipeline", "ImageToImagePipeline", "ImageToTextPipeline", "JsonPipelineDataFormat", @@ -951,6 +912,7 @@ "is_av_available", "is_bitsandbytes_available", "is_datasets_available", + "is_decord_available", "is_faiss_available", "is_flax_available", "is_keras_nlp_available", @@ -983,9 +945,7 @@ "utils.quantization_config": [ "AqlmConfig", "AwqConfig", - "BitNetConfig", "BitsAndBytesConfig", - "CompressedTensorsConfig", "EetqConfig", "FbgemmFp8Config", "GPTQConfig", @@ -1181,7 +1141,6 @@ _import_structure["image_processing_base"] = ["ImageProcessingMixin"] _import_structure["image_processing_utils"] = ["BaseImageProcessor"] _import_structure["image_utils"] = ["ImageFeatureExtractionMixin"] - _import_structure["models.aria"].extend(["AriaImageProcessor"]) _import_structure["models.beit"].extend(["BeitFeatureExtractor", "BeitImageProcessor"]) _import_structure["models.bit"].extend(["BitImageProcessor"]) _import_structure["models.blip"].extend(["BlipImageProcessor"]) @@ -1211,7 +1170,6 @@ _import_structure["models.grounding_dino"].extend(["GroundingDinoImageProcessor"]) _import_structure["models.idefics"].extend(["IdeficsImageProcessor"]) _import_structure["models.idefics2"].extend(["Idefics2ImageProcessor"]) - _import_structure["models.idefics3"].extend(["Idefics3ImageProcessor"]) _import_structure["models.imagegpt"].extend(["ImageGPTFeatureExtractor", "ImageGPTImageProcessor"]) _import_structure["models.instructblipvideo"].extend(["InstructBlipVideoImageProcessor"]) _import_structure["models.layoutlmv2"].extend(["LayoutLMv2FeatureExtractor", "LayoutLMv2ImageProcessor"]) @@ -1219,12 +1177,8 @@ _import_structure["models.levit"].extend(["LevitFeatureExtractor", "LevitImageProcessor"]) _import_structure["models.llava_next"].append("LlavaNextImageProcessor") _import_structure["models.llava_next_video"].append("LlavaNextVideoImageProcessor") - _import_structure["models.llava_onevision"].extend( - ["LlavaOnevisionImageProcessor", "LlavaOnevisionVideoProcessor"] - ) _import_structure["models.mask2former"].append("Mask2FormerImageProcessor") _import_structure["models.maskformer"].extend(["MaskFormerFeatureExtractor", "MaskFormerImageProcessor"]) - _import_structure["models.mllama"].extend(["MllamaImageProcessor"]) 
_import_structure["models.mobilenet_v1"].extend(["MobileNetV1FeatureExtractor", "MobileNetV1ImageProcessor"]) _import_structure["models.mobilenet_v2"].extend(["MobileNetV2FeatureExtractor", "MobileNetV2ImageProcessor"]) _import_structure["models.mobilevit"].extend(["MobileViTFeatureExtractor", "MobileViTImageProcessor"]) @@ -1234,10 +1188,8 @@ _import_structure["models.owlvit"].extend(["OwlViTFeatureExtractor", "OwlViTImageProcessor"]) _import_structure["models.perceiver"].extend(["PerceiverFeatureExtractor", "PerceiverImageProcessor"]) _import_structure["models.pix2struct"].extend(["Pix2StructImageProcessor"]) - _import_structure["models.pixtral"].append("PixtralImageProcessor") _import_structure["models.poolformer"].extend(["PoolFormerFeatureExtractor", "PoolFormerImageProcessor"]) _import_structure["models.pvt"].extend(["PvtImageProcessor"]) - _import_structure["models.qwen2_vl"].extend(["Qwen2VLImageProcessor"]) _import_structure["models.rt_detr"].extend(["RTDetrImageProcessor"]) _import_structure["models.sam"].extend(["SamImageProcessor"]) _import_structure["models.segformer"].extend(["SegformerFeatureExtractor", "SegformerImageProcessor"]) @@ -1266,10 +1218,6 @@ ] else: _import_structure["image_processing_utils_fast"] = ["BaseImageProcessorFast"] - _import_structure["models.deformable_detr"].append("DeformableDetrImageProcessorFast") - _import_structure["models.detr"].append("DetrImageProcessorFast") - _import_structure["models.pixtral"].append("PixtralImageProcessorFast") - _import_structure["models.rt_detr"].append("RTDetrImageProcessorFast") _import_structure["models.vit"].append("ViTImageProcessorFast") # PyTorch-backed objects @@ -1293,7 +1241,6 @@ "HybridCache", "MambaCache", "OffloadedCache", - "OffloadedStaticCache", "QuantizedCache", "QuantizedCacheConfig", "QuantoQuantizedCache", @@ -1315,8 +1262,6 @@ _import_structure["generation"].extend( [ "AlternatingCodebooksLogitsProcessor", - "BayesianDetectorConfig", - "BayesianDetectorModel", "BeamScorer", "BeamSearchScorer", "ClassifierFreeGuidanceLogitsProcessor", @@ -1332,6 +1277,7 @@ "ExponentialDecayLengthPenalty", "ForcedBOSTokenLogitsProcessor", "ForcedEOSTokenLogitsProcessor", + "ForceTokensLogitsProcessor", "GenerationMixin", "HammingDiversityLogitsProcessor", "InfNanRemoveLogitsProcessor", @@ -1355,9 +1301,6 @@ "StopStringCriteria", "SuppressTokensAtBeginLogitsProcessor", "SuppressTokensLogitsProcessor", - "SynthIDTextWatermarkDetector", - "SynthIDTextWatermarkingConfig", - "SynthIDTextWatermarkLogitsProcessor", "TemperatureLogitsWarper", "TopKLogitsWarper", "TopPLogitsWarper", @@ -1368,13 +1311,6 @@ "WhisperTimeStampLogitsProcessor", ] ) - - # PyTorch domain libraries integration - _import_structure["integrations.executorch"] = [ - "TorchExportableModuleWithStaticCache", - "convert_and_export_with_cache", - ] - _import_structure["modeling_flash_attention_utils"] = [] _import_structure["modeling_outputs"] = [] _import_structure["modeling_rope_utils"] = ["ROPE_INIT_FUNCTIONS"] @@ -1404,6 +1340,7 @@ "AlignVisionModel", ] ) + _import_structure["models.altclip"].extend( [ "AltCLIPModel", @@ -1412,15 +1349,6 @@ "AltCLIPVisionModel", ] ) - _import_structure["models.aria"].extend( - [ - "AriaForConditionalGeneration", - "AriaPreTrainedModel", - "AriaTextForCausalLM", - "AriaTextModel", - "AriaTextPreTrainedModel", - ] - ) _import_structure["models.audio_spectrogram_transformer"].extend( [ "ASTForAudioClassification", @@ -1442,7 +1370,6 @@ "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", "MODEL_FOR_IMAGE_MAPPING", 
"MODEL_FOR_IMAGE_SEGMENTATION_MAPPING", - "MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING", "MODEL_FOR_IMAGE_TO_IMAGE_MAPPING", "MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING", "MODEL_FOR_KEYPOINT_DETECTION_MAPPING", @@ -1484,7 +1411,6 @@ "AutoModelForDocumentQuestionAnswering", "AutoModelForImageClassification", "AutoModelForImageSegmentation", - "AutoModelForImageTextToText", "AutoModelForImageToImage", "AutoModelForInstanceSegmentation", "AutoModelForKeypointDetection", @@ -1562,6 +1488,7 @@ "BertForQuestionAnswering", "BertForSequenceClassification", "BertForTokenClassification", + "BertLayer", "BertLMHeadModel", "BertModel", "BertPreTrainedModel", @@ -1585,6 +1512,7 @@ "BigBirdForQuestionAnswering", "BigBirdForSequenceClassification", "BigBirdForTokenClassification", + "BigBirdLayer", "BigBirdModel", "BigBirdPreTrainedModel", "load_tf_weights_in_big_bird", @@ -1647,13 +1575,10 @@ _import_structure["models.blip_2"].extend( [ "Blip2ForConditionalGeneration", - "Blip2ForImageTextRetrieval", "Blip2Model", "Blip2PreTrainedModel", "Blip2QFormerModel", - "Blip2TextModelWithProjection", "Blip2VisionModel", - "Blip2VisionModelWithProjection", ] ) _import_structure["models.bloom"].extend( @@ -1703,6 +1628,7 @@ "CanineForQuestionAnswering", "CanineForSequenceClassification", "CanineForTokenClassification", + "CanineLayer", "CanineModel", "CaninePreTrainedModel", "load_tf_weights_in_canine", @@ -1789,6 +1715,7 @@ "ConvBertForQuestionAnswering", "ConvBertForSequenceClassification", "ConvBertForTokenClassification", + "ConvBertLayer", "ConvBertModel", "ConvBertPreTrainedModel", "load_tf_weights_in_convbert", @@ -2017,6 +1944,7 @@ "QDQBertForQuestionAnswering", "QDQBertForSequenceClassification", "QDQBertForTokenClassification", + "QDQBertLayer", "QDQBertLMHeadModel", "QDQBertModel", "QDQBertPreTrainedModel", @@ -2122,6 +2050,14 @@ "Dinov2PreTrainedModel", ] ) + _import_structure["models.dinov2_with_registers"].extend( + [ + "Dinov2WithRegistersBackbone", + "Dinov2WithRegistersForImageClassification", + "Dinov2WithRegistersModel", + "Dinov2WithRegistersPreTrainedModel", + ] + ) _import_structure["models.distilbert"].extend( [ "DistilBertForMaskedLM", @@ -2268,6 +2204,7 @@ "FNetForQuestionAnswering", "FNetForSequenceClassification", "FNetForTokenClassification", + "FNetLayer", "FNetModel", "FNetPreTrainedModel", ] @@ -2323,15 +2260,6 @@ "GitVisionModel", ] ) - _import_structure["models.glm"].extend( - [ - "GlmForCausalLM", - "GlmForSequenceClassification", - "GlmForTokenClassification", - "GlmModel", - "GlmPreTrainedModel", - ] - ) _import_structure["models.glpn"].extend( [ "GLPNForDepthEstimation", @@ -2377,6 +2305,7 @@ "GPTNeoXForQuestionAnswering", "GPTNeoXForSequenceClassification", "GPTNeoXForTokenClassification", + "GPTNeoXLayer", "GPTNeoXModel", "GPTNeoXPreTrainedModel", ] @@ -2384,6 +2313,7 @@ _import_structure["models.gpt_neox_japanese"].extend( [ "GPTNeoXJapaneseForCausalLM", + "GPTNeoXJapaneseLayer", "GPTNeoXJapaneseModel", "GPTNeoXJapanesePreTrainedModel", ] @@ -2397,20 +2327,6 @@ "GPTJPreTrainedModel", ] ) - _import_structure["models.granite"].extend( - [ - "GraniteForCausalLM", - "GraniteModel", - "GranitePreTrainedModel", - ] - ) - _import_structure["models.granitemoe"].extend( - [ - "GraniteMoeForCausalLM", - "GraniteMoeModel", - "GraniteMoePreTrainedModel", - ] - ) _import_structure["models.grounding_dino"].extend( [ "GroundingDinoForObjectDetection", @@ -2470,23 +2386,6 @@ "Idefics2Processor", ] ) - _import_structure["models.idefics3"].extend( - [ - "Idefics3ForConditionalGeneration", - 
"Idefics3Model", - "Idefics3PreTrainedModel", - "Idefics3Processor", - "Idefics3VisionConfig", - "Idefics3VisionTransformer", - ] - ) - _import_structure["models.ijepa"].extend( - [ - "IJepaForImageClassification", - "IJepaModel", - "IJepaPreTrainedModel", - ] - ) _import_structure["models.imagegpt"].extend( [ "ImageGPTForCausalImageModeling", @@ -2624,12 +2523,6 @@ "LlavaNextVideoPreTrainedModel", ] ) - _import_structure["models.llava_onevision"].extend( - [ - "LlavaOnevisionForConditionalGeneration", - "LlavaOnevisionPreTrainedModel", - ] - ) _import_structure["models.longformer"].extend( [ "LongformerForMaskedLM", @@ -2639,6 +2532,7 @@ "LongformerForTokenClassification", "LongformerModel", "LongformerPreTrainedModel", + "LongformerSelfAttention", ] ) _import_structure["models.longt5"].extend( @@ -2671,6 +2565,7 @@ "LxmertModel", "LxmertPreTrainedModel", "LxmertVisualFeatureEncoder", + "LxmertXLayer", ] ) _import_structure["models.m2m_100"].extend( @@ -2694,9 +2589,7 @@ "Mamba2PreTrainedModel", ] ) - _import_structure["models.marian"].extend( - ["MarianForCausalLM", "MarianModel", "MarianMTModel", "MarianPreTrainedModel"] - ) + _import_structure["models.marian"].extend(["MarianForCausalLM", "MarianModel", "MarianMTModel"]) _import_structure["models.markuplm"].extend( [ "MarkupLMForQuestionAnswering", @@ -2752,16 +2645,9 @@ "MgpstrPreTrainedModel", ] ) - _import_structure["models.mimi"].extend( - [ - "MimiModel", - "MimiPreTrainedModel", - ] - ) _import_structure["models.mistral"].extend( [ "MistralForCausalLM", - "MistralForQuestionAnswering", "MistralForSequenceClassification", "MistralForTokenClassification", "MistralModel", @@ -2771,23 +2657,12 @@ _import_structure["models.mixtral"].extend( [ "MixtralForCausalLM", - "MixtralForQuestionAnswering", "MixtralForSequenceClassification", "MixtralForTokenClassification", "MixtralModel", "MixtralPreTrainedModel", ] ) - _import_structure["models.mllama"].extend( - [ - "MllamaForCausalLM", - "MllamaForConditionalGeneration", - "MllamaPreTrainedModel", - "MllamaProcessor", - "MllamaTextModel", - "MllamaVisionModel", - ] - ) _import_structure["models.mobilebert"].extend( [ "MobileBertForMaskedLM", @@ -2797,6 +2672,7 @@ "MobileBertForQuestionAnswering", "MobileBertForSequenceClassification", "MobileBertForTokenClassification", + "MobileBertLayer", "MobileBertModel", "MobileBertPreTrainedModel", "load_tf_weights_in_mobilebert", @@ -2835,14 +2711,6 @@ "MobileViTV2PreTrainedModel", ] ) - _import_structure["models.moshi"].extend( - [ - "MoshiForCausalLM", - "MoshiForConditionalGeneration", - "MoshiModel", - "MoshiPreTrainedModel", - ] - ) _import_structure["models.mpnet"].extend( [ "MPNetForMaskedLM", @@ -2850,6 +2718,7 @@ "MPNetForQuestionAnswering", "MPNetForSequenceClassification", "MPNetForTokenClassification", + "MPNetLayer", "MPNetModel", "MPNetPreTrainedModel", ] @@ -2939,6 +2808,7 @@ "NystromformerForQuestionAnswering", "NystromformerForSequenceClassification", "NystromformerForTokenClassification", + "NystromformerLayer", "NystromformerModel", "NystromformerPreTrainedModel", ] @@ -2950,26 +2820,6 @@ "OlmoPreTrainedModel", ] ) - _import_structure["models.olmo2"].extend( - [ - "Olmo2ForCausalLM", - "Olmo2Model", - "Olmo2PreTrainedModel", - ] - ) - _import_structure["models.olmoe"].extend( - [ - "OlmoeForCausalLM", - "OlmoeModel", - "OlmoePreTrainedModel", - ] - ) - _import_structure["models.omdet_turbo"].extend( - [ - "OmDetTurboForObjectDetection", - "OmDetTurboPreTrainedModel", - ] - ) _import_structure["models.oneformer"].extend( [ 
"OneFormerForUniversalSegmentation", @@ -3065,6 +2915,7 @@ "PerceiverForMultimodalAutoencoding", "PerceiverForOpticalFlow", "PerceiverForSequenceClassification", + "PerceiverLayer", "PerceiverModel", "PerceiverPreTrainedModel", ] @@ -3096,14 +2947,6 @@ "Phi3PreTrainedModel", ] ) - _import_structure["models.phimoe"].extend( - [ - "PhimoeForCausalLM", - "PhimoeForSequenceClassification", - "PhimoeModel", - "PhimoePreTrainedModel", - ] - ) _import_structure["models.pix2struct"].extend( [ "Pix2StructForConditionalGeneration", @@ -3112,7 +2955,6 @@ "Pix2StructVisionModel", ] ) - _import_structure["models.pixtral"].extend(["PixtralPreTrainedModel", "PixtralVisionModel"]) _import_structure["models.plbart"].extend( [ "PLBartForCausalLM", @@ -3163,7 +3005,6 @@ _import_structure["models.qwen2"].extend( [ "Qwen2ForCausalLM", - "Qwen2ForQuestionAnswering", "Qwen2ForSequenceClassification", "Qwen2ForTokenClassification", "Qwen2Model", @@ -3180,20 +3021,12 @@ _import_structure["models.qwen2_moe"].extend( [ "Qwen2MoeForCausalLM", - "Qwen2MoeForQuestionAnswering", "Qwen2MoeForSequenceClassification", "Qwen2MoeForTokenClassification", "Qwen2MoeModel", "Qwen2MoePreTrainedModel", ] ) - _import_structure["models.qwen2_vl"].extend( - [ - "Qwen2VLForConditionalGeneration", - "Qwen2VLModel", - "Qwen2VLPreTrainedModel", - ] - ) _import_structure["models.rag"].extend( [ "RagModel", @@ -3211,9 +3044,11 @@ ) _import_structure["models.reformer"].extend( [ + "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", + "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", @@ -3234,6 +3069,7 @@ "RemBertForQuestionAnswering", "RemBertForSequenceClassification", "RemBertForTokenClassification", + "RemBertLayer", "RemBertModel", "RemBertPreTrainedModel", "load_tf_weights_in_rembert", @@ -3280,6 +3116,7 @@ "RoCBertForQuestionAnswering", "RoCBertForSequenceClassification", "RoCBertForTokenClassification", + "RoCBertLayer", "RoCBertModel", "RoCBertPreTrainedModel", "load_tf_weights_in_roc_bert", @@ -3293,6 +3130,7 @@ "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", + "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", @@ -3349,6 +3187,7 @@ "SegformerDecodeHead", "SegformerForImageClassification", "SegformerForSemanticSegmentation", + "SegformerLayer", "SegformerModel", "SegformerPreTrainedModel", ] @@ -3407,6 +3246,7 @@ [ "SplinterForPreTraining", "SplinterForQuestionAnswering", + "SplinterLayer", "SplinterModel", "SplinterPreTrainedModel", ] @@ -3419,6 +3259,7 @@ "SqueezeBertForSequenceClassification", "SqueezeBertForTokenClassification", "SqueezeBertModel", + "SqueezeBertModule", "SqueezeBertPreTrainedModel", ] ) @@ -3617,6 +3458,7 @@ "ViltForMaskedLM", "ViltForQuestionAnswering", "ViltForTokenClassification", + "ViltLayer", "ViltModel", "ViltPreTrainedModel", ] @@ -3636,6 +3478,7 @@ "VisualBertForQuestionAnswering", "VisualBertForRegionToPhraseAlignment", "VisualBertForVisualReasoning", + "VisualBertLayer", "VisualBertModel", "VisualBertPreTrainedModel", ] @@ -3651,6 +3494,7 @@ _import_structure["models.vit_mae"].extend( [ "ViTMAEForPreTraining", + "ViTMAELayer", "ViTMAEModel", "ViTMAEPreTrainedModel", ] @@ -3830,18 +3674,11 @@ "YosoForQuestionAnswering", "YosoForSequenceClassification", "YosoForTokenClassification", + "YosoLayer", "YosoModel", "YosoPreTrainedModel", ] ) - _import_structure["models.zamba"].extend( - [ - "ZambaForCausalLM", - 
"ZambaForSequenceClassification", - "ZambaModel", - "ZambaPreTrainedModel", - ] - ) _import_structure["models.zoedepth"].extend( [ "ZoeDepthForDepthEstimation", @@ -3984,6 +3821,7 @@ ) _import_structure["models.bert"].extend( [ + "TFBertEmbeddings", "TFBertForMaskedLM", "TFBertForMultipleChoice", "TFBertForNextSentencePrediction", @@ -4049,6 +3887,7 @@ "TFConvBertForQuestionAnswering", "TFConvBertForSequenceClassification", "TFConvBertForTokenClassification", + "TFConvBertLayer", "TFConvBertModel", "TFConvBertPreTrainedModel", ] @@ -4279,6 +4118,7 @@ "TFLongformerForTokenClassification", "TFLongformerModel", "TFLongformerPreTrainedModel", + "TFLongformerSelfAttention", ] ) _import_structure["models.lxmert"].extend( @@ -4379,6 +4219,7 @@ "TFRemBertForQuestionAnswering", "TFRemBertForSequenceClassification", "TFRemBertForTokenClassification", + "TFRemBertLayer", "TFRemBertModel", "TFRemBertPreTrainedModel", ] @@ -4424,6 +4265,7 @@ "TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", + "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] @@ -4950,8 +4792,7 @@ from .agents import ( Agent, CodeAgent, - HfApiEngine, - ManagedAgent, + HfEngine, PipelineTool, ReactAgent, ReactCodeAgent, @@ -4959,11 +4800,9 @@ Tool, Toolbox, ToolCollection, - TransformersEngine, launch_gradio_demo, load_tool, stream_to_gradio, - tool, ) from .configuration_utils import PretrainedConfig @@ -5007,7 +4846,7 @@ from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin # Generation - from .generation import CompileConfig, GenerationConfig, TextIteratorStreamer, TextStreamer, WatermarkingConfig + from .generation import GenerationConfig, TextIteratorStreamer, TextStreamer, WatermarkingConfig from .hf_argparser import HfArgumentParser # Integrations @@ -5050,11 +4889,6 @@ AltCLIPTextConfig, AltCLIPVisionConfig, ) - from .models.aria import ( - AriaConfig, - AriaProcessor, - AriaTextConfig, - ) from .models.audio_spectrogram_transformer import ( ASTConfig, ASTFeatureExtractor, @@ -5299,6 +5133,7 @@ from .models.detr import DetrConfig from .models.dinat import DinatConfig from .models.dinov2 import Dinov2Config + from .models.dinov2_with_registers import Dinov2WithRegistersConfig from .models.distilbert import ( DistilBertConfig, DistilBertTokenizer, @@ -5363,7 +5198,6 @@ GitProcessor, GitVisionConfig, ) - from .models.glm import GlmConfig from .models.glpn import GLPNConfig from .models.gpt2 import ( GPT2Config, @@ -5378,8 +5212,6 @@ GPTNeoXJapaneseConfig, ) from .models.gptj import GPTJConfig - from .models.granite import GraniteConfig - from .models.granitemoe import GraniteMoeConfig from .models.grounding_dino import ( GroundingDinoConfig, GroundingDinoProcessor, @@ -5397,8 +5229,6 @@ IdeficsConfig, ) from .models.idefics2 import Idefics2Config - from .models.idefics3 import Idefics3Config - from .models.ijepa import IJepaConfig from .models.imagegpt import ImageGPTConfig from .models.informer import InformerConfig from .models.instructblip import ( @@ -5454,10 +5284,6 @@ LlavaNextVideoConfig, LlavaNextVideoProcessor, ) - from .models.llava_onevision import ( - LlavaOnevisionConfig, - LlavaOnevisionProcessor, - ) from .models.longformer import ( LongformerConfig, LongformerTokenizer, @@ -5497,15 +5323,8 @@ MgpstrProcessor, MgpstrTokenizer, ) - from .models.mimi import ( - MimiConfig, - ) from .models.mistral import MistralConfig from .models.mixtral import MixtralConfig - from .models.mllama import ( - MllamaConfig, - 
MllamaProcessor, - ) from .models.mobilebert import ( MobileBertConfig, MobileBertTokenizer, @@ -5522,10 +5341,6 @@ from .models.mobilevitv2 import ( MobileViTV2Config, ) - from .models.moshi import ( - MoshiConfig, - MoshiDepthConfig, - ) from .models.mpnet import ( MPNetConfig, MPNetTokenizer, @@ -5542,7 +5357,6 @@ MusicgenMelodyDecoderConfig, ) from .models.mvp import MvpConfig, MvpTokenizer - from .models.myt5 import MyT5Tokenizer from .models.nemotron import NemotronConfig from .models.nllb_moe import NllbMoeConfig from .models.nougat import NougatProcessor @@ -5550,12 +5364,6 @@ NystromformerConfig, ) from .models.olmo import OlmoConfig - from .models.olmo2 import Olmo2Config - from .models.olmoe import OlmoeConfig - from .models.omdet_turbo import ( - OmDetTurboConfig, - OmDetTurboProcessor, - ) from .models.oneformer import ( OneFormerConfig, OneFormerProcessor, @@ -5600,7 +5408,6 @@ ) from .models.phi import PhiConfig from .models.phi3 import Phi3Config - from .models.phimoe import PhimoeConfig from .models.phobert import PhobertTokenizer from .models.pix2struct import ( Pix2StructConfig, @@ -5608,10 +5415,6 @@ Pix2StructTextConfig, Pix2StructVisionConfig, ) - from .models.pixtral import ( - PixtralProcessor, - PixtralVisionConfig, - ) from .models.plbart import PLBartConfig from .models.poolformer import ( PoolFormerConfig, @@ -5632,10 +5435,6 @@ Qwen2AudioProcessor, ) from .models.qwen2_moe import Qwen2MoeConfig - from .models.qwen2_vl import ( - Qwen2VLConfig, - Qwen2VLProcessor, - ) from .models.rag import RagConfig, RagRetriever, RagTokenizer from .models.recurrent_gemma import RecurrentGemmaConfig from .models.reformer import ReformerConfig @@ -5825,7 +5624,6 @@ from .models.xmod import XmodConfig from .models.yolos import YolosConfig from .models.yoso import YosoConfig - from .models.zamba import ZambaConfig from .models.zoedepth import ZoeDepthConfig # Pipelines @@ -5840,7 +5638,6 @@ ImageClassificationPipeline, ImageFeatureExtractionPipeline, ImageSegmentationPipeline, - ImageTextToTextPipeline, ImageToImagePipeline, ImageToTextPipeline, JsonPipelineDataFormat, @@ -5919,6 +5716,7 @@ is_av_available, is_bitsandbytes_available, is_datasets_available, + is_decord_available, is_faiss_available, is_flax_available, is_keras_nlp_available, @@ -5953,9 +5751,7 @@ from .utils.quantization_config import ( AqlmConfig, AwqConfig, - BitNetConfig, BitsAndBytesConfig, - CompressedTensorsConfig, EetqConfig, FbgemmFp8Config, GPTQConfig, @@ -5988,8 +5784,7 @@ from .models.llama import LlamaTokenizer from .models.m2m_100 import M2M100Tokenizer from .models.marian import MarianTokenizer - from .models.mbart import MBartTokenizer - from .models.mbart50 import MBart50Tokenizer + from .models.mbart import MBart50Tokenizer, MBartTokenizer from .models.mluke import MLukeTokenizer from .models.mt5 import MT5Tokenizer from .models.nllb import NllbTokenizer @@ -6118,7 +5913,6 @@ from .image_processing_base import ImageProcessingMixin from .image_processing_utils import BaseImageProcessor from .image_utils import ImageFeatureExtractionMixin - from .models.aria import AriaImageProcessor from .models.beit import BeitFeatureExtractor, BeitImageProcessor from .models.bit import BitImageProcessor from .models.blip import BlipImageProcessor @@ -6134,7 +5928,10 @@ ConditionalDetrImageProcessor, ) from .models.convnext import ConvNextFeatureExtractor, ConvNextImageProcessor - from .models.deformable_detr import DeformableDetrFeatureExtractor, DeformableDetrImageProcessor + from .models.deformable_detr import 
( + DeformableDetrFeatureExtractor, + DeformableDetrImageProcessor, + ) from .models.deit import DeiTFeatureExtractor, DeiTImageProcessor from .models.deprecated.deta import DetaImageProcessor from .models.deprecated.efficientformer import EfficientFormerImageProcessor @@ -6154,7 +5951,6 @@ from .models.grounding_dino import GroundingDinoImageProcessor from .models.idefics import IdeficsImageProcessor from .models.idefics2 import Idefics2ImageProcessor - from .models.idefics3 import Idefics3ImageProcessor from .models.imagegpt import ImageGPTFeatureExtractor, ImageGPTImageProcessor from .models.instructblipvideo import InstructBlipVideoImageProcessor from .models.layoutlmv2 import ( @@ -6168,13 +5964,11 @@ from .models.levit import LevitFeatureExtractor, LevitImageProcessor from .models.llava_next import LlavaNextImageProcessor from .models.llava_next_video import LlavaNextVideoImageProcessor - from .models.llava_onevision import LlavaOnevisionImageProcessor, LlavaOnevisionVideoProcessor from .models.mask2former import Mask2FormerImageProcessor from .models.maskformer import ( MaskFormerFeatureExtractor, MaskFormerImageProcessor, ) - from .models.mllama import MllamaImageProcessor from .models.mobilenet_v1 import ( MobileNetV1FeatureExtractor, MobileNetV1ImageProcessor, @@ -6190,13 +5984,11 @@ from .models.owlvit import OwlViTFeatureExtractor, OwlViTImageProcessor from .models.perceiver import PerceiverFeatureExtractor, PerceiverImageProcessor from .models.pix2struct import Pix2StructImageProcessor - from .models.pixtral import PixtralImageProcessor from .models.poolformer import ( PoolFormerFeatureExtractor, PoolFormerImageProcessor, ) from .models.pvt import PvtImageProcessor - from .models.qwen2_vl import Qwen2VLImageProcessor from .models.rt_detr import RTDetrImageProcessor from .models.sam import SamImageProcessor from .models.segformer import SegformerFeatureExtractor, SegformerImageProcessor @@ -6221,10 +6013,6 @@ from .utils.dummy_torchvision_objects import * else: from .image_processing_utils_fast import BaseImageProcessorFast - from .models.deformable_detr import DeformableDetrImageProcessorFast - from .models.detr import DetrImageProcessorFast - from .models.pixtral import PixtralImageProcessorFast - from .models.rt_detr import RTDetrImageProcessorFast from .models.vit import ViTImageProcessorFast # Modeling @@ -6246,7 +6034,6 @@ HybridCache, MambaCache, OffloadedCache, - OffloadedStaticCache, QuantizedCache, QuantizedCacheConfig, QuantoQuantizedCache, @@ -6267,8 +6054,6 @@ ) from .generation import ( AlternatingCodebooksLogitsProcessor, - BayesianDetectorConfig, - BayesianDetectorModel, BeamScorer, BeamSearchScorer, ClassifierFreeGuidanceLogitsProcessor, @@ -6284,6 +6069,7 @@ ExponentialDecayLengthPenalty, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, + ForceTokensLogitsProcessor, GenerationMixin, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, @@ -6307,9 +6093,6 @@ StopStringCriteria, SuppressTokensAtBeginLogitsProcessor, SuppressTokensLogitsProcessor, - SynthIDTextWatermarkDetector, - SynthIDTextWatermarkingConfig, - SynthIDTextWatermarkLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, @@ -6319,10 +6102,6 @@ WatermarkLogitsProcessor, WhisperTimeStampLogitsProcessor, ) - from .integrations.executorch import ( - TorchExportableModuleWithStaticCache, - convert_and_export_with_cache, - ) from .modeling_rope_utils import ROPE_INIT_FUNCTIONS from .modeling_utils import PreTrainedModel from .models.albert import ( @@ -6348,13 
+6127,6 @@ AltCLIPTextModel, AltCLIPVisionModel, ) - from .models.aria import ( - AriaForConditionalGeneration, - AriaPreTrainedModel, - AriaTextForCausalLM, - AriaTextModel, - AriaTextPreTrainedModel, - ) from .models.audio_spectrogram_transformer import ( ASTForAudioClassification, ASTModel, @@ -6373,7 +6145,6 @@ MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_IMAGE_MAPPING, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, - MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING, MODEL_FOR_IMAGE_TO_IMAGE_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, MODEL_FOR_KEYPOINT_DETECTION_MAPPING, @@ -6415,7 +6186,6 @@ AutoModelForDocumentQuestionAnswering, AutoModelForImageClassification, AutoModelForImageSegmentation, - AutoModelForImageTextToText, AutoModelForImageToImage, AutoModelForInstanceSegmentation, AutoModelForKeypointDetection, @@ -6483,6 +6253,7 @@ BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, + BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, @@ -6502,6 +6273,7 @@ BigBirdForQuestionAnswering, BigBirdForSequenceClassification, BigBirdForTokenClassification, + BigBirdLayer, BigBirdModel, BigBirdPreTrainedModel, load_tf_weights_in_big_bird, @@ -6550,13 +6322,10 @@ ) from .models.blip_2 import ( Blip2ForConditionalGeneration, - Blip2ForImageTextRetrieval, Blip2Model, Blip2PreTrainedModel, Blip2QFormerModel, - Blip2TextModelWithProjection, Blip2VisionModel, - Blip2VisionModelWithProjection, ) from .models.bloom import ( BloomForCausalLM, @@ -6596,6 +6365,7 @@ CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, + CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, @@ -6668,6 +6438,7 @@ ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, + ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, @@ -6852,6 +6623,7 @@ QDQBertForQuestionAnswering, QDQBertForSequenceClassification, QDQBertForTokenClassification, + QDQBertLayer, QDQBertLMHeadModel, QDQBertModel, QDQBertPreTrainedModel, @@ -6933,6 +6705,12 @@ Dinov2Model, Dinov2PreTrainedModel, ) + from .models.dinov2_with_registers import ( + Dinov2WithRegistersBackbone, + Dinov2WithRegistersForImageClassification, + Dinov2WithRegistersModel, + Dinov2WithRegistersPreTrainedModel, + ) from .models.distilbert import ( DistilBertForMaskedLM, DistilBertForMultipleChoice, @@ -7050,6 +6828,7 @@ FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, + FNetLayer, FNetModel, FNetPreTrainedModel, ) @@ -7101,13 +6880,6 @@ GitPreTrainedModel, GitVisionModel, ) - from .models.glm import ( - GlmForCausalLM, - GlmForSequenceClassification, - GlmForTokenClassification, - GlmModel, - GlmPreTrainedModel, - ) from .models.glpn import ( GLPNForDepthEstimation, GLPNModel, @@ -7144,11 +6916,13 @@ GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, + GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) from .models.gpt_neox_japanese import ( GPTNeoXJapaneseForCausalLM, + GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) @@ -7159,16 +6933,6 @@ GPTJModel, GPTJPreTrainedModel, ) - from .models.granite import ( - GraniteForCausalLM, - GraniteModel, - GranitePreTrainedModel, - ) - from .models.granitemoe import ( - GraniteMoeForCausalLM, - GraniteMoeModel, - GraniteMoePreTrainedModel, - ) from .models.grounding_dino import ( GroundingDinoForObjectDetection, GroundingDinoModel, @@ -7214,19 +6978,6 @@ 
Idefics2PreTrainedModel, Idefics2Processor, ) - from .models.idefics3 import ( - Idefics3ForConditionalGeneration, - Idefics3Model, - Idefics3PreTrainedModel, - Idefics3Processor, - Idefics3VisionConfig, - Idefics3VisionTransformer, - ) - from .models.ijepa import ( - IJepaForImageClassification, - IJepaModel, - IJepaPreTrainedModel, - ) from .models.imagegpt import ( ImageGPTForCausalImageModeling, ImageGPTForImageClassification, @@ -7330,10 +7081,6 @@ LlavaNextVideoForConditionalGeneration, LlavaNextVideoPreTrainedModel, ) - from .models.llava_onevision import ( - LlavaOnevisionForConditionalGeneration, - LlavaOnevisionPreTrainedModel, - ) from .models.longformer import ( LongformerForMaskedLM, LongformerForMultipleChoice, @@ -7342,6 +7089,7 @@ LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, + LongformerSelfAttention, ) from .models.longt5 import ( LongT5EncoderModel, @@ -7368,6 +7116,7 @@ LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, + LxmertXLayer, ) from .models.m2m_100 import ( M2M100ForConditionalGeneration, @@ -7384,7 +7133,7 @@ Mamba2Model, Mamba2PreTrainedModel, ) - from .models.marian import MarianForCausalLM, MarianModel, MarianMTModel, MarianPreTrainedModel + from .models.marian import MarianForCausalLM, MarianModel, MarianMTModel from .models.markuplm import ( MarkupLMForQuestionAnswering, MarkupLMForSequenceClassification, @@ -7428,13 +7177,8 @@ MgpstrModel, MgpstrPreTrainedModel, ) - from .models.mimi import ( - MimiModel, - MimiPreTrainedModel, - ) from .models.mistral import ( MistralForCausalLM, - MistralForQuestionAnswering, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, @@ -7442,20 +7186,11 @@ ) from .models.mixtral import ( MixtralForCausalLM, - MixtralForQuestionAnswering, MixtralForSequenceClassification, MixtralForTokenClassification, MixtralModel, MixtralPreTrainedModel, ) - from .models.mllama import ( - MllamaForCausalLM, - MllamaForConditionalGeneration, - MllamaPreTrainedModel, - MllamaProcessor, - MllamaTextModel, - MllamaVisionModel, - ) from .models.mobilebert import ( MobileBertForMaskedLM, MobileBertForMultipleChoice, @@ -7464,6 +7199,7 @@ MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, + MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, @@ -7493,18 +7229,13 @@ MobileViTV2Model, MobileViTV2PreTrainedModel, ) - from .models.moshi import ( - MoshiForCausalLM, - MoshiForConditionalGeneration, - MoshiModel, - MoshiPreTrainedModel, - ) from .models.mpnet import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, + MPNetLayer, MPNetModel, MPNetPreTrainedModel, ) @@ -7576,6 +7307,7 @@ NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, + NystromformerLayer, NystromformerModel, NystromformerPreTrainedModel, ) @@ -7584,20 +7316,6 @@ OlmoModel, OlmoPreTrainedModel, ) - from .models.olmo2 import ( - Olmo2ForCausalLM, - Olmo2Model, - Olmo2PreTrainedModel, - ) - from .models.olmoe import ( - OlmoeForCausalLM, - OlmoeModel, - OlmoePreTrainedModel, - ) - from .models.omdet_turbo import ( - OmDetTurboForObjectDetection, - OmDetTurboPreTrainedModel, - ) from .models.oneformer import ( OneFormerForUniversalSegmentation, OneFormerModel, @@ -7672,6 +7390,7 @@ PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, + 
PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) @@ -7696,22 +7415,12 @@ Phi3Model, Phi3PreTrainedModel, ) - from .models.phimoe import ( - PhimoeForCausalLM, - PhimoeForSequenceClassification, - PhimoeModel, - PhimoePreTrainedModel, - ) from .models.pix2struct import ( Pix2StructForConditionalGeneration, Pix2StructPreTrainedModel, Pix2StructTextModel, Pix2StructVisionModel, ) - from .models.pixtral import ( - PixtralPreTrainedModel, - PixtralVisionModel, - ) from .models.plbart import ( PLBartForCausalLM, PLBartForConditionalGeneration, @@ -7749,7 +7458,6 @@ ) from .models.qwen2 import ( Qwen2ForCausalLM, - Qwen2ForQuestionAnswering, Qwen2ForSequenceClassification, Qwen2ForTokenClassification, Qwen2Model, @@ -7762,17 +7470,11 @@ ) from .models.qwen2_moe import ( Qwen2MoeForCausalLM, - Qwen2MoeForQuestionAnswering, Qwen2MoeForSequenceClassification, Qwen2MoeForTokenClassification, Qwen2MoeModel, Qwen2MoePreTrainedModel, ) - from .models.qwen2_vl import ( - Qwen2VLForConditionalGeneration, - Qwen2VLModel, - Qwen2VLPreTrainedModel, - ) from .models.rag import ( RagModel, RagPreTrainedModel, @@ -7785,9 +7487,11 @@ RecurrentGemmaPreTrainedModel, ) from .models.reformer import ( + ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, + ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, @@ -7804,6 +7508,7 @@ RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, + RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, @@ -7842,6 +7547,7 @@ RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, + RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, @@ -7853,6 +7559,7 @@ RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, + RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, @@ -7897,6 +7604,7 @@ SegformerDecodeHead, SegformerForImageClassification, SegformerForSemanticSegmentation, + SegformerLayer, SegformerModel, SegformerPreTrainedModel, ) @@ -7941,6 +7649,7 @@ from .models.splinter import ( SplinterForPreTraining, SplinterForQuestionAnswering, + SplinterLayer, SplinterModel, SplinterPreTrainedModel, ) @@ -7951,6 +7660,7 @@ SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, + SqueezeBertModule, SqueezeBertPreTrainedModel, ) from .models.stablelm import ( @@ -8099,6 +7809,7 @@ ViltForMaskedLM, ViltForQuestionAnswering, ViltForTokenClassification, + ViltLayer, ViltModel, ViltPreTrainedModel, ) @@ -8114,6 +7825,7 @@ VisualBertForQuestionAnswering, VisualBertForRegionToPhraseAlignment, VisualBertForVisualReasoning, + VisualBertLayer, VisualBertModel, VisualBertPreTrainedModel, ) @@ -8125,6 +7837,7 @@ ) from .models.vit_mae import ( ViTMAEForPreTraining, + ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) @@ -8266,15 +7979,10 @@ YosoForQuestionAnswering, YosoForSequenceClassification, YosoForTokenClassification, + YosoLayer, YosoModel, YosoPreTrainedModel, ) - from .models.zamba import ( - ZambaForCausalLM, - ZambaForSequenceClassification, - ZambaModel, - ZambaPreTrainedModel, - ) from .models.zoedepth import ( ZoeDepthForDepthEstimation, ZoeDepthPreTrainedModel, @@ -8405,6 +8113,7 @@ TFBartPretrainedModel, ) from .models.bert import ( + TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, @@ -8458,6 +8167,7 @@ 
TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, + TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) @@ -8642,6 +8352,7 @@ TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, + TFLongformerSelfAttention, ) from .models.lxmert import ( TFLxmertForPreTraining, @@ -8731,6 +8442,7 @@ TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, + TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) @@ -8768,6 +8480,7 @@ TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, + TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) @@ -9214,4 +8927,4 @@ "None of PyTorch, TensorFlow >= 2.0, or Flax have been found. " "Models won't be available and only tokenizers, configuration " "and file/data utilities can be used." - ) + ) \ No newline at end of file diff --git a/src/transformers/commands/add_new_model_like.py b/src/transformers/commands/add_new_model_like.py index 85e1722aae324d..eb1986cfa292e1 100644 --- a/src/transformers/commands/add_new_model_like.py +++ b/src/transformers/commands/add_new_model_like.py @@ -766,7 +766,6 @@ def retrieve_info_for_model(model_type, frameworks: Optional[List[str]] = None): image_processor_class = image_processor_classes[0] # we take the slow image processor class. else: image_processor_class = image_processor_classes - feature_extractor_class = auto_module.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES.get(model_type, None) processor_class = auto_module.processing_auto.PROCESSOR_MAPPING_NAMES.get(model_type, None) @@ -1715,4 +1714,4 @@ def get_user_input(): ) frameworks = list(set(frameworks.split(" "))) - return (old_model_type, model_patterns, add_copied_from, frameworks, old_checkpoint) + return (old_model_type, model_patterns, add_copied_from, frameworks, old_checkpoint) \ No newline at end of file diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 116b71c81ad9df..59ec31a73c0e85 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -16,7 +16,6 @@ albert, align, altclip, - aria, audio_spectrogram_transformer, auto, autoformer, @@ -74,6 +73,7 @@ dialogpt, dinat, dinov2, + dinov2_with_registers, distilbert, dit, donut, @@ -98,7 +98,6 @@ gemma, gemma2, git, - glm, glpn, gpt2, gpt_bigcode, @@ -107,8 +106,6 @@ gpt_neox_japanese, gpt_sw3, gptj, - granite, - granitemoe, grounding_dino, groupvit, herbert, @@ -117,8 +114,6 @@ ibert, idefics, idefics2, - idefics3, - ijepa, imagegpt, informer, instructblip, @@ -137,7 +132,6 @@ llava, llava_next, llava_next_video, - llava_onevision, longformer, longt5, luke, @@ -154,17 +148,14 @@ megatron_bert, megatron_gpt2, mgp_str, - mimi, mistral, mixtral, - mllama, mluke, mobilebert, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, - moshi, mpnet, mpt, mra, @@ -172,16 +163,12 @@ musicgen, musicgen_melody, mvp, - myt5, nemotron, nllb, nllb_moe, nougat, nystromformer, olmo, - olmo2, - olmoe, - omdet_turbo, oneformer, openai, opt, @@ -196,10 +183,8 @@ persimmon, phi, phi3, - phimoe, phobert, pix2struct, - pixtral, plbart, poolformer, pop2piano, @@ -209,7 +194,6 @@ qwen2, qwen2_audio, qwen2_moe, - qwen2_vl, rag, recurrent_gemma, reformer, @@ -287,6 +271,5 @@ xmod, yolos, yoso, - zamba, zoedepth, -) +) \ No newline at end of file diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py 
index cc3a7d5baaeb49..493c02dc32fb08 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -35,8 +35,6 @@ ("albert", "AlbertConfig"), ("align", "AlignConfig"), ("altclip", "AltCLIPConfig"), - ("aria", "AriaConfig"), - ("aria_text", "AriaTextConfig"), ("audio-spectrogram-transformer", "ASTConfig"), ("autoformer", "AutoformerConfig"), ("bark", "BarkConfig"), @@ -62,7 +60,6 @@ ("chinese_clip_vision_model", "ChineseCLIPVisionConfig"), ("clap", "ClapConfig"), ("clip", "CLIPConfig"), - ("clip_text_model", "CLIPTextConfig"), ("clip_vision_model", "CLIPVisionConfig"), ("clipseg", "CLIPSegConfig"), ("clvp", "ClvpConfig"), @@ -91,6 +88,7 @@ ("detr", "DetrConfig"), ("dinat", "DinatConfig"), ("dinov2", "Dinov2Config"), + ("dinov2_with_registers", "Dinov2WithRegistersConfig"), ("distilbert", "DistilBertConfig"), ("donut-swin", "DonutSwinConfig"), ("dpr", "DPRConfig"), @@ -116,7 +114,6 @@ ("gemma", "GemmaConfig"), ("gemma2", "Gemma2Config"), ("git", "GitConfig"), - ("glm", "GlmConfig"), ("glpn", "GLPNConfig"), ("gpt-sw3", "GPT2Config"), ("gpt2", "GPT2Config"), @@ -126,8 +123,6 @@ ("gpt_neox_japanese", "GPTNeoXJapaneseConfig"), ("gptj", "GPTJConfig"), ("gptsan-japanese", "GPTSanJapaneseConfig"), - ("granite", "GraniteConfig"), - ("granitemoe", "GraniteMoeConfig"), ("graphormer", "GraphormerConfig"), ("grounding-dino", "GroundingDinoConfig"), ("groupvit", "GroupViTConfig"), @@ -136,9 +131,6 @@ ("ibert", "IBertConfig"), ("idefics", "IdeficsConfig"), ("idefics2", "Idefics2Config"), - ("idefics3", "Idefics3Config"), - ("idefics3_vision", "Idefics3VisionConfig"), - ("ijepa", "IJepaConfig"), ("imagegpt", "ImageGPTConfig"), ("informer", "InformerConfig"), ("instructblip", "InstructBlipConfig"), @@ -157,7 +149,6 @@ ("llava", "LlavaConfig"), ("llava_next", "LlavaNextConfig"), ("llava_next_video", "LlavaNextVideoConfig"), - ("llava_onevision", "LlavaOnevisionConfig"), ("longformer", "LongformerConfig"), ("longt5", "LongT5Config"), ("luke", "LukeConfig"), @@ -175,16 +166,13 @@ ("mega", "MegaConfig"), ("megatron-bert", "MegatronBertConfig"), ("mgp-str", "MgpstrConfig"), - ("mimi", "MimiConfig"), ("mistral", "MistralConfig"), ("mixtral", "MixtralConfig"), - ("mllama", "MllamaConfig"), ("mobilebert", "MobileBertConfig"), ("mobilenet_v1", "MobileNetV1Config"), ("mobilenet_v2", "MobileNetV2Config"), ("mobilevit", "MobileViTConfig"), ("mobilevitv2", "MobileViTV2Config"), - ("moshi", "MoshiConfig"), ("mpnet", "MPNetConfig"), ("mpt", "MptConfig"), ("mra", "MraConfig"), @@ -199,9 +187,6 @@ ("nougat", "VisionEncoderDecoderConfig"), ("nystromformer", "NystromformerConfig"), ("olmo", "OlmoConfig"), - ("olmo2", "Olmo2Config"), - ("olmoe", "OlmoeConfig"), - ("omdet-turbo", "OmDetTurboConfig"), ("oneformer", "OneFormerConfig"), ("open-llama", "OpenLlamaConfig"), ("openai-gpt", "OpenAIGPTConfig"), @@ -217,9 +202,7 @@ ("persimmon", "PersimmonConfig"), ("phi", "PhiConfig"), ("phi3", "Phi3Config"), - ("phimoe", "PhimoeConfig"), ("pix2struct", "Pix2StructConfig"), - ("pixtral", "PixtralVisionConfig"), ("plbart", "PLBartConfig"), ("poolformer", "PoolFormerConfig"), ("pop2piano", "Pop2PianoConfig"), @@ -231,7 +214,6 @@ ("qwen2_audio", "Qwen2AudioConfig"), ("qwen2_audio_encoder", "Qwen2AudioEncoderConfig"), ("qwen2_moe", "Qwen2MoeConfig"), - ("qwen2_vl", "Qwen2VLConfig"), ("rag", "RagConfig"), ("realm", "RealmConfig"), ("recurrent_gemma", "RecurrentGemmaConfig"), @@ -318,7 +300,6 @@ ("xmod", "XmodConfig"), ("yolos", "YolosConfig"), ("yoso", "YosoConfig"), - ("zamba", 
"ZambaConfig"), ("zoedepth", "ZoeDepthConfig"), ] ) @@ -330,8 +311,6 @@ ("albert", "ALBERT"), ("align", "ALIGN"), ("altclip", "AltCLIP"), - ("aria", "Aria"), - ("aria_text", "AriaText"), ("audio-spectrogram-transformer", "Audio Spectrogram Transformer"), ("autoformer", "Autoformer"), ("bark", "Bark"), @@ -363,7 +342,6 @@ ("chinese_clip_vision_model", "ChineseCLIPVisionModel"), ("clap", "CLAP"), ("clip", "CLIP"), - ("clip_text_model", "CLIPTextModel"), ("clip_vision_model", "CLIPVisionModel"), ("clipseg", "CLIPSeg"), ("clvp", "CLVP"), @@ -396,6 +374,7 @@ ("dialogpt", "DialoGPT"), ("dinat", "DiNAT"), ("dinov2", "DINOv2"), + ("dinov2_with_registers", "Dinov2WithRegisters"), ("distilbert", "DistilBERT"), ("dit", "DiT"), ("donut-swin", "DonutSwin"), @@ -424,7 +403,6 @@ ("gemma", "Gemma"), ("gemma2", "Gemma2"), ("git", "GIT"), - ("glm", "GLM"), ("glpn", "GLPN"), ("gpt-sw3", "GPT-Sw3"), ("gpt2", "OpenAI GPT-2"), @@ -434,8 +412,6 @@ ("gpt_neox_japanese", "GPT NeoX Japanese"), ("gptj", "GPT-J"), ("gptsan-japanese", "GPTSAN-japanese"), - ("granite", "Granite"), - ("granitemoe", "GraniteMoeMoe"), ("graphormer", "Graphormer"), ("grounding-dino", "Grounding DINO"), ("groupvit", "GroupViT"), @@ -445,9 +421,6 @@ ("ibert", "I-BERT"), ("idefics", "IDEFICS"), ("idefics2", "Idefics2"), - ("idefics3", "Idefics3"), - ("idefics3_vision", "Idefics3VisionTransformer"), - ("ijepa", "I-JEPA"), ("imagegpt", "ImageGPT"), ("informer", "Informer"), ("instructblip", "InstructBLIP"), @@ -469,7 +442,6 @@ ("llava", "LLaVa"), ("llava_next", "LLaVA-NeXT"), ("llava_next_video", "LLaVa-NeXT-Video"), - ("llava_onevision", "LLaVA-Onevision"), ("longformer", "Longformer"), ("longt5", "LongT5"), ("luke", "LUKE"), @@ -491,10 +463,8 @@ ("megatron-bert", "Megatron-BERT"), ("megatron_gpt2", "Megatron-GPT2"), ("mgp-str", "MGP-STR"), - ("mimi", "Mimi"), ("mistral", "Mistral"), ("mixtral", "Mixtral"), - ("mllama", "Mllama"), ("mluke", "mLUKE"), ("mms", "MMS"), ("mobilebert", "MobileBERT"), @@ -502,7 +472,6 @@ ("mobilenet_v2", "MobileNetV2"), ("mobilevit", "MobileViT"), ("mobilevitv2", "MobileViTV2"), - ("moshi", "Moshi"), ("mpnet", "MPNet"), ("mpt", "MPT"), ("mra", "MRA"), @@ -510,7 +479,6 @@ ("musicgen", "MusicGen"), ("musicgen_melody", "MusicGen Melody"), ("mvp", "MVP"), - ("myt5", "myt5"), ("nat", "NAT"), ("nemotron", "Nemotron"), ("nezha", "Nezha"), @@ -519,9 +487,6 @@ ("nougat", "Nougat"), ("nystromformer", "Nyströmformer"), ("olmo", "OLMo"), - ("olmo2", "OLMo2"), - ("olmoe", "OLMoE"), - ("omdet-turbo", "OmDet-Turbo"), ("oneformer", "OneFormer"), ("open-llama", "OpenLlama"), ("openai-gpt", "OpenAI GPT"), @@ -537,10 +502,8 @@ ("persimmon", "Persimmon"), ("phi", "Phi"), ("phi3", "Phi3"), - ("phimoe", "Phimoe"), ("phobert", "PhoBERT"), ("pix2struct", "Pix2Struct"), - ("pixtral", "Pixtral"), ("plbart", "PLBart"), ("poolformer", "PoolFormer"), ("pop2piano", "Pop2Piano"), @@ -552,7 +515,6 @@ ("qwen2_audio", "Qwen2Audio"), ("qwen2_audio_encoder", "Qwen2AudioEncoder"), ("qwen2_moe", "Qwen2MoE"), - ("qwen2_vl", "Qwen2VL"), ("rag", "RAG"), ("realm", "REALM"), ("recurrent_gemma", "RecurrentGemma"), @@ -646,7 +608,6 @@ ("xmod", "X-MOD"), ("yolos", "YOLOS"), ("yoso", "YOSO"), - ("zamba", "Zamba"), ("zoedepth", "ZoeDepth"), ] ) @@ -692,9 +653,6 @@ ("xclip", "x_clip"), ("clip_vision_model", "clip"), ("qwen2_audio_encoder", "qwen2_audio"), - ("clip_text_model", "clip"), - ("aria_text", "aria"), - ("idefics3_vision", "idefics3"), ("siglip_vision_model", "siglip"), ("chinese_clip_vision_model", "chinese_clip"), ("rt_detr_resnet", "rt_detr"), @@ 
-1079,4 +1037,4 @@ def register(model_type, config, exist_ok=False): f"you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they " "match!" ) - CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok) + CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok) \ No newline at end of file diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index e8e4814e6a0f6a..1ef052f7bf3f8e 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -35,8 +35,6 @@ ("albert", "AlbertModel"), ("align", "AlignModel"), ("altclip", "AltCLIPModel"), - ("aria", "AriaForConditionalGeneration"), - ("aria_text", "AriaTextModel"), ("audio-spectrogram-transformer", "ASTModel"), ("autoformer", "AutoformerModel"), ("bark", "BarkModel"), @@ -62,7 +60,6 @@ ("chinese_clip_vision_model", "ChineseCLIPVisionModel"), ("clap", "ClapModel"), ("clip", "CLIPModel"), - ("clip_text_model", "CLIPTextModel"), ("clip_vision_model", "CLIPVisionModel"), ("clipseg", "CLIPSegModel"), ("clvp", "ClvpModelForConditionalGeneration"), @@ -90,6 +87,7 @@ ("detr", "DetrModel"), ("dinat", "DinatModel"), ("dinov2", "Dinov2Model"), + ("dinov2_with_registers", "Dinov2WithRegistersModel"), ("distilbert", "DistilBertModel"), ("donut-swin", "DonutSwinModel"), ("dpr", "DPRQuestionEncoder"), @@ -113,7 +111,6 @@ ("gemma", "GemmaModel"), ("gemma2", "Gemma2Model"), ("git", "GitModel"), - ("glm", "GlmModel"), ("glpn", "GLPNModel"), ("gpt-sw3", "GPT2Model"), ("gpt2", "GPT2Model"), @@ -123,8 +120,6 @@ ("gpt_neox_japanese", "GPTNeoXJapaneseModel"), ("gptj", "GPTJModel"), ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"), - ("granite", "GraniteModel"), - ("granitemoe", "GraniteMoeModel"), ("graphormer", "GraphormerModel"), ("grounding-dino", "GroundingDinoModel"), ("groupvit", "GroupViTModel"), @@ -133,9 +128,6 @@ ("ibert", "IBertModel"), ("idefics", "IdeficsModel"), ("idefics2", "Idefics2Model"), - ("idefics3", "Idefics3Model"), - ("idefics3_vision", "Idefics3VisionTransformer"), - ("ijepa", "IJepaModel"), ("imagegpt", "ImageGPTModel"), ("informer", "InformerModel"), ("jamba", "JambaModel"), @@ -166,7 +158,6 @@ ("mega", "MegaModel"), ("megatron-bert", "MegatronBertModel"), ("mgp-str", "MgpstrForSceneTextRecognition"), - ("mimi", "MimiModel"), ("mistral", "MistralModel"), ("mixtral", "MixtralModel"), ("mobilebert", "MobileBertModel"), @@ -174,7 +165,6 @@ ("mobilenet_v2", "MobileNetV2Model"), ("mobilevit", "MobileViTModel"), ("mobilevitv2", "MobileViTV2Model"), - ("moshi", "MoshiModel"), ("mpnet", "MPNetModel"), ("mpt", "MptModel"), ("mra", "MraModel"), @@ -188,9 +178,6 @@ ("nllb-moe", "NllbMoeModel"), ("nystromformer", "NystromformerModel"), ("olmo", "OlmoModel"), - ("olmo2", "Olmo2Model"), - ("olmoe", "OlmoeModel"), - ("omdet-turbo", "OmDetTurboForObjectDetection"), ("oneformer", "OneFormerModel"), ("open-llama", "OpenLlamaModel"), ("openai-gpt", "OpenAIGPTModel"), @@ -205,8 +192,6 @@ ("persimmon", "PersimmonModel"), ("phi", "PhiModel"), ("phi3", "Phi3Model"), - ("phimoe", "PhimoeModel"), - ("pixtral", "PixtralVisionModel"), ("plbart", "PLBartModel"), ("poolformer", "PoolFormerModel"), ("prophetnet", "ProphetNetModel"), @@ -216,7 +201,6 @@ ("qwen2", "Qwen2Model"), ("qwen2_audio_encoder", "Qwen2AudioEncoder"), ("qwen2_moe", "Qwen2MoeModel"), - ("qwen2_vl", "Qwen2VLModel"), ("recurrent_gemma", "RecurrentGemmaModel"), ("reformer", "ReformerModel"), ("regnet", "RegNetModel"), @@ -291,7 +275,6 @@ 
("xmod", "XmodModel"), ("yolos", "YolosModel"), ("yoso", "YosoModel"), - ("zamba", "ZambaModel"), ] ) @@ -325,12 +308,10 @@ ("ibert", "IBertForMaskedLM"), ("idefics", "IdeficsForVisionText2Text"), ("idefics2", "Idefics2ForConditionalGeneration"), - ("idefics3", "Idefics3ForConditionalGeneration"), ("layoutlm", "LayoutLMForMaskedLM"), ("llava", "LlavaForConditionalGeneration"), ("llava_next", "LlavaNextForConditionalGeneration"), ("llava_next_video", "LlavaNextVideoForConditionalGeneration"), - ("llava_onevision", "LlavaOnevisionForConditionalGeneration"), ("longformer", "LongformerForMaskedLM"), ("luke", "LukeForMaskedLM"), ("lxmert", "LxmertForPreTraining"), @@ -338,7 +319,6 @@ ("mamba2", "Mamba2ForCausalLM"), ("mega", "MegaForMaskedLM"), ("megatron-bert", "MegatronBertForPreTraining"), - ("mllama", "MllamaForConditionalGeneration"), ("mobilebert", "MobileBertForPreTraining"), ("mpnet", "MPNetForMaskedLM"), ("mpt", "MptForCausalLM"), @@ -467,7 +447,6 @@ MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict( [ # Model for Causal LM mapping - ("aria_text", "AriaTextForCausalLM"), ("bart", "BartForCausalLM"), ("bert", "BertLMHeadModel"), ("bert-generation", "BertGenerationDecoder"), @@ -493,7 +472,6 @@ ("gemma", "GemmaForCausalLM"), ("gemma2", "Gemma2ForCausalLM"), ("git", "GitForCausalLM"), - ("glm", "GlmForCausalLM"), ("gpt-sw3", "GPT2LMHeadModel"), ("gpt2", "GPT2LMHeadModel"), ("gpt_bigcode", "GPTBigCodeForCausalLM"), @@ -501,8 +479,6 @@ ("gpt_neox", "GPTNeoXForCausalLM"), ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"), ("gptj", "GPTJForCausalLM"), - ("granite", "GraniteForCausalLM"), - ("granitemoe", "GraniteMoeForCausalLM"), ("jamba", "JambaForCausalLM"), ("jetmoe", "JetMoeForCausalLM"), ("llama", "LlamaForCausalLM"), @@ -514,16 +490,12 @@ ("megatron-bert", "MegatronBertForCausalLM"), ("mistral", "MistralForCausalLM"), ("mixtral", "MixtralForCausalLM"), - ("mllama", "MllamaForCausalLM"), - ("moshi", "MoshiForCausalLM"), ("mpt", "MptForCausalLM"), ("musicgen", "MusicgenForCausalLM"), ("musicgen_melody", "MusicgenMelodyForCausalLM"), ("mvp", "MvpForCausalLM"), ("nemotron", "NemotronForCausalLM"), ("olmo", "OlmoForCausalLM"), - ("olmo2", "Olmo2ForCausalLM"), - ("olmoe", "OlmoeForCausalLM"), ("open-llama", "OpenLlamaForCausalLM"), ("openai-gpt", "OpenAIGPTLMHeadModel"), ("opt", "OPTForCausalLM"), @@ -531,7 +503,6 @@ ("persimmon", "PersimmonForCausalLM"), ("phi", "PhiForCausalLM"), ("phi3", "Phi3ForCausalLM"), - ("phimoe", "PhimoeForCausalLM"), ("plbart", "PLBartForCausalLM"), ("prophetnet", "ProphetNetForCausalLM"), ("qdqbert", "QDQBertLMHeadModel"), @@ -558,7 +529,6 @@ ("xlm-roberta-xl", "XLMRobertaXLForCausalLM"), ("xlnet", "XLNetLMHeadModel"), ("xmod", "XmodForCausalLM"), - ("zamba", "ZambaForCausalLM"), ] ) @@ -577,16 +547,15 @@ ("detr", "DetrModel"), ("dinat", "DinatModel"), ("dinov2", "Dinov2Model"), + ("dinov2_with_registers", "Dinov2WithRegistersModel"), ("dpt", "DPTModel"), ("efficientformer", "EfficientFormerModel"), ("efficientnet", "EfficientNetModel"), ("focalnet", "FocalNetModel"), ("glpn", "GLPNModel"), ("hiera", "HieraModel"), - ("ijepa", "IJepaModel"), ("imagegpt", "ImageGPTModel"), ("levit", "LevitModel"), - ("mllama", "MllamaVisionModel"), ("mobilenet_v1", "MobileNetV1Model"), ("mobilenet_v2", "MobileNetV2Model"), ("mobilevit", "MobileViTModel"), @@ -651,6 +620,7 @@ ), ("dinat", "DinatForImageClassification"), ("dinov2", "Dinov2ForImageClassification"), + ("dinov2_with_registers", "Dinov2WithRegistersForImageClassification"), ( "efficientformer", ( @@ -661,7 +631,6 @@ 
("efficientnet", "EfficientNetForImageClassification"), ("focalnet", "FocalNetForImageClassification"), ("hiera", "HieraForImageClassification"), - ("ijepa", "IJepaForImageClassification"), ("imagegpt", "ImageGPTForImageClassification"), ( "levit", @@ -752,51 +721,20 @@ ("chameleon", "ChameleonForConditionalGeneration"), ("git", "GitForCausalLM"), ("idefics2", "Idefics2ForConditionalGeneration"), - ("idefics3", "Idefics3ForConditionalGeneration"), ("instructblip", "InstructBlipForConditionalGeneration"), ("instructblipvideo", "InstructBlipVideoForConditionalGeneration"), ("kosmos-2", "Kosmos2ForConditionalGeneration"), ("llava", "LlavaForConditionalGeneration"), ("llava_next", "LlavaNextForConditionalGeneration"), ("llava_next_video", "LlavaNextVideoForConditionalGeneration"), - ("llava_onevision", "LlavaOnevisionForConditionalGeneration"), - ("mllama", "MllamaForConditionalGeneration"), ("paligemma", "PaliGemmaForConditionalGeneration"), ("pix2struct", "Pix2StructForConditionalGeneration"), - ("qwen2_vl", "Qwen2VLForConditionalGeneration"), ("video_llava", "VideoLlavaForConditionalGeneration"), ("vipllava", "VipLlavaForConditionalGeneration"), ("vision-encoder-decoder", "VisionEncoderDecoderModel"), ] ) -MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES = OrderedDict( - [ - ("aria", "AriaForConditionalGeneration"), - ("blip", "BlipForConditionalGeneration"), - ("blip-2", "Blip2ForConditionalGeneration"), - ("chameleon", "ChameleonForConditionalGeneration"), - ("fuyu", "FuyuForCausalLM"), - ("git", "GitForCausalLM"), - ("idefics", "IdeficsForVisionText2Text"), - ("idefics2", "Idefics2ForConditionalGeneration"), - ("idefics3", "Idefics3ForConditionalGeneration"), - ("instructblip", "InstructBlipForConditionalGeneration"), - ("kosmos-2", "Kosmos2ForConditionalGeneration"), - ("llava", "LlavaForConditionalGeneration"), - ("llava_next", "LlavaNextForConditionalGeneration"), - ("llava_onevision", "LlavaOnevisionForConditionalGeneration"), - ("mllama", "MllamaForConditionalGeneration"), - ("paligemma", "PaliGemmaForConditionalGeneration"), - ("pix2struct", "Pix2StructForConditionalGeneration"), - ("pixtral", "LlavaForConditionalGeneration"), - ("qwen2_vl", "Qwen2VLForConditionalGeneration"), - ("udop", "UdopForConditionalGeneration"), - ("vipllava", "VipLlavaForConditionalGeneration"), - ("vision-encoder-decoder", "VisionEncoderDecoderModel"), - ] -) - MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict( [ # Model for Masked LM mapping @@ -865,7 +803,6 @@ [ # Model for Zero Shot Object Detection mapping ("grounding-dino", "GroundingDinoForObjectDetection"), - ("omdet-turbo", "OmDetTurboForObjectDetection"), ("owlv2", "Owlv2ForObjectDetection"), ("owlvit", "OwlViTForObjectDetection"), ] @@ -953,7 +890,6 @@ ("funnel", "FunnelForSequenceClassification"), ("gemma", "GemmaForSequenceClassification"), ("gemma2", "Gemma2ForSequenceClassification"), - ("glm", "GlmForSequenceClassification"), ("gpt-sw3", "GPT2ForSequenceClassification"), ("gpt2", "GPT2ForSequenceClassification"), ("gpt_bigcode", "GPTBigCodeForSequenceClassification"), @@ -993,7 +929,6 @@ ("persimmon", "PersimmonForSequenceClassification"), ("phi", "PhiForSequenceClassification"), ("phi3", "Phi3ForSequenceClassification"), - ("phimoe", "PhimoeForSequenceClassification"), ("plbart", "PLBartForSequenceClassification"), ("qdqbert", "QDQBertForSequenceClassification"), ("qwen2", "Qwen2ForSequenceClassification"), @@ -1017,7 +952,6 @@ ("xlnet", "XLNetForSequenceClassification"), ("xmod", "XmodForSequenceClassification"), ("yoso", 
"YosoForSequenceClassification"), - ("zamba", "ZambaForSequenceClassification"), ] ) @@ -1061,8 +995,6 @@ ("mbart", "MBartForQuestionAnswering"), ("mega", "MegaForQuestionAnswering"), ("megatron-bert", "MegatronBertForQuestionAnswering"), - ("mistral", "MistralForQuestionAnswering"), - ("mixtral", "MixtralForQuestionAnswering"), ("mobilebert", "MobileBertForQuestionAnswering"), ("mpnet", "MPNetForQuestionAnswering"), ("mpt", "MptForQuestionAnswering"), @@ -1074,8 +1006,6 @@ ("nystromformer", "NystromformerForQuestionAnswering"), ("opt", "OPTForQuestionAnswering"), ("qdqbert", "QDQBertForQuestionAnswering"), - ("qwen2", "Qwen2ForQuestionAnswering"), - ("qwen2_moe", "Qwen2MoeForQuestionAnswering"), ("reformer", "ReformerForQuestionAnswering"), ("rembert", "RemBertForQuestionAnswering"), ("roberta", "RobertaForQuestionAnswering"), @@ -1144,7 +1074,6 @@ ("funnel", "FunnelForTokenClassification"), ("gemma", "GemmaForTokenClassification"), ("gemma2", "Gemma2ForTokenClassification"), - ("glm", "GlmForTokenClassification"), ("gpt-sw3", "GPT2ForTokenClassification"), ("gpt2", "GPT2ForTokenClassification"), ("gpt_bigcode", "GPTBigCodeForTokenClassification"), @@ -1338,7 +1267,6 @@ ("align", "AlignModel"), ("altclip", "AltCLIPModel"), ("blip", "BlipModel"), - ("blip-2", "Blip2ForImageTextRetrieval"), ("chinese_clip", "ChineseCLIPModel"), ("clip", "CLIPModel"), ("clipseg", "CLIPSegModel"), @@ -1355,6 +1283,7 @@ ("convnextv2", "ConvNextV2Backbone"), ("dinat", "DinatBackbone"), ("dinov2", "Dinov2Backbone"), + ("dinov2_with_registers", "Dinov2WithRegistersBackbone"), ("focalnet", "FocalNetBackbone"), ("hiera", "HieraBackbone"), ("maskformer-swin", "MaskFormerSwinBackbone"), @@ -1388,7 +1317,6 @@ ("albert", "AlbertModel"), ("bert", "BertModel"), ("big_bird", "BigBirdModel"), - ("clip_text_model", "CLIPTextModel"), ("data2vec-text", "Data2VecTextModel"), ("deberta", "DebertaModel"), ("deberta-v2", "DebertaV2Model"), @@ -1397,7 +1325,6 @@ ("flaubert", "FlaubertModel"), ("ibert", "IBertModel"), ("longformer", "LongformerModel"), - ("mllama", "MllamaTextModel"), ("mobilebert", "MobileBertModel"), ("mt5", "MT5EncoderModel"), ("nystromformer", "NystromformerModel"), @@ -1465,9 +1392,6 @@ CONFIG_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES ) MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) -MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES -) MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES ) @@ -1762,13 +1686,6 @@ class AutoModelForVision2Seq(_BaseAutoModelClass): AutoModelForVision2Seq = auto_class_update(AutoModelForVision2Seq, head_doc="vision-to-text modeling") -class AutoModelForImageTextToText(_BaseAutoModelClass): - _model_mapping = MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING - - -AutoModelForImageTextToText = auto_class_update(AutoModelForImageTextToText, head_doc="image-text-to-text modeling") - - class AutoModelForAudioClassification(_BaseAutoModelClass): _model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING @@ -1846,4 +1763,4 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): "`AutoModelForSeq2SeqLM` for encoder-decoder models.", FutureWarning, ) - return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) + return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) \ No newline at end of 
file diff --git a/src/transformers/models/dinov2/modeling_dinov2.py b/src/transformers/models/dinov2/modeling_dinov2.py index bae21dacb95b0f..7ea58d19ea894d 100644 --- a/src/transformers/models/dinov2/modeling_dinov2.py +++ b/src/transformers/models/dinov2/modeling_dinov2.py @@ -38,7 +38,6 @@ add_start_docstrings_to_model_forward, logging, replace_return_docstrings, - torch_int, ) from ...utils.backbone_utils import BackboneMixin from .configuration_dinov2 import Dinov2Config @@ -72,48 +71,42 @@ def __init__(self, config: Dinov2Config) -> None: num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.patch_size = config.patch_size self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ - This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution - images. This method is also adapted to support torch.jit tracing and interpolation at torch.float32 precision. + This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher + resolution images. - Adapted from: - - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 + Source: + https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 - - # always interpolate when tracing to ensure the exported model works for dynamic input shapes - if not torch.jit.is_tracing() and num_patches == num_positions and height == width: + if num_patches == num_positions and height == width: return self.position_embeddings - - class_pos_embed = self.position_embeddings[:, :1] + class_pos_embed = self.position_embeddings[:, 0] patch_pos_embed = self.position_embeddings[:, 1:] - dim = embeddings.shape[-1] - - new_height = height // self.patch_size - new_width = width // self.patch_size - - sqrt_num_positions = torch_int(num_positions**0.5) - patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) + height = height // self.config.patch_size + width = width // self.config.patch_size + # we add a small number to avoid floating point error in the interpolation + # see discussion at https://github.com/facebookresearch/dino/issues/8 + height, width = height + 0.1, width + 0.1 + patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) target_dtype = patch_pos_embed.dtype patch_pos_embed = nn.functional.interpolate( - patch_pos_embed.to(torch.float32), - size=(new_height, new_width), + patch_pos_embed.to(dtype=torch.float32), + scale_factor=(float(height / math.sqrt(num_positions)), float(width / math.sqrt(num_positions))), mode="bicubic", align_corners=False, ).to(dtype=target_dtype) - + if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]: + raise ValueError("Width or height does not match with the interpolated position embeddings") patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) - - return 
torch.cat((class_pos_embed, patch_pos_embed), dim=1) + return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape @@ -231,47 +224,6 @@ def forward( return outputs -class Dinov2SdpaSelfAttention(Dinov2SelfAttention): - def __init__(self, config: Dinov2Config) -> None: - super().__init__(config) - self.attention_probs_dropout_prob = config.attention_probs_dropout_prob - - def forward( - self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False - ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: - if output_attentions: - # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. - logger.warning_once( - "Dinov2Model is using Dinov2SdpaSelfAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " - 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' - ) - return super().forward( - hidden_states=hidden_states, head_mask=head_mask, output_attentions=output_attentions - ) - - mixed_query_layer = self.query(hidden_states) - - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - query_layer = self.transpose_for_scores(mixed_query_layer) - - context_layer = torch.nn.functional.scaled_dot_product_attention( - query_layer, - key_layer, - value_layer, - head_mask, - self.attention_probs_dropout_prob if self.training else 0.0, - is_causal=False, - scale=None, - ) - - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = context_layer.view(new_context_layer_shape) - - return context_layer, None - - # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Dinov2 class Dinov2SelfOutput(nn.Module): """ @@ -331,13 +283,6 @@ def forward( return outputs -# Copied from transformers.models.vit.modeling_vit.ViTSdpaAttention with ViT->Dinov2 -class Dinov2SdpaAttention(Dinov2Attention): - def __init__(self, config: Dinov2Config) -> None: - super().__init__(config) - self.attention = Dinov2SdpaSelfAttention(config) - - class Dinov2LayerScale(nn.Module): def __init__(self, config) -> None: super().__init__() @@ -347,28 +292,6 @@ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: return hidden_state * self.lambda1 -# Copied from transformers.models.beit.modeling_beit.drop_path -def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: - """ - Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - - Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, - however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... - See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the - layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the - argument. 
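For reference, a minimal standalone sketch of the stochastic-depth ("drop path") operation that the docstring above describes: each sample in the batch is either zeroed out or rescaled by `1 / keep_prob` so the expected value is preserved. The tensor shape and drop probability are illustrative only.

```python
import torch


def drop_path_sketch(x: torch.Tensor, drop_prob: float = 0.0, training: bool = True) -> torch.Tensor:
    # Identity at inference time or when nothing is dropped.
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dimensions.
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = (keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)).floor_()
    # Rescale the surviving samples so the expected output matches the input.
    return x.div(keep_prob) * mask


residual_branch = torch.randn(4, 257, 768)  # (batch, tokens, hidden), chosen for illustration
out = drop_path_sketch(residual_branch, drop_prob=0.5, training=True)
# on average two of the four samples are zeroed; the rest are scaled by 2.0
```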
- """ - if drop_prob == 0.0 or not training: - return input - keep_prob = 1 - drop_prob - shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets - random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) - random_tensor.floor_() # binarize - output = input.div(keep_prob) * random_tensor - return output - - -# Copied from transformers.models.beit.modeling_beit.BeitDropPath class Dinov2DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" @@ -376,8 +299,27 @@ def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob + def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: + """ + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, + however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the + layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the + argument. + """ + if drop_prob == 0.0 or not training: + return input + keep_prob = 1 - drop_prob + shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) + random_tensor.floor_() # binarize + output = input.div(keep_prob) * random_tensor + return output + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - return drop_path(hidden_states, self.drop_prob, self.training) + return self.drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return "p={}".format(self.drop_prob) @@ -419,12 +361,6 @@ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: return self.weights_out(hidden) -DINOV2_ATTENTION_CLASSES = { - "eager": Dinov2Attention, - "sdpa": Dinov2SdpaAttention, -} - - class Dinov2Layer(nn.Module): """This corresponds to the Block class in the original implementation.""" @@ -432,7 +368,7 @@ def __init__(self, config: Dinov2Config) -> None: super().__init__() self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.attention = DINOV2_ATTENTION_CLASSES[config._attn_implementation](config) + self.attention = Dinov2Attention(config) self.layer_scale1 = Dinov2LayerScale(config) self.drop_path = Dinov2DropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() @@ -539,7 +475,6 @@ class Dinov2PreTrainedModel(PreTrainedModel): main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["Dinov2SwiGLUFFN"] - _supports_sdpa = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" @@ -912,4 +847,4 @@ def forward( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions if output_attentions else None, - ) + ) \ No newline at end of file diff --git a/src/transformers/models/dinov2_with_registers/__init__.py b/src/transformers/models/dinov2_with_registers/__init__.py new file mode 100644 index 00000000000000..59d0e0109c9a7c --- /dev/null +++ 
b/src/transformers/models/dinov2_with_registers/__init__.py @@ -0,0 +1,57 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, +) + + +_import_structure = {"configuration_dinov2_with_registers": ["Dinov2WithRegistersConfig"]} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_dinov2_with_registers"] = [ + "Dinov2WithRegistersForImageClassification", + "Dinov2WithRegistersModel", + "Dinov2WithRegistersPreTrainedModel", + "Dinov2WithRegistersBackbone", + ] + +if TYPE_CHECKING: + from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_dinov2_with_registers import ( + Dinov2WithRegistersBackbone, + Dinov2WithRegistersForImageClassification, + Dinov2WithRegistersModel, + Dinov2WithRegistersPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) \ No newline at end of file diff --git a/src/transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py b/src/transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py new file mode 100644 index 00000000000000..0a8cedd5c879e0 --- /dev/null +++ b/src/transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py @@ -0,0 +1,172 @@ +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# This file was automatically generated from . +# Do NOT edit this file manually as any edits will be overwritten by the generation of +# the file from the diff. If any change should be done, please apply the change to the +# diff.py file directly. +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# coding=utf-8 +# Copyright 2024 Meta Inc. and the HuggingFace Inc. team. All rights reserved. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
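Taken together, the lazy `__init__.py` above and the auto-mapping entries earlier in the patch are what make the new classes importable and resolvable through the auto classes. A minimal usage sketch: the random-weight instantiation mirrors the example in the configuration docstring below, and the commented `from_pretrained` id follows the names the conversion script pushes later in the patch, so treat it as an assumption.

```python
from transformers import Dinov2WithRegistersConfig, Dinov2WithRegistersModel

# Randomly initialized model built directly from the configuration defined in this file.
config = Dinov2WithRegistersConfig(num_register_tokens=4)
model = Dinov2WithRegistersModel(config)

# Once checkpoints exist on the Hub, the "dinov2_with_registers" model_type registered in
# configuration_auto.py / modeling_auto.py lets the auto classes resolve them, e.g.:
# from transformers import AutoModel
# model = AutoModel.from_pretrained("facebook/dinov2-with-registers-base")  # id assumed from the conversion script
```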
+ + +from transformers import PretrainedConfig + +from ...utils import ( + logging, +) +from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices + + +logger = logging.get_logger(__name__) + + +class Dinov2WithRegistersConfig(BackboneConfigMixin, PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`Dinov2WithRegistersModel`]. It is used to instantiate an + Dinov2WithRegisters model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the Dinov2WithRegisters + [google/dinov2-with-registers-base-patch16-224](https://huggingface.co/google/dinov2-with-registers-base-patch16-224) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + mlp_ratio (`int`, *optional*, defaults to 4): + Ratio of the hidden size of the MLPs relative to the `hidden_size`. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the layer normalization layers. + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 16): + The size (resolution) of each patch. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether to add a bias to the queries, keys and values. + layerscale_value (`float`, *optional*, defaults to 1.0): + Initial value to use for layer scale. + drop_path_rate (`float`, *optional*, defaults to 0.0): + Stochastic depth rate per sample (when applied in the main path of residual layers). + use_swiglu_ffn (`bool`, *optional*, defaults to `False`): + Whether to use the SwiGLU feedforward neural network. + num_register_tokens (`int`, *optional*, defaults to 4): + Number of register tokens to use. + interpolate_antialias (`bool`, *optional*, defaults to `True`): + Whether to use antialiasing when interpolating the image patches. + interpolate_offset (`float`, *optional*, defaults to 0.0): + Offset to use when interpolating the image patches. + out_features (`List[str]`, *optional*): + If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. + (depending on how many stages the model has). 
If unset and `out_indices` is set, will default to the + corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the + same order as defined in the `stage_names` attribute. + out_indices (`List[int]`, *optional*): + If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how + many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. + If unset and `out_features` is unset, will default to the last stage. Must be in the + same order as defined in the `stage_names` attribute. + apply_layernorm (`bool`, *optional*, defaults to `True`): + Whether to apply layer normalization to the feature maps in case the model is used as backbone. + reshape_hidden_states (`bool`, *optional*, defaults to `True`): + Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in + case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size, + seq_len, hidden_size)`. + + Example: + + ```python + >>> from transformers import Dinov2WithRegistersConfig, Dinov2WithRegistersModel + + >>> # Initializing a Dinov2WithRegisters dinov2-with-registers-base-patch16-224 style configuration + >>> configuration = Dinov2WithRegistersConfig() + + >>> # Initializing a model (with random weights) from the dinov2-with-registers-base-patch16-224 style configuration + >>> model = Dinov2WithRegistersModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "dinov2_with_registers" + + def __init__( + self, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + mlp_ratio=4, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + initializer_range=0.02, + layer_norm_eps=1e-6, + image_size=224, + patch_size=16, + num_channels=3, + qkv_bias=True, + layerscale_value=1.0, + drop_path_rate=0.0, + use_swiglu_ffn=False, + num_register_tokens=4, + interpolate_antialias=True, + interpolate_offset=0.0, + out_features=None, + out_indices=None, + apply_layernorm=True, + reshape_hidden_states=True, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.mlp_ratio = mlp_ratio + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.qkv_bias = qkv_bias + self.layerscale_value = layerscale_value + self.drop_path_rate = drop_path_rate + self.use_swiglu_ffn = use_swiglu_ffn + self.num_register_tokens = num_register_tokens + self.interpolate_antialias = interpolate_antialias + self.interpolate_offset = interpolate_offset + self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)] + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=out_features, out_indices=out_indices, stage_names=self.stage_names + ) + self.apply_layernorm = apply_layernorm + self.reshape_hidden_states = reshape_hidden_states \ No newline at end of file diff --git a/src/transformers/models/dinov2_with_registers/convert_dinov2_with_registers_to_hf.py 
b/src/transformers/models/dinov2_with_registers/convert_dinov2_with_registers_to_hf.py new file mode 100644 index 00000000000000..96753b9fa1ce32 --- /dev/null +++ b/src/transformers/models/dinov2_with_registers/convert_dinov2_with_registers_to_hf.py @@ -0,0 +1,291 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert Dinov2 With Registers checkpoints from the original repository. + +URL: https://github.com/facebookresearch/dinov2/tree/main +""" + +import argparse +import json +from pathlib import Path + +import requests +import torch +import torch.nn as nn +from huggingface_hub import hf_hub_download +from PIL import Image +from torchvision import transforms + +from transformers import ( + BitImageProcessor, + Dinov2WithRegistersConfig, + Dinov2WithRegistersForImageClassification, + Dinov2WithRegistersModel, +) +from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +def get_dinov2_with_registers_config(model_name, image_classifier=False): + config = Dinov2WithRegistersConfig(image_size=518, patch_size=14) + + # size of the architecture + if "vits" in model_name: + config.hidden_size = 384 + config.num_attention_heads = 6 + elif "vitb" in model_name: + pass + elif "vitl" in model_name: + config.hidden_size = 1024 + config.num_hidden_layers = 24 + config.num_attention_heads = 16 + elif "vitg" in model_name: + config.use_swiglu_ffn = True + config.hidden_size = 1536 + config.num_hidden_layers = 40 + config.num_attention_heads = 24 + else: + raise ValueError("Model not supported") + + if image_classifier: + repo_id = "huggingface/label-files" + filename = "imagenet-1k-id2label.json" + config.num_labels = 1000 + config.id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) + config.id2label = {int(k): v for k, v in config.id2label.items()} + + return config + + +def create_rename_keys(config): + rename_keys = [] + # fmt: off + + # patch embedding layer + rename_keys.append(("cls_token", "embeddings.cls_token")) + rename_keys.append(("mask_token", "embeddings.mask_token")) + rename_keys.append(("pos_embed", "embeddings.position_embeddings")) + rename_keys.append(("register_tokens", "embeddings.register_tokens")) + rename_keys.append(("patch_embed.proj.weight", "embeddings.patch_embeddings.projection.weight")) + rename_keys.append(("patch_embed.proj.bias", "embeddings.patch_embeddings.projection.bias")) + + for i in range(config.num_hidden_layers): + # layernorms + rename_keys.append((f"blocks.{i}.norm1.weight", f"encoder.layer.{i}.norm1.weight")) + rename_keys.append((f"blocks.{i}.norm1.bias", f"encoder.layer.{i}.norm1.bias")) + rename_keys.append((f"blocks.{i}.norm2.weight", f"encoder.layer.{i}.norm2.weight")) + rename_keys.append((f"blocks.{i}.norm2.bias", f"encoder.layer.{i}.norm2.bias")) + # MLP + if config.use_swiglu_ffn: + 
rename_keys.append((f"blocks.{i}.mlp.w12.weight", f"encoder.layer.{i}.mlp.w12.weight")) + rename_keys.append((f"blocks.{i}.mlp.w12.bias", f"encoder.layer.{i}.mlp.w12.bias")) + rename_keys.append((f"blocks.{i}.mlp.w3.weight", f"encoder.layer.{i}.mlp.w3.weight")) + rename_keys.append((f"blocks.{i}.mlp.w3.bias", f"encoder.layer.{i}.mlp.w3.bias")) + else: + rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"encoder.layer.{i}.mlp.fc1.weight")) + rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"encoder.layer.{i}.mlp.fc1.bias")) + rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"encoder.layer.{i}.mlp.fc2.weight")) + rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"encoder.layer.{i}.mlp.fc2.bias")) + # layerscale + rename_keys.append((f"blocks.{i}.ls1.gamma", f"encoder.layer.{i}.layer_scale1.lambda1")) + rename_keys.append((f"blocks.{i}.ls2.gamma", f"encoder.layer.{i}.layer_scale2.lambda1")) + # attention projection layer + rename_keys.append((f"blocks.{i}.attn.proj.weight", f"encoder.layer.{i}.attention.output.dense.weight")) + rename_keys.append((f"blocks.{i}.attn.proj.bias", f"encoder.layer.{i}.attention.output.dense.bias")) + + # final layernorm + rename_keys.append(("norm.weight", "layernorm.weight")) + rename_keys.append(("norm.bias", "layernorm.bias")) + + # fmt: on + return rename_keys + + +def rename_key(dct, old, new): + val = dct.pop(old) + dct[new] = val + + +# we split up the matrix of each encoder layer into queries, keys and values +def read_in_q_k_v(state_dict, config): + for i in range(config.num_hidden_layers): + # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) + in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") + in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") + # next, add query, keys and values (in that order) to the state dict + state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :] + state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] + state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ + config.hidden_size : config.hidden_size * 2, : + ] + state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ + config.hidden_size : config.hidden_size * 2 + ] + state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :] + state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + image = Image.open(requests.get(url, stream=True).raw).convert("RGB") + return image + + +@torch.no_grad() +def convert_dinov2_with_registers_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): + """ + Copy/paste/tweak model's weights to our Dinov2WithRegisters structure. 
+ """ + + # define default Dinov2WithRegisters configuration + image_classifier = "1layer" in model_name + config = get_dinov2_with_registers_config(model_name, image_classifier=image_classifier) + + # load original model from torch hub + original_model = torch.hub.load("facebookresearch/dinov2", model_name.replace("_1layer", "")) + original_model.eval() + + # load state_dict of original model, remove and rename some keys + state_dict = original_model.state_dict() + rename_keys = create_rename_keys(config) + for src, dest in rename_keys: + rename_key(state_dict, src, dest) + read_in_q_k_v(state_dict, config) + + for key, val in state_dict.copy().items(): + val = state_dict.pop(key) + if "w12" in key: + key = key.replace("w12", "weights_in") + if "w3" in key: + key = key.replace("w3", "weights_out") + state_dict[key] = val + + # load HuggingFace model + if image_classifier: + model = Dinov2WithRegistersForImageClassification(config).eval() + model.dinov2_with_registers.load_state_dict(state_dict) + model_name_to_classifier_dict_url = { + "dinov2_vits14_reg_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_reg4_linear_head.pth", + "dinov2_vitb14_reg_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_reg4_linear_head.pth", + "dinov2_vitl14_reg_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_linear_head.pth", + "dinov2_vitg14_reg_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_reg4_linear_head.pth", + } + url = model_name_to_classifier_dict_url[model_name] + classifier_state_dict = torch.hub.load_state_dict_from_url(url, map_location="cpu") + model.classifier.weight = nn.Parameter(classifier_state_dict["weight"]) + model.classifier.bias = nn.Parameter(classifier_state_dict["bias"]) + else: + model = Dinov2WithRegistersModel(config).eval() + model.load_state_dict(state_dict) + + # load image + image = prepare_img() + + # preprocess image + transformations = transforms.Compose( + [ + transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize( + mean=IMAGENET_DEFAULT_MEAN, # these are RGB mean+std values + std=IMAGENET_DEFAULT_STD, # across a large photo dataset. 
+ ), + ] + ) + + original_pixel_values = transformations(image).unsqueeze(0) # insert batch dimension + + processor = BitImageProcessor( + size={"shortest_edge": 256}, + resample=PILImageResampling.BICUBIC, + image_mean=IMAGENET_DEFAULT_MEAN, + image_std=IMAGENET_DEFAULT_STD, + ) + pixel_values = processor(image, return_tensors="pt").pixel_values + + assert torch.allclose(original_pixel_values, pixel_values) + + with torch.no_grad(): + outputs = model(pixel_values, output_hidden_states=True) + original_outputs = original_model(pixel_values) + + # assert values + if image_classifier: + print("Predicted class:") + class_idx = outputs.logits.argmax(-1).item() + print(model.config.id2label[class_idx]) + else: + assert outputs.last_hidden_state[:, 0].shape == original_outputs.shape + assert torch.allclose(outputs.last_hidden_state[:, 0], original_outputs, atol=1e-3) + print("Looks ok!") + + if pytorch_dump_folder_path is not None: + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + print(f"Saving model {model_name} to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + print(f"Saving image processor to {pytorch_dump_folder_path}") + processor.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + model_name_to_hf_name = { + "dinov2_vits14_reg": "dinov2-with-registers-small", + "dinov2_vitb14_reg": "dinov2-with-registers-base", + "dinov2_vitl14_reg": "dinov2-with-registers-large", + "dinov2_vitg14_reg": "dinov2-with-registers-giant", + "dinov2_vits14_reg_1layer": "dinov2-with-registers-small-imagenet1k-1-layer", + "dinov2_vitb14_reg_1layer": "dinov2-with-registers-base-imagenet1k-1-layer", + "dinov2_vitl14_reg_1layer": "dinov2-with-registers-large-imagenet1k-1-layer", + "dinov2_vitg14_reg_1layer": "dinov2-with-registers-giant-imagenet1k-1-layer", + } + + name = model_name_to_hf_name[model_name] + model.push_to_hub(f"facebook/{name}") + processor.push_to_hub(f"facebook/{name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default="dinov2_vits14_reg", + type=str, + choices=[ + "dinov2_vits14_reg", + "dinov2_vitb14_reg", + "dinov2_vitl14_reg", + "dinov2_vitg14_reg", + "dinov2_vits14_reg_1layer", + "dinov2_vitb14_reg_1layer", + "dinov2_vitl14_reg_1layer", + "dinov2_vitg14_reg_1layer", + ], + help="Name of the model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." + ) + parser.add_argument( + "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." + ) + + args = parser.parse_args() + convert_dinov2_with_registers_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) \ No newline at end of file diff --git a/src/transformers/models/dinov2_with_registers/diff_dinov2_with_registers.py b/src/transformers/models/dinov2_with_registers/diff_dinov2_with_registers.py new file mode 100644 index 00000000000000..be59410a886f7e --- /dev/null +++ b/src/transformers/models/dinov2_with_registers/diff_dinov2_with_registers.py @@ -0,0 +1,367 @@ +# coding=utf-8 +# Copyright 2024 Meta Inc. and the HuggingFace Inc. team. All rights reserved. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import math +from typing import Optional + +import torch +import torch.utils.checkpoint +from torch import nn + +from transformers import PretrainedConfig +from transformers.models.dinov2.modeling_dinov2 import ( + Dinov2Backbone, + Dinov2Encoder, + Dinov2ForImageClassification, + Dinov2Model, + Dinov2PatchEmbeddings, +) + +from ...modeling_outputs import BackboneOutput +from ...utils import logging +from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices + + +logger = logging.get_logger(__name__) + + +class Dinov2WithRegistersConfig(BackboneConfigMixin, PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`Dinov2WithRegistersModel`]. It is used to instantiate an + Dinov2WithRegisters model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the Dinov2WithRegisters + [google/dinov2-with-registers-base-patch16-224](https://huggingface.co/google/dinov2-with-registers-base-patch16-224) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + mlp_ratio (`int`, *optional*, defaults to 4): + Ratio of the hidden size of the MLPs relative to the `hidden_size`. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the layer normalization layers. + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 16): + The size (resolution) of each patch. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether to add a bias to the queries, keys and values. + layerscale_value (`float`, *optional*, defaults to 1.0): + Initial value to use for layer scale. 
+ drop_path_rate (`float`, *optional*, defaults to 0.0): + Stochastic depth rate per sample (when applied in the main path of residual layers). + use_swiglu_ffn (`bool`, *optional*, defaults to `False`): + Whether to use the SwiGLU feedforward neural network. + num_register_tokens (`int`, *optional*, defaults to 4): + Number of register tokens to use. + interpolate_antialias (`bool`, *optional*, defaults to `True`): + Whether to use antialiasing when interpolating the image patches. + interpolate_offset (`float`, *optional*, defaults to 0.0): + Offset to use when interpolating the image patches. + out_features (`List[str]`, *optional*): + If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. + (depending on how many stages the model has). If unset and `out_indices` is set, will default to the + corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the + same order as defined in the `stage_names` attribute. + out_indices (`List[int]`, *optional*): + If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how + many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. + If unset and `out_features` is unset, will default to the last stage. Must be in the + same order as defined in the `stage_names` attribute. + apply_layernorm (`bool`, *optional*, defaults to `True`): + Whether to apply layer normalization to the feature maps in case the model is used as backbone. + reshape_hidden_states (`bool`, *optional*, defaults to `True`): + Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in + case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size, + seq_len, hidden_size)`. 
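The `image_size`, `patch_size` and `num_register_tokens` arguments above fix the sequence length the encoder sees: one [CLS] token, then the register tokens, then the patch tokens. A small worked example with the documented defaults (illustrative only, not tied to a released checkpoint):

```python
# Token bookkeeping implied by the defaults above.
image_size, patch_size, num_register_tokens = 224, 16, 4

num_patches = (image_size // patch_size) ** 2     # 14 * 14 = 196 patch tokens
seq_len = 1 + num_register_tokens + num_patches   # 1 CLS + 4 registers + 196 patches = 201
print(seq_len)  # 201

# When the model is used as a backbone, the CLS and register tokens are dropped again
# (hidden_state[:, num_register_tokens + 1 :]) before reshaping to (batch, hidden, H/patch, W/patch).
```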
+ + Example: + + ```python + >>> from transformers import Dinov2WithRegistersConfig, Dinov2WithRegistersModel + + >>> # Initializing a Dinov2WithRegisters dinov2-with-registers-base-patch16-224 style configuration + >>> configuration = Dinov2WithRegistersConfig() + + >>> # Initializing a model (with random weights) from the dinov2-with-registers-base-patch16-224 style configuration + >>> model = Dinov2WithRegistersModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "dinov2_with_registers" + + def __init__( + self, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + mlp_ratio=4, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + initializer_range=0.02, + layer_norm_eps=1e-6, + image_size=224, + patch_size=16, + num_channels=3, + qkv_bias=True, + layerscale_value=1.0, + drop_path_rate=0.0, + use_swiglu_ffn=False, + num_register_tokens=4, + interpolate_antialias=True, + interpolate_offset=0.0, + out_features=None, + out_indices=None, + apply_layernorm=True, + reshape_hidden_states=True, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.mlp_ratio = mlp_ratio + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.qkv_bias = qkv_bias + self.layerscale_value = layerscale_value + self.drop_path_rate = drop_path_rate + self.use_swiglu_ffn = use_swiglu_ffn + self.num_register_tokens = num_register_tokens + self.interpolate_antialias = interpolate_antialias + self.interpolate_offset = interpolate_offset + self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)] + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=out_features, out_indices=out_indices, stage_names=self.stage_names + ) + self.apply_layernorm = apply_layernorm + self.reshape_hidden_states = reshape_hidden_states + + +class Dinov2WithRegistersPatchEmbeddings(Dinov2PatchEmbeddings): + pass + + +class Dinov2WithRegistersEmbeddings(nn.Module): + """ + Construct the CLS token, mask token, register tokens, position and patch embeddings. + """ + + def __init__(self, config: Dinov2WithRegistersConfig) -> None: + super().__init__() + + self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size)) + self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size)) + self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size)) + self.patch_embeddings = Dinov2WithRegistersPatchEmbeddings(config) + num_patches = self.patch_embeddings.num_patches + self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size)) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.config = config + + def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: + """ + This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher + resolution images. 
+
+        Source:
+        https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
+        """
+
+        num_patches = embeddings.shape[1] - 1
+        num_positions = self.position_embeddings.shape[1] - 1
+        if num_patches == num_positions and height == width:
+            return self.position_embeddings
+        class_pos_embed = self.position_embeddings[:, 0]
+        patch_pos_embed = self.position_embeddings[:, 1:]
+        dim = embeddings.shape[-1]
+        height = height // self.config.patch_size
+        width = width // self.config.patch_size
+        # we add a small number to avoid floating point error in the interpolation
+        # see discussion at https://github.com/facebookresearch/dino/issues/8
+        height, width = height + self.config.interpolate_offset, width + self.config.interpolate_offset
+        patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
+        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
+        target_dtype = patch_pos_embed.dtype
+        patch_pos_embed = nn.functional.interpolate(
+            patch_pos_embed.to(dtype=torch.float32),
+            scale_factor=(float(height / math.sqrt(num_positions)), float(width / math.sqrt(num_positions))),
+            mode="bicubic",
+            align_corners=False,
+            antialias=self.config.interpolate_antialias,
+        )
+        patch_pos_embed = patch_pos_embed.to(dtype=target_dtype)
+        if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
+            raise ValueError("Width or height does not match with the interpolated position embeddings")
+        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
+
+    def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
+        batch_size, _, height, width = pixel_values.shape
+        target_dtype = self.patch_embeddings.projection.weight.dtype
+        embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
+
+        if bool_masked_pos is not None:
+            embeddings = torch.where(
+                bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
+            )
+
+        # add the [CLS] token to the embedded patch tokens
+        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
+        embeddings = torch.cat((cls_tokens, embeddings), dim=1)
+
+        # add positional encoding to each token
+        embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+
+        # add register tokens between the [CLS] token and the patch tokens
+        embeddings = torch.cat(
+            (embeddings[:, :1], self.register_tokens.expand(embeddings.shape[0], -1, -1), embeddings[:, 1:]), dim=1
+        )
+
+        embeddings = self.dropout(embeddings)
+
+        return embeddings
+
+
+class Dinov2WithRegistersEncoder(Dinov2Encoder):
+    pass
+
+
+class Dinov2WithRegistersModel(Dinov2Model):
+    pass
+
+
+class Dinov2WithRegistersForImageClassification(Dinov2ForImageClassification):
+    pass
+
+
+class Dinov2WithRegistersBackbone(Dinov2Backbone):
+    def __init__(self, config):
+        super().__init__(config)
+        super()._init_backbone(config)
+
+        self.num_register_tokens = config.num_register_tokens
+        self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
+        self.embeddings = Dinov2WithRegistersEmbeddings(config)
+        self.encoder = Dinov2WithRegistersEncoder(config)
+
+        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
+        return
self.embeddings.patch_embeddings + + def forward( + self, + pixel_values: torch.Tensor, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> BackboneOutput: + """ + Returns: + + Examples: + + ```python + >>> from transformers import AutoImageProcessor, AutoBackbone + >>> import torch + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2_with_registers") + >>> model = AutoBackbone.from_pretrained( + ... "facebook/dinov2_with_registers", out_features=["stage2", "stage5", "stage8", "stage11"] + ... ) + + >>> inputs = processor(image, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> feature_maps = outputs.feature_maps + >>> list(feature_maps[-1].shape) + [1, 768, 16, 16] + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + + embedding_output = self.embeddings(pixel_values) + + outputs = self.encoder( + embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict + ) + + hidden_states = outputs.hidden_states if return_dict else outputs[1] + + feature_maps = () + for stage, hidden_state in zip(self.stage_names, hidden_states): + if stage in self.out_features: + if self.config.apply_layernorm: + hidden_state = self.layernorm(hidden_state) + if self.config.reshape_hidden_states: + hidden_state = hidden_state[:, self.num_register_tokens + 1 :] + # this was actually a bug in the original implementation that we copied here, + # cause normally the order is height, width + batch_size, _, height, width = pixel_values.shape + patch_size = self.config.patch_size + hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1) + hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() + feature_maps += (hidden_state,) + + if not return_dict: + if output_hidden_states: + output = (feature_maps,) + outputs[1:] + else: + output = (feature_maps,) + outputs[2:] + return output + + return BackboneOutput( + feature_maps=feature_maps, + hidden_states=outputs.hidden_states if output_hidden_states else None, + attentions=outputs.attentions if output_attentions else None, + ) \ No newline at end of file diff --git a/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py b/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py new file mode 100644 index 00000000000000..bd9a4d76f71d5c --- /dev/null +++ b/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py @@ -0,0 +1,879 @@ +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# This file was automatically generated from . +# Do NOT edit this file manually as any edits will be overwritten by the generation of +# the file from the diff. If any change should be done, please apply the change to the +# diff.py file directly. +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# coding=utf-8 +# Copyright 2024 Meta Inc. and the HuggingFace Inc. team. All rights reserved. 
+# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections.abc +import math +from typing import Dict, List, Optional, Set, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BackboneOutput, + BaseModelOutput, + BaseModelOutputWithPooling, + ImageClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from ...utils.backbone_utils import BackboneMixin +from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig + + +logger = logging.get_logger(__name__) + + +class Dinov2WithRegistersPatchEmbeddings(nn.Module): + """ + This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial + `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a + Transformer. + """ + + def __init__(self, config): + super().__init__() + image_size, patch_size = config.image_size, config.patch_size + num_channels, hidden_size = config.num_channels, config.hidden_size + + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_patches = num_patches + + self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) + + def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: + num_channels = pixel_values.shape[1] + if num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + f" Expected {self.num_channels} but got {num_channels}." + ) + embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) + return embeddings + + +# General docstring +_CONFIG_FOR_DOC = "Dinov2WithRegistersConfig" + +# Base docstring +_CHECKPOINT_FOR_DOC = "facebook/dinov2_with_registers-base" +_EXPECTED_OUTPUT_SHAPE = [1, 257, 768] + + +class Dinov2WithRegistersEmbeddings(nn.Module): + """ + Construct the CLS token, mask token, register tokens, position and patch embeddings. 
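+
+    For orientation only (a schematic sketch, not additional model code; the concrete numbers depend on
+    the checkpoint configuration): the forward pass below returns a sequence laid out as
+    `[CLS] + register tokens + patch tokens`, so the sequence length can be reasoned about as
+
+    ```python
+    # schematic shape bookkeeping, assuming a square image and square patches
+    num_patches = (image_size // patch_size) ** 2
+    seq_len = 1 + num_register_tokens + num_patches  # [CLS] + registers + patch tokens
+    # embeddings: (batch_size, seq_len, hidden_size)
+    ```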
+ """ + + def __init__(self, config: Dinov2WithRegistersConfig) -> None: + super().__init__() + + self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size)) + self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size)) + self.register_tokens = ( + nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size)) + if config.num_register_tokens + else None + ) + self.patch_embeddings = Dinov2WithRegistersPatchEmbeddings(config) + num_patches = self.patch_embeddings.num_patches + self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size)) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.config = config + + def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: + """ + This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher + resolution images. + + Source: + https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 + """ + + num_patches = embeddings.shape[1] - 1 + num_positions = self.position_embeddings.shape[1] - 1 + if num_patches == num_positions and height == width: + return self.position_embeddings + class_pos_embed = self.position_embeddings[:, 0] + patch_pos_embed = self.position_embeddings[:, 1:] + dim = embeddings.shape[-1] + height = height // self.config.patch_size + width = width // self.config.patch_size + # we add a small number to avoid floating point error in the interpolation + # see discussion at https://github.com/facebookresearch/dino/issues/8 + height, width = height + self.config.interpolate_offset, width + self.config.interpolate_offset + patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) + patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) + target_dtype = patch_pos_embed.dtype + patch_pos_embed = nn.functional.interpolate( + patch_pos_embed.to(dtype=torch.float32), + scale_factor=(float(height / math.sqrt(num_positions)), float(width / math.sqrt(num_positions))), + mode="bicubic", + align_corners=False, + antialias=self.config.interpolate_antialias, + ).to(dtype=target_dtype) + if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]: + raise ValueError("Width or height does not match with the interpolated position embeddings") + patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) + + def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor: + batch_size, _, height, width = pixel_values.shape + target_dtype = self.patch_embeddings.projection.weight.dtype + embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype)) + + if bool_masked_pos is not None: + embeddings = torch.where( + bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings + ) + + # add the [CLS] token to the embedded patch tokens + cls_tokens = self.cls_token.expand(batch_size, -1, -1) + embeddings = torch.cat((cls_tokens, embeddings), dim=1) + + # add positional encoding to each token + embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) + + if self.register_tokens is not None: + embeddings = torch.cat( + ( + embeddings[:, :1], + self.register_tokens.expand(embeddings.shape[0], -1, -1), + embeddings[:, 1:], + ), + dim=1, + ) + + embeddings = 
self.dropout(embeddings) + + return embeddings + + +# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Dinov2WithRegisters +class Dinov2WithRegistersSelfAttention(nn.Module): + def __init__(self, config: Dinov2WithRegistersConfig) -> None: + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " + f"heads {config.num_attention_heads}." + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) + self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) + self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + mixed_query_layer = self.query(hidden_states) + + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + query_layer = self.transpose_for_scores(mixed_query_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Dinov2WithRegisters +class Dinov2WithRegistersSelfOutput(nn.Module): + """ + The residual connection is defined in Dinov2WithRegistersLayer instead of here (as is the case with other models), due to the + layernorm applied before each block. 
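+
+    Schematically (a simplified sketch of what Dinov2WithRegistersLayer does around this module, not
+    additional model code):
+
+    ```python
+    # pre-norm residual: the skip connection is added in the layer, after layer scale / drop path
+    hidden_states = hidden_states + drop_path(layer_scale1(attention(norm1(hidden_states))))
+    hidden_states = hidden_states + drop_path(layer_scale2(mlp(norm2(hidden_states))))
+    ```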
+    """
+
+    def __init__(self, config: Dinov2WithRegistersConfig) -> None:
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+
+        return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->Dinov2WithRegisters
+class Dinov2WithRegistersAttention(nn.Module):
+    def __init__(self, config: Dinov2WithRegistersConfig) -> None:
+        super().__init__()
+        self.attention = Dinov2WithRegistersSelfAttention(config)
+        self.output = Dinov2WithRegistersSelfOutput(config)
+        self.pruned_heads = set()
+
+    def prune_heads(self, heads: Set[int]) -> None:
+        if len(heads) == 0:
+            return
+        heads, index = find_pruneable_heads_and_indices(
+            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+        )
+
+        # Prune linear layers
+        self.attention.query = prune_linear_layer(self.attention.query, index)
+        self.attention.key = prune_linear_layer(self.attention.key, index)
+        self.attention.value = prune_linear_layer(self.attention.value, index)
+        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+        # Update hyper params and store pruned heads
+        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+        self.pruned_heads = self.pruned_heads.union(heads)
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        head_mask: Optional[torch.Tensor] = None,
+        output_attentions: bool = False,
+    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+        self_outputs = self.attention(hidden_states, head_mask, output_attentions)
+
+        attention_output = self.output(self_outputs[0], hidden_states)
+
+        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
+        return outputs
+
+
+class Dinov2WithRegistersLayerScale(nn.Module):
+    def __init__(self, config) -> None:
+        super().__init__()
+        self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size))
+
+    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+        return hidden_state * self.lambda1
+
+
+class Dinov2WithRegistersDropPath(nn.Module):
+    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+    def __init__(self, drop_prob: Optional[float] = None) -> None:
+        super().__init__()
+        self.drop_prob = drop_prob
+
+    def drop_path(self, input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+        """
+        Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+        Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+        however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+        See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+        layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+        argument.
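+
+        In short: during training each sample in the batch is zeroed out with probability `drop_prob`,
+        and the surviving samples are rescaled by `1 / keep_prob`, so that
+        `E[output] = keep_prob * input / keep_prob + (1 - keep_prob) * 0 = input`; at inference
+        (`training=False`) the input is returned unchanged.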
+ """ + if drop_prob == 0.0 or not training: + return input + keep_prob = 1 - drop_prob + shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) + random_tensor.floor_() # binarize + output = input.div(keep_prob) * random_tensor + return output + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + return self.drop_path(hidden_states, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return "p={}".format(self.drop_prob) + + +class Dinov2WithRegistersMLP(nn.Module): + def __init__(self, config) -> None: + super().__init__() + in_features = out_features = config.hidden_size + hidden_features = int(config.hidden_size * config.mlp_ratio) + self.fc1 = nn.Linear(in_features, hidden_features, bias=True) + if isinstance(config.hidden_act, str): + self.activation = ACT2FN[config.hidden_act] + else: + self.activation = config.hidden_act + self.fc2 = nn.Linear(hidden_features, out_features, bias=True) + + def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: + hidden_state = self.fc1(hidden_state) + hidden_state = self.activation(hidden_state) + hidden_state = self.fc2(hidden_state) + return hidden_state + + +class Dinov2WithRegistersSwiGLUFFN(nn.Module): + def __init__(self, config) -> None: + super().__init__() + in_features = out_features = config.hidden_size + hidden_features = int(config.hidden_size * config.mlp_ratio) + hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 + + self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True) + self.weights_out = nn.Linear(hidden_features, out_features, bias=True) + + def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: + hidden_state = self.weights_in(hidden_state) + x1, x2 = hidden_state.chunk(2, dim=-1) + hidden = nn.functional.silu(x1) * x2 + return self.weights_out(hidden) + + +class Dinov2WithRegistersLayer(nn.Module): + """This corresponds to the Block class in the original implementation.""" + + def __init__(self, config: Dinov2WithRegistersConfig) -> None: + super().__init__() + + self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.attention = Dinov2WithRegistersAttention(config) + self.layer_scale1 = Dinov2WithRegistersLayerScale(config) + self.drop_path = ( + Dinov2WithRegistersDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() + ) + + self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + if config.use_swiglu_ffn: + self.mlp = Dinov2WithRegistersSwiGLUFFN(config) + else: + self.mlp = Dinov2WithRegistersMLP(config) + self.layer_scale2 = Dinov2WithRegistersLayerScale(config) + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + self_attention_outputs = self.attention( + self.norm1(hidden_states), # in Dinov2WithRegisters, layernorm is applied before self-attention + head_mask, + output_attentions=output_attentions, + ) + attention_output = self_attention_outputs[0] + + attention_output = self.layer_scale1(attention_output) + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + # first residual connection + hidden_states = self.drop_path(attention_output) + hidden_states + + # in Dinov2WithRegisters, layernorm is also applied after self-attention + layer_output = 
self.norm2(hidden_states) + layer_output = self.mlp(layer_output) + layer_output = self.layer_scale2(layer_output) + + # second residual connection + layer_output = self.drop_path(layer_output) + hidden_states + + outputs = (layer_output,) + outputs + + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->Dinov2WithRegisters +class Dinov2WithRegistersEncoder(nn.Module): + def __init__(self, config: Dinov2WithRegistersConfig) -> None: + super().__init__() + self.config = config + self.layer = nn.ModuleList([Dinov2WithRegistersLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ) -> Union[tuple, BaseModelOutput]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + layer_head_mask, + output_attentions, + ) + else: + layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +# Image classification docstring +_IMAGE_CLASS_CHECKPOINT = "facebook/dinov2_with_registers-small-imagenet1k-1-layer" + + +class Dinov2WithRegistersPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
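+
+    A minimal loading sketch (the checkpoint name simply mirrors `_CHECKPOINT_FOR_DOC` above and may not
+    match the final name on the Hub):
+
+    ```python
+    from transformers import AutoImageProcessor, Dinov2WithRegistersModel
+
+    processor = AutoImageProcessor.from_pretrained("facebook/dinov2_with_registers-base")
+    model = Dinov2WithRegistersModel.from_pretrained("facebook/dinov2_with_registers-base")
+    ```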
+ """ + + config_class = Dinov2WithRegistersConfig + base_model_prefix = "dinov2_with_registers" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + _no_split_modules = ["Dinov2WithRegistersSwiGLUFFN"] + + def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Conv2d)): + # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid + # `trunc_normal_cpu` not implemented in `half` issues + module.weight.data = nn.init.trunc_normal_( + module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range + ).to(module.weight.dtype) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + elif isinstance(module, Dinov2WithRegistersEmbeddings): + module.position_embeddings.data = nn.init.trunc_normal_( + module.position_embeddings.data.to(torch.float32), + mean=0.0, + std=self.config.initializer_range, + ).to(module.position_embeddings.dtype) + + module.cls_token.data = nn.init.trunc_normal_( + module.cls_token.data.to(torch.float32), + mean=0.0, + std=self.config.initializer_range, + ).to(module.cls_token.dtype) + + +_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" + + +DINOV2_WITH_REGISTERS_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`Dinov2WithRegistersConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +DINOV2_WITH_REGISTERS_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`BitImageProcessor.preprocess`] for details. + + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +DINOV2_WITH_REGISTERS_BASE_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`BitImageProcessor.preprocess`] for details. + + bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`): + Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for + pre-training. 
+ + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare Dinov2WithRegisters Model transformer outputting raw hidden-states without any specific head on top.", + DINOV2_WITH_REGISTERS_START_DOCSTRING, +) +class Dinov2WithRegistersModel(Dinov2WithRegistersPreTrainedModel): + def __init__(self, config: Dinov2WithRegistersConfig): + super().__init__(config) + self.config = config + + self.embeddings = Dinov2WithRegistersEmbeddings(config) + self.encoder = Dinov2WithRegistersEncoder(config) + + self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings: + return self.embeddings.patch_embeddings + + def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(DINOV2_WITH_REGISTERS_BASE_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPooling, + config_class=_CONFIG_FOR_DOC, + modality="vision", + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + bool_masked_pos: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos) + + encoder_outputs = self.encoder( + embedding_output, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = 
encoder_outputs[0] + sequence_output = self.layernorm(sequence_output) + pooled_output = sequence_output[:, 0, :] + + if not return_dict: + head_outputs = (sequence_output, pooled_output) + return head_outputs + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings( + """ + Dinov2WithRegisters Model transformer with an image classification head on top (a linear layer on top of the final hidden state + of the [CLS] token) e.g. for ImageNet. + """, + DINOV2_WITH_REGISTERS_START_DOCSTRING, +) +class Dinov2WithRegistersForImageClassification(Dinov2WithRegistersPreTrainedModel): + def __init__(self, config: Dinov2WithRegistersConfig) -> None: + super().__init__(config) + + self.num_labels = config.num_labels + self.dinov2_with_registers = Dinov2WithRegistersModel(config) + + # Classifier head + self.classifier = ( + nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity() + ) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(DINOV2_WITH_REGISTERS_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_IMAGE_CLASS_CHECKPOINT, + output_type=ImageClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, + ) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, ImageClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
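+
+        A minimal usage sketch (illustrative only; `model`, `pixel_values` and the imported `torch` module are
+        assumed to exist, and the label index is just an example). Note that the classifier input concatenates
+        the [CLS] token with the mean of all remaining tokens, i.e. register tokens and patch tokens:
+
+        ```python
+        labels = torch.tensor([281])  # e.g. an ImageNet-1k class index
+        outputs = model(pixel_values, labels=labels)
+        loss, logits = outputs.loss, outputs.logits
+        ```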
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.dinov2_with_registers( + pixel_values, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] # batch_size, sequence_length, hidden_size + + cls_token = sequence_output[:, 0] + patch_tokens = sequence_output[:, 1:] + + linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1) + + logits = self.classifier(linear_input) + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return ImageClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Dinov2WithRegisters backbone, to be used with frameworks like DETR and MaskFormer. + """, + DINOV2_WITH_REGISTERS_START_DOCSTRING, +) +class Dinov2WithRegistersBackbone(Dinov2WithRegistersPreTrainedModel, BackboneMixin): + def __init__(self, config): + super().__init__(config) + super()._init_backbone(config) + + self.num_register_tokens = config.num_register_tokens + self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] + self.embeddings = Dinov2WithRegistersEmbeddings(config) + self.encoder = Dinov2WithRegistersEncoder(config) + + self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings: + return self.embeddings.patch_embeddings + + @add_start_docstrings_to_model_forward(DINOV2_WITH_REGISTERS_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: torch.Tensor, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> BackboneOutput: + """ + Returns: + + Examples: + + ```python + >>> from transformers import AutoImageProcessor, AutoBackbone + >>> import torch + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2_with_registers") + >>> model = AutoBackbone.from_pretrained( + ... 
"facebook/dinov2_with_registers", out_features=["stage2", "stage5", "stage8", "stage11"] + ... ) + + >>> inputs = processor(image, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> feature_maps = outputs.feature_maps + >>> list(feature_maps[-1].shape) + [1, 768, 16, 16] + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + + embedding_output = self.embeddings(pixel_values) + + outputs = self.encoder( + embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict + ) + + hidden_states = outputs.hidden_states if return_dict else outputs[1] + + feature_maps = () + for stage, hidden_state in zip(self.stage_names, hidden_states): + if stage in self.out_features: + if self.config.apply_layernorm: + hidden_state = self.layernorm(hidden_state) + if self.config.reshape_hidden_states: + hidden_state = hidden_state[:, self.num_register_tokens + 1 :] + # this was actually a bug in the original implementation that we copied here, + # cause normally the order is height, width + batch_size, _, height, width = pixel_values.shape + patch_size = self.config.patch_size + hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1) + hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() + feature_maps += (hidden_state,) + + if not return_dict: + if output_hidden_states: + output = (feature_maps,) + outputs[1:] + else: + output = (feature_maps,) + outputs[2:] + return output + + return BackboneOutput( + feature_maps=feature_maps, + hidden_states=outputs.hidden_states if output_hidden_states else None, + attentions=outputs.attentions if output_attentions else None, + ) \ No newline at end of file diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 8747939a62ce2a..34c767cf2b0429 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -72,13 +72,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class OffloadedStaticCache(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class QuantizedCache(metaclass=DummyObject): _backends = ["torch"] @@ -191,20 +184,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class BayesianDetectorConfig(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class BayesianDetectorModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class BeamScorer(metaclass=DummyObject): _backends = ["torch"] @@ -310,6 +289,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class ForceTokensLogitsProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class GenerationMixin(metaclass=DummyObject): _backends = ["torch"] @@ -471,27 +457,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class SynthIDTextWatermarkDetector(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, 
**kwargs): - requires_backends(self, ["torch"]) - - -class SynthIDTextWatermarkingConfig(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class SynthIDTextWatermarkLogitsProcessor(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class TemperatureLogitsWarper(metaclass=DummyObject): _backends = ["torch"] @@ -548,17 +513,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class TorchExportableModuleWithStaticCache(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -def convert_and_export_with_cache(*args, **kwargs): - requires_backends(convert_and_export_with_cache, ["torch"]) - - ROPE_INIT_FUNCTIONS = None @@ -685,41 +639,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class AriaForConditionalGeneration(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class AriaPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class AriaTextForCausalLM(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class AriaTextModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class AriaTextPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class ASTForAudioClassification(metaclass=DummyObject): _backends = ["torch"] @@ -777,9 +696,6 @@ def __init__(self, *args, **kwargs): MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = None -MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING = None - - MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = None @@ -947,13 +863,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class AutoModelForImageTextToText(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class AutoModelForImageToImage(metaclass=DummyObject): _backends = ["torch"] @@ -1353,6 +1262,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class BertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class BertLMHeadModel(metaclass=DummyObject): _backends = ["torch"] @@ -1452,6 +1368,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class BigBirdLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class BigBirdModel(metaclass=DummyObject): _backends = ["torch"] @@ -1687,13 +1610,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class Blip2ForImageTextRetrieval(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class Blip2Model(metaclass=DummyObject): _backends = ["torch"] @@ -1715,13 +1631,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class Blip2TextModelWithProjection(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - 
requires_backends(self, ["torch"]) - - class Blip2VisionModel(metaclass=DummyObject): _backends = ["torch"] @@ -1729,13 +1638,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class Blip2VisionModelWithProjection(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class BloomForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -1939,6 +1841,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class CanineLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class CanineModel(metaclass=DummyObject): _backends = ["torch"] @@ -2300,6 +2209,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class ConvBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class ConvBertModel(metaclass=DummyObject): _backends = ["torch"] @@ -3207,6 +3123,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class QDQBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class QDQBertLMHeadModel(metaclass=DummyObject): _backends = ["torch"] @@ -3576,6 +3499,34 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class Dinov2WithRegistersBackbone(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Dinov2WithRegistersForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Dinov2WithRegistersModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class Dinov2WithRegistersPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class DistilBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -4189,6 +4140,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class FNetLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class FNetModel(metaclass=DummyObject): _backends = ["torch"] @@ -4438,41 +4396,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GlmForCausalLM(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class GlmForSequenceClassification(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class GlmForTokenClassification(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class GlmModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class GlmPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class GLPNForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] @@ -4656,112 +4579,84 @@ def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) -class GPTNeoXModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class GPTNeoXPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class GPTNeoXJapaneseForCausalLM(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class GPTNeoXJapaneseModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class GPTNeoXJapanesePreTrainedModel(metaclass=DummyObject): +class GPTNeoXLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GPTJForCausalLM(metaclass=DummyObject): +class GPTNeoXModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GPTJForQuestionAnswering(metaclass=DummyObject): +class GPTNeoXPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GPTJForSequenceClassification(metaclass=DummyObject): +class GPTNeoXJapaneseForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GPTJModel(metaclass=DummyObject): +class GPTNeoXJapaneseLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GPTJPreTrainedModel(metaclass=DummyObject): +class GPTNeoXJapaneseModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GraniteForCausalLM(metaclass=DummyObject): +class GPTNeoXJapanesePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GraniteModel(metaclass=DummyObject): +class GPTJForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GranitePreTrainedModel(metaclass=DummyObject): +class GPTJForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GraniteMoeForCausalLM(metaclass=DummyObject): +class GPTJForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GraniteMoeModel(metaclass=DummyObject): +class GPTJModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GraniteMoePreTrainedModel(metaclass=DummyObject): +class GPTJPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): @@ -4985,137 +4880,74 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class Idefics3ForConditionalGeneration(metaclass=DummyObject): +class ImageGPTForCausalImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class Idefics3Model(metaclass=DummyObject): +class ImageGPTForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class 
Idefics3PreTrainedModel(metaclass=DummyObject): +class ImageGPTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class Idefics3Processor(metaclass=DummyObject): +class ImageGPTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class Idefics3VisionConfig(metaclass=DummyObject): +def load_tf_weights_in_imagegpt(*args, **kwargs): + requires_backends(load_tf_weights_in_imagegpt, ["torch"]) + + +class InformerForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class Idefics3VisionTransformer(metaclass=DummyObject): +class InformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class IJepaForImageClassification(metaclass=DummyObject): +class InformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class IJepaModel(metaclass=DummyObject): +class InstructBlipForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class IJepaPreTrainedModel(metaclass=DummyObject): +class InstructBlipPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class ImageGPTForCausalImageModeling(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class ImageGPTForImageClassification(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class ImageGPTModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class ImageGPTPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -def load_tf_weights_in_imagegpt(*args, **kwargs): - requires_backends(load_tf_weights_in_imagegpt, ["torch"]) - - -class InformerForPrediction(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class InformerModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class InformerPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class InstructBlipForConditionalGeneration(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class InstructBlipPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class InstructBlipQFormerModel(metaclass=DummyObject): +class InstructBlipQFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): @@ -5528,20 +5360,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class LlavaOnevisionForConditionalGeneration(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class 
LlavaOnevisionPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class LongformerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -5591,6 +5409,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class LongformerSelfAttention(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class LongT5EncoderModel(metaclass=DummyObject): _backends = ["torch"] @@ -5731,6 +5556,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class LxmertXLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class M2M100ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -5815,13 +5647,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class MarianPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class MarkupLMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] @@ -6039,20 +5864,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class MimiModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class MimiPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class MistralForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -6060,13 +5871,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class MistralForQuestionAnswering(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class MistralForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] @@ -6102,13 +5906,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class MixtralForQuestionAnswering(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class MixtralForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] @@ -6137,48 +5934,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class MllamaForCausalLM(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class MllamaForConditionalGeneration(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class MllamaPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class MllamaProcessor(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class MllamaTextModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class MllamaVisionModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class MobileBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -6228,6 +5983,13 @@ def __init__(self, *args, 
**kwargs): requires_backends(self, ["torch"]) +class MobileBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class MobileBertModel(metaclass=DummyObject): _backends = ["torch"] @@ -6359,63 +6121,42 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class MoshiForCausalLM(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class MoshiForConditionalGeneration(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class MoshiModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class MoshiPreTrainedModel(metaclass=DummyObject): +class MPNetForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class MPNetForMaskedLM(metaclass=DummyObject): +class MPNetForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class MPNetForMultipleChoice(metaclass=DummyObject): +class MPNetForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class MPNetForQuestionAnswering(metaclass=DummyObject): +class MPNetForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class MPNetForSequenceClassification(metaclass=DummyObject): +class MPNetForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class MPNetForTokenClassification(metaclass=DummyObject): +class MPNetLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): @@ -6793,91 +6534,42 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class NystromformerModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class NystromformerPreTrainedModel(metaclass=DummyObject): +class NystromformerLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class OlmoForCausalLM(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class OlmoModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class OlmoPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class Olmo2ForCausalLM(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class Olmo2Model(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class Olmo2PreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class OlmoeForCausalLM(metaclass=DummyObject): +class NystromformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) -class OlmoeModel(metaclass=DummyObject): +class NystromformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class OlmoePreTrainedModel(metaclass=DummyObject): +class OlmoForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class OmDetTurboForObjectDetection(metaclass=DummyObject): +class OlmoModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class OmDetTurboPreTrainedModel(metaclass=DummyObject): +class OlmoPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): @@ -7252,6 +6944,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class PerceiverLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class PerceiverModel(metaclass=DummyObject): _backends = ["torch"] @@ -7371,34 +7070,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class PhimoeForCausalLM(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class PhimoeForSequenceClassification(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class PhimoeModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class PhimoePreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class Pix2StructForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -7427,20 +7098,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class PixtralPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class PixtralVisionModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class PLBartForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -7609,13 +7266,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class Qwen2ForQuestionAnswering(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class Qwen2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] @@ -7672,13 +7322,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class Qwen2MoeForQuestionAnswering(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class Qwen2MoeForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] @@ -7707,91 +7350,84 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class Qwen2VLForConditionalGeneration(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class Qwen2VLModel(metaclass=DummyObject): +class RagModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class 
Qwen2VLPreTrainedModel(metaclass=DummyObject): +class RagPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class RagModel(metaclass=DummyObject): +class RagSequenceForGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class RagPreTrainedModel(metaclass=DummyObject): +class RagTokenForGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class RagSequenceForGeneration(metaclass=DummyObject): +class RecurrentGemmaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class RagTokenForGeneration(metaclass=DummyObject): +class RecurrentGemmaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class RecurrentGemmaForCausalLM(metaclass=DummyObject): +class RecurrentGemmaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class RecurrentGemmaModel(metaclass=DummyObject): +class ReformerAttention(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class RecurrentGemmaPreTrainedModel(metaclass=DummyObject): +class ReformerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class ReformerForMaskedLM(metaclass=DummyObject): +class ReformerForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class ReformerForQuestionAnswering(metaclass=DummyObject): +class ReformerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class ReformerForSequenceClassification(metaclass=DummyObject): +class ReformerLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): @@ -7882,6 +7518,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class RemBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class RemBertModel(metaclass=DummyObject): _backends = ["torch"] @@ -8089,6 +7732,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class RoCBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class RoCBertModel(metaclass=DummyObject): _backends = ["torch"] @@ -8149,6 +7799,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class RoFormerLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class RoFormerModel(metaclass=DummyObject): _backends = ["torch"] @@ -8370,6 +8027,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class SegformerLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class SegformerModel(metaclass=DummyObject): _backends = ["torch"] @@ -8580,6 +8244,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class 
SplinterLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class SplinterModel(metaclass=DummyObject): _backends = ["torch"] @@ -8636,6 +8307,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class SqueezeBertModule(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class SqueezeBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] @@ -9344,6 +9022,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class ViltLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class ViltModel(metaclass=DummyObject): _backends = ["torch"] @@ -9421,6 +9106,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class VisualBertLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class VisualBertModel(metaclass=DummyObject): _backends = ["torch"] @@ -9470,6 +9162,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class ViTMAELayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class ViTMAEModel(metaclass=DummyObject): _backends = ["torch"] @@ -10188,42 +9887,21 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class YosoModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class YosoPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class ZambaForCausalLM(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class ZambaForSequenceClassification(metaclass=DummyObject): +class YosoLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class ZambaModel(metaclass=DummyObject): +class YosoModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class ZambaPreTrainedModel(metaclass=DummyObject): +class YosoPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): @@ -10324,4 +10002,4 @@ class Seq2SeqTrainer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) + requires_backends(self, ["torch"]) \ No newline at end of file diff --git a/tests/models/dinov2_with_registers/__init__.py b/tests/models/dinov2_with_registers/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/dinov2_with_registers/test_modeling_dinov2_with_registers.py b/tests/models/dinov2_with_registers/test_modeling_dinov2_with_registers.py new file mode 100644 index 00000000000000..bf27ceac5fcfc2 --- /dev/null +++ b/tests/models/dinov2_with_registers/test_modeling_dinov2_with_registers.py @@ -0,0 +1,362 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Testing suite for the PyTorch Dinov2WithRegisters model.""" + +import unittest + +from transformers import Dinov2WithRegistersConfig +from transformers.testing_utils import ( + require_torch, + require_vision, + slow, + torch_device, +) +from transformers.utils import cached_property, is_torch_available, is_vision_available + +from ...test_backbone_common import BackboneTesterMixin +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor +from ...test_pipeline_mixin import PipelineTesterMixin + + +if is_torch_available(): + import torch + from torch import nn + + from transformers import ( + Dinov2WithRegistersBackbone, + Dinov2WithRegistersForImageClassification, + Dinov2WithRegistersModel, + ) + + +if is_vision_available(): + from PIL import Image + + from transformers import AutoImageProcessor + + +class Dinov2WithRegistersModelTester: + def __init__( + self, + parent, + batch_size=13, + image_size=30, + patch_size=2, + num_channels=3, + is_training=True, + use_labels=True, + hidden_size=32, + num_hidden_layers=2, + num_attention_heads=4, + intermediate_size=37, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + type_sequence_label_size=10, + initializer_range=0.02, + num_register_tokens=2, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.is_training = is_training + self.use_labels = use_labels + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.type_sequence_label_size = type_sequence_label_size + self.initializer_range = initializer_range + self.num_register_tokens = num_register_tokens + self.scope = scope + + # in Dinov2 With Registers, the seq length equals the number of patches + 1 + num_register_tokens (we add 1 for the [CLS] token) + num_patches = (image_size // patch_size) ** 2 + self.seq_length = num_patches + 1 + self.num_register_tokens + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) + + labels = None + if self.use_labels: + labels = ids_tensor([self.batch_size], self.type_sequence_label_size) + + config = self.get_config() + + return config, pixel_values, labels + + def get_config(self): + return Dinov2WithRegistersConfig( + image_size=self.image_size, + patch_size=self.patch_size, + num_channels=self.num_channels, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + is_decoder=False, + 
initializer_range=self.initializer_range, + num_register_tokens=self.num_register_tokens, + ) + + def create_and_check_model(self, config, pixel_values, labels): + model = Dinov2WithRegistersModel(config=config) + model.to(torch_device) + model.eval() + result = model(pixel_values) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + + def create_and_check_backbone(self, config, pixel_values, labels): + model = Dinov2WithRegistersBackbone(config=config) + model.to(torch_device) + model.eval() + result = model(pixel_values) + + # verify hidden states + self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) + expected_size = self.image_size // config.patch_size + self.parent.assertListEqual( + list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size] + ) + + # verify channels + self.parent.assertEqual(len(model.channels), len(config.out_features)) + + # verify backbone works with out_features=None + config.out_features = None + model = Dinov2WithRegistersBackbone(config=config) + model.to(torch_device) + model.eval() + result = model(pixel_values) + + # verify feature maps + self.parent.assertEqual(len(result.feature_maps), 1) + self.parent.assertListEqual( + list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size] + ) + + # verify channels + self.parent.assertEqual(len(model.channels), 1) + + # verify backbone works with apply_layernorm=False and reshape_hidden_states=False + config.apply_layernorm = False + config.reshape_hidden_states = False + + model = Dinov2WithRegistersBackbone(config=config) + model.to(torch_device) + model.eval() + result = model(pixel_values) + + # verify feature maps + self.parent.assertEqual(len(result.feature_maps), 1) + self.parent.assertListEqual( + list(result.feature_maps[0].shape), [self.batch_size, self.seq_length, self.hidden_size] + ) + + def create_and_check_for_image_classification(self, config, pixel_values, labels): + config.num_labels = self.type_sequence_label_size + model = Dinov2WithRegistersForImageClassification(config) + model.to(torch_device) + model.eval() + result = model(pixel_values, labels=labels) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) + + # test greyscale images + config.num_channels = 1 + model = Dinov2WithRegistersForImageClassification(config) + model.to(torch_device) + model.eval() + + pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) + result = model(pixel_values) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + ( + config, + pixel_values, + labels, + ) = config_and_inputs + inputs_dict = {"pixel_values": pixel_values} + return config, inputs_dict + + +@require_torch +class Dinov2WithRegistersModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): + """ + Here we also overwrite some of the tests of test_modeling_common.py, as Dinov2WithRegisters does not use input_ids, inputs_embeds, + attention_mask and seq_length. 
+ """ + + all_model_classes = ( + ( + Dinov2WithRegistersModel, + Dinov2WithRegistersForImageClassification, + Dinov2WithRegistersBackbone, + ) + if is_torch_available() + else () + ) + pipeline_model_mapping = ( + { + "image-feature-extraction": Dinov2WithRegistersModel, + "image-classification": Dinov2WithRegistersForImageClassification, + } + if is_torch_available() + else {} + ) + fx_compatible = False + + test_pruning = False + test_resize_embeddings = False + test_head_masking = False + + def setUp(self): + self.model_tester = Dinov2WithRegistersModelTester(self) + self.config_tester = ConfigTester( + self, config_class=Dinov2WithRegistersConfig, has_text_modality=False, hidden_size=37 + ) + + def test_initialization(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + configs_no_init = _config_zero_init(config) + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + for name, param in model.named_parameters(): + if param.requires_grad and "register_tokens" not in name: + self.assertIn( + ((param.data.mean() * 1e9).round() / 1e9).item(), + [0.0, 1.0], + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + + def test_config(self): + self.config_tester.run_common_tests() + + @unittest.skip(reason="Dinov2WithRegisters does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant(self): + pass + + @unittest.skip( + reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" + ) + def test_training_gradient_checkpointing_use_reentrant_false(self): + pass + + def test_model_get_set_embeddings(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) + x = model.get_output_embeddings() + self.assertTrue(x is None or isinstance(x, nn.Linear)) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_backbone(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_backbone(*config_and_inputs) + + def test_for_image_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_image_classification(*config_and_inputs) + + @unittest.skip(reason="Dinov2WithRegisters does not support feedforward chunking yet") + def test_feed_forward_chunking(self): + pass + + @slow + def test_model_from_pretrained(self): + model_name = "facebook/dinov2-with-registers" + model = Dinov2WithRegistersModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + +# We will verify our results on an image of cute cats +def prepare_img(): + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + return image + + +@require_torch +@require_vision +class 
Dinov2WithRegistersModelIntegrationTest(unittest.TestCase): + @cached_property + def default_image_processor(self): + return ( + AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base") + if is_vision_available() + else None + ) + + @slow + def test_inference_no_head(self): + model = Dinov2WithRegistersModel.from_pretrained("facebook/dinov2-with-registers-base").to(torch_device) + + image_processor = self.default_image_processor + image = prepare_img() + inputs = image_processor(image, return_tensors="pt").to(torch_device) + + # forward pass + with torch.no_grad(): + outputs = model(**inputs) + + # verify the last hidden states + expected_shape = torch.Size((1, 257, 768)) + self.assertEqual(outputs.last_hidden_state.shape, expected_shape) + + expected_slice = torch.tensor( + [[-2.1747, -0.4729, 1.0936], [-3.2780, -0.8269, -0.9210], [-2.9129, 1.1284, -0.7306]], + device=torch_device, + ) + self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) + + +@require_torch +class Dinov2WithRegistersBackboneTest(unittest.TestCase, BackboneTesterMixin): + all_model_classes = (Dinov2WithRegistersBackbone,) if is_torch_available() else () + config_class = Dinov2WithRegistersConfig + + has_attentions = False + + def setUp(self): + self.model_tester = Dinov2WithRegistersModelTester(self) \ No newline at end of file diff --git a/utils/check_repo.py b/utils/check_repo.py index 3dbe59f192293a..cafa2e42bd3563 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -31,6 +31,7 @@ It has no auto-fix mode. """ +import inspect import os import re import sys @@ -69,7 +70,6 @@ "UMT5Stack", "Pop2PianoStack", "Qwen2AudioEncoder", - "Qwen2VisionTransformerPretrainedModel", "SwitchTransformersStack", "TFDPRSpanPredictor", "MaskFormerSwinModel", @@ -82,65 +82,54 @@ "SeamlessM4Tv2TextToUnitModel", "SeamlessM4Tv2CodeHifiGan", "SeamlessM4Tv2TextToUnitForConditionalGeneration", - "Idefics2PerceiverResampler", - "Idefics2VisionTransformer", - "Idefics3VisionTransformer", - "AriaTextForCausalLM", - "AriaTextModel", ] # Update this list for models that are not tested with a comment explaining the reason it should not be. # Being in this list is an exception and should **not** be the rule. -IGNORE_NON_TESTED = ( - PRIVATE_MODELS.copy() - + [ - # models to ignore for not tested - "RecurrentGemmaModel", # Building part of bigger (tested) model. - "FuyuForCausalLM", # Not tested fort now - "InstructBlipQFormerModel", # Building part of bigger (tested) model. - "InstructBlipVideoQFormerModel", # Building part of bigger (tested) model. - "UMT5EncoderModel", # Building part of bigger (tested) model. - "Blip2QFormerModel", # Building part of bigger (tested) model. - "ErnieMForInformationExtraction", - "FastSpeech2ConformerHifiGan", # Already tested by SpeechT5HifiGan (# Copied from) - "FastSpeech2ConformerWithHifiGan", # Built with two smaller (tested) models. - "GraphormerDecoderHead", # Building part of bigger (tested) model. - "JukeboxVQVAE", # Building part of bigger (tested) model. - "JukeboxPrior", # Building part of bigger (tested) model. - "DecisionTransformerGPT2Model", # Building part of bigger (tested) model. - "SegformerDecodeHead", # Building part of bigger (tested) model. - "MgpstrModel", # Building part of bigger (tested) model. - "BertLMHeadModel", # Needs to be setup as decoder. - "MegatronBertLMHeadModel", # Building part of bigger (tested) model. - "RealmBertModel", # Building part of bigger (tested) model. - "RealmReader", # Not regular model. 
- "RealmScorer", # Not regular model. - "RealmForOpenQA", # Not regular model. - "ReformerForMaskedLM", # Needs to be setup as decoder. - "TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?) - "TFRobertaForMultipleChoice", # TODO: fix - "TFRobertaPreLayerNormForMultipleChoice", # TODO: fix - "SeparableConv1D", # Building part of bigger (tested) model. - "FlaxBartForCausalLM", # Building part of bigger (tested) model. - "FlaxBertForCausalLM", # Building part of bigger (tested) model. Tested implicitly through FlaxRobertaForCausalLM. - "OPTDecoderWrapper", - "TFSegformerDecodeHead", # Not a regular model. - "AltRobertaModel", # Building part of bigger (tested) model. - "BlipTextLMHeadModel", # No need to test it as it is tested by BlipTextVision models - "TFBlipTextLMHeadModel", # No need to test it as it is tested by BlipTextVision models - "BridgeTowerTextModel", # No need to test it as it is tested by BridgeTowerModel model. - "BridgeTowerVisionModel", # No need to test it as it is tested by BridgeTowerModel model. - "BarkCausalModel", # Building part of bigger (tested) model. - "BarkModel", # Does not have a forward signature - generation tested with integration tests. - "SeamlessM4TTextToUnitModel", # Building part of bigger (tested) model. - "SeamlessM4TCodeHifiGan", # Building part of bigger (tested) model. - "SeamlessM4TTextToUnitForConditionalGeneration", # Building part of bigger (tested) model. - "ChameleonVQVAE", # VQVAE here is used only for encoding (discretizing) and is tested as part of bigger model - "Qwen2VLModel", # Building part of bigger (tested) model. Tested implicitly through Qwen2VLForConditionalGeneration. - "MllamaTextModel", # Building part of bigger (tested) model. # TODO: add tests - "MllamaVisionModel", # Building part of bigger (tested) model. # TODO: add tests - ] -) +IGNORE_NON_TESTED = PRIVATE_MODELS.copy() + [ + # models to ignore for not tested + "RecurrentGemmaModel", # Building part of bigger (tested) model. + "FuyuForCausalLM", # Not tested fort now + "InstructBlipQFormerModel", # Building part of bigger (tested) model. + "InstructBlipVideoQFormerModel", # Building part of bigger (tested) model. + "UMT5EncoderModel", # Building part of bigger (tested) model. + "Blip2QFormerModel", # Building part of bigger (tested) model. + "ErnieMForInformationExtraction", + "FastSpeech2ConformerHifiGan", # Already tested by SpeechT5HifiGan (# Copied from) + "FastSpeech2ConformerWithHifiGan", # Built with two smaller (tested) models. + "GraphormerDecoderHead", # Building part of bigger (tested) model. + "JukeboxVQVAE", # Building part of bigger (tested) model. + "JukeboxPrior", # Building part of bigger (tested) model. + "DecisionTransformerGPT2Model", # Building part of bigger (tested) model. + "SegformerDecodeHead", # Building part of bigger (tested) model. + "MgpstrModel", # Building part of bigger (tested) model. + "BertLMHeadModel", # Needs to be setup as decoder. + "MegatronBertLMHeadModel", # Building part of bigger (tested) model. + "RealmBertModel", # Building part of bigger (tested) model. + "RealmReader", # Not regular model. + "RealmScorer", # Not regular model. + "RealmForOpenQA", # Not regular model. + "ReformerForMaskedLM", # Needs to be setup as decoder. + "TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?) 
+ "TFRobertaForMultipleChoice", # TODO: fix + "TFRobertaPreLayerNormForMultipleChoice", # TODO: fix + "SeparableConv1D", # Building part of bigger (tested) model. + "FlaxBartForCausalLM", # Building part of bigger (tested) model. + "FlaxBertForCausalLM", # Building part of bigger (tested) model. Tested implicitly through FlaxRobertaForCausalLM. + "OPTDecoderWrapper", + "TFSegformerDecodeHead", # Not a regular model. + "AltRobertaModel", # Building part of bigger (tested) model. + "BlipTextLMHeadModel", # No need to test it as it is tested by BlipTextVision models + "TFBlipTextLMHeadModel", # No need to test it as it is tested by BlipTextVision models + "BridgeTowerTextModel", # No need to test it as it is tested by BridgeTowerModel model. + "BridgeTowerVisionModel", # No need to test it as it is tested by BridgeTowerModel model. + "BarkCausalModel", # Building part of bigger (tested) model. + "BarkModel", # Does not have a forward signature - generation tested with integration tests. + "SeamlessM4TTextToUnitModel", # Building part of bigger (tested) model. + "SeamlessM4TCodeHifiGan", # Building part of bigger (tested) model. + "SeamlessM4TTextToUnitForConditionalGeneration", # Building part of bigger (tested) model. + "ChameleonVQVAE", # VQVAE here is used only for encoding (discretizing) and is tested as part of bigger model +] # Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't # trigger the common tests. @@ -174,8 +163,7 @@ "ClapTextModelWithProjection", "ClapAudioModel", "ClapAudioModelWithProjection", - "Blip2TextModelWithProjection", - "Blip2VisionModelWithProjection", + "Blip2ForConditionalGeneration", "Blip2QFormerModel", "Blip2VisionModel", "ErnieMForInformationExtraction", @@ -184,6 +172,7 @@ "GitVisionModel", "GraphormerModel", "GraphormerForGraphClassification", + "BlipForConditionalGeneration", "BlipForImageTextRetrieval", "BlipForQuestionAnswering", "BlipVisionModel", @@ -229,6 +218,7 @@ "BeitForMaskedImageModeling", "ChineseCLIPTextModel", "ChineseCLIPVisionModel", + "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModelWithProjection", "ClvpForCausalLM", @@ -246,6 +236,7 @@ "DetrForSegmentation", "Pix2StructVisionModel", "Pix2StructTextModel", + "Pix2StructForConditionalGeneration", "ConditionalDetrForSegmentation", "DPRReader", "FlaubertForQuestionAnswering", @@ -322,6 +313,7 @@ "SeamlessM4TCodeHifiGan", "SeamlessM4TForSpeechToSpeech", # no auto class for speech-to-speech "TvpForVideoGrounding", + "UdopForConditionalGeneration", "SeamlessM4Tv2NARTextToUnitModel", "SeamlessM4Tv2NARTextToUnitForConditionalGeneration", "SeamlessM4Tv2CodeHifiGan", @@ -330,8 +322,6 @@ "SiglipVisionModel", "SiglipTextModel", "ChameleonVQVAE", # no autoclass for VQ-VAE models - "CLIPTextModel", - "MoshiForConditionalGeneration", # no auto class for speech-to-speech ] # DO NOT edit this list! 
@@ -424,15 +414,22 @@ def get_model_modules() -> List[str]: "modeling_auto", "modeling_encoder_decoder", "modeling_marian", + "modeling_mmbt", + "modeling_outputs", "modeling_retribert", + "modeling_utils", "modeling_flax_auto", "modeling_flax_encoder_decoder", + "modeling_flax_utils", "modeling_speech_encoder_decoder", "modeling_flax_speech_encoder_decoder", "modeling_flax_vision_encoder_decoder", "modeling_timm_backbone", "modeling_tf_auto", "modeling_tf_encoder_decoder", + "modeling_tf_outputs", + "modeling_tf_pytorch_utils", + "modeling_tf_utils", "modeling_tf_vision_encoder_decoder", "modeling_vision_encoder_decoder", ] @@ -446,7 +443,8 @@ def get_model_modules() -> List[str]: for submodule in dir(model_module): if submodule.startswith("modeling") and submodule not in _ignore_modules: modeling_module = getattr(model_module, submodule) - modules.append(modeling_module) + if inspect.ismodule(modeling_module): + modules.append(modeling_module) return modules @@ -908,26 +906,19 @@ def find_all_documented_objects() -> List[str]: Returns: `List[str]`: The list of all object names being documented. - `Dict[str, List[str]]`: A dictionary mapping the object name (full import path, e.g. - `integrations.PeftAdapterMixin`) to its documented methods """ documented_obj = [] - documented_methods_map = {} + for doc_file in Path(PATH_TO_DOC).glob("**/*.rst"): + with open(doc_file, "r", encoding="utf-8", newline="\n") as f: + content = f.read() + raw_doc_objs = re.findall(r"(?:autoclass|autofunction):: transformers.(\S+)\s+", content) + documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs] for doc_file in Path(PATH_TO_DOC).glob("**/*.md"): with open(doc_file, "r", encoding="utf-8", newline="\n") as f: content = f.read() raw_doc_objs = re.findall(r"\[\[autodoc\]\]\s+(\S+)\s+", content) documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs] - - for obj in raw_doc_objs: - obj_public_methods = re.findall(rf"\[\[autodoc\]\] {obj}((\n\s+-.*)+)", content) - # Some objects have no methods documented - if len(obj_public_methods) == 0: - continue - else: - documented_methods_map[obj] = re.findall(r"(?<=-\s).*", obj_public_methods[0][0]) - - return documented_obj, documented_methods_map + return documented_obj # One good reason for not being documented is to be deprecated. Put in this list deprecated objects. 
@@ -1009,6 +1000,7 @@ def find_all_documented_objects() -> List[str]: "ConvNextV2Backbone", "DinatBackbone", "Dinov2Backbone", + "Dinov2WithRegistersBackbone", "FocalNetBackbone", "HieraBackbone", "MaskFormerSwinBackbone", @@ -1065,7 +1057,7 @@ def ignore_undocumented(name: str) -> bool: def check_all_objects_are_documented(): """Check all models are properly documented.""" - documented_objs, documented_methods_map = find_all_documented_objects() + documented_objs = find_all_documented_objects() modules = transformers._modules objects = [c for c in dir(transformers) if c not in modules and not c.startswith("_")] undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)] @@ -1074,57 +1066,8 @@ def check_all_objects_are_documented(): "The following objects are in the public init so should be documented:\n - " + "\n - ".join(undocumented_objs) ) + check_docstrings_are_in_md() check_model_type_doc_match() - check_public_method_exists(documented_methods_map) - - -def check_public_method_exists(documented_methods_map): - """Check that all explicitly documented public methods are defined in the corresponding class.""" - failures = [] - for obj, methods in documented_methods_map.items(): - # Let's ensure there is no repetition - if len(set(methods)) != len(methods): - failures.append(f"Error in the documentation of {obj}: there are repeated documented methods.") - - # Navigates into the object, given the full import path - nested_path = obj.split(".") - submodule = transformers - if len(nested_path) > 1: - nested_submodules = nested_path[:-1] - for submodule_name in nested_submodules: - if submodule_name == "transformers": - continue - - try: - submodule = getattr(submodule, submodule_name) - except AttributeError: - failures.append(f"Could not parse {submodule_name}. Are the required dependencies installed?") - continue - - class_name = nested_path[-1] - - try: - obj_class = getattr(submodule, class_name) - except AttributeError: - failures.append(f"Could not parse {submodule_name}. Are the required dependencies installed?") - continue - - # Checks that all explicitly documented methods are defined in the class - for method in methods: - if method == "all": # Special keyword to document all public methods - continue - try: - if not hasattr(obj_class, method): - failures.append( - "The following public method is explicitly documented but not defined in the corresponding " - f"class. class: {obj}, method: {method}. If the method is defined, this error can be due to " - f"lacking dependencies." - ) - except ImportError: - pass - - if len(failures) > 0: - raise Exception("\n".join(failures)) def check_model_type_doc_match(): @@ -1154,6 +1097,50 @@ def check_model_type_doc_match(): ) +# Re pattern to catch :obj:`xx`, :class:`xx`, :func:`xx` or :meth:`xx`. +_re_rst_special_words = re.compile(r":(?:obj|func|class|meth):`([^`]+)`") +# Re pattern to catch things between double backquotes. +_re_double_backquotes = re.compile(r"(^|[^`])``([^`]+)``([^`]|$)") +# Re pattern to catch example introduction. +_re_rst_example = re.compile(r"^\s*Example.*::\s*$", flags=re.MULTILINE) + + +def is_rst_docstring(docstring: str) -> True: + """ + Returns `True` if `docstring` is written in rst. 
+ """ + if _re_rst_special_words.search(docstring) is not None: + return True + if _re_double_backquotes.search(docstring) is not None: + return True + if _re_rst_example.search(docstring) is not None: + return True + return False + + +def check_docstrings_are_in_md(): + """Check all docstrings are written in md and nor rst.""" + files_with_rst = [] + for file in Path(PATH_TO_TRANSFORMERS).glob("**/*.py"): + with open(file, encoding="utf-8") as f: + code = f.read() + docstrings = code.split('"""') + + for idx, docstring in enumerate(docstrings): + if idx % 2 == 0 or not is_rst_docstring(docstring): + continue + files_with_rst.append(file) + break + + if len(files_with_rst) > 0: + raise ValueError( + "The following files have docstrings written in rst:\n" + + "\n".join([f"- {f}" for f in files_with_rst]) + + "\nTo fix this run `doc-builder convert path_to_py_file` after installing `doc-builder`\n" + "(`pip install git+https://github.com/huggingface/doc-builder`)" + ) + + def check_deprecated_constant_is_up_to_date(): """ Check if the constant `DEPRECATED_MODELS` in `models/auto/configuration_auto.py` is up to date. @@ -1184,30 +1171,29 @@ def check_deprecated_constant_is_up_to_date(): def check_repo_quality(): - """Check all models are tested and documented.""" - print("Repository-wide checks:") - print(" - checking all models are included.") + """Check all models are properly tested and documented.""" + print("Checking all models are included.") check_model_list() - print(" - checking all models are public.") + print("Checking all models are public.") check_models_are_in_init() - print(" - checking all models have tests.") + print("Checking all models are properly tested.") check_all_decorator_order() check_all_models_are_tested() - print(" - checking all objects have documentation.") + print("Checking all objects are properly documented.") check_all_objects_are_documented() - print(" - checking all models are in at least one auto class.") + print("Checking all models are in at least one auto class.") check_all_models_are_auto_configured() - print(" - checking all names in auto name mappings are defined.") + print("Checking all names in auto name mappings are defined.") check_all_auto_object_names_being_defined() - print(" - checking all keys in auto name mappings are defined in `CONFIG_MAPPING_NAMES`.") + print("Checking all keys in auto name mappings are defined in `CONFIG_MAPPING_NAMES`.") check_all_auto_mapping_names_in_config_mapping_names() - print(" - checking all auto mappings could be imported.") + print("Checking all auto mappings could be imported.") check_all_auto_mappings_importable() - print(" - checking all objects are equally (across frameworks) in the main __init__.") + print("Checking all objects are equally (across frameworks) in the main __init__.") check_objects_being_equally_in_main_init() - print(" - checking the DEPRECATED_MODELS constant is up to date.") + print("Checking the DEPRECATED_MODELS constant is up to date.") check_deprecated_constant_is_up_to_date() if __name__ == "__main__": - check_repo_quality() + check_repo_quality() \ No newline at end of file