diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 74e6404ecc934b..40d53aacd5b768 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -145,28 +145,24 @@ ], # Models "models": [], - "models.albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig"], + "models.albert": ["AlbertConfig"], "models.align": [ - "ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlignConfig", "AlignProcessor", "AlignTextConfig", "AlignVisionConfig", ], "models.altclip": [ - "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPProcessor", "AltCLIPTextConfig", "AltCLIPVisionConfig", ], "models.audio_spectrogram_transformer": [ - "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ASTConfig", "ASTFeatureExtractor", ], "models.auto": [ - "ALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CONFIG_MAPPING", "FEATURE_EXTRACTOR_MAPPING", "IMAGE_PROCESSOR_MAPPING", @@ -179,10 +175,7 @@ "AutoProcessor", "AutoTokenizer", ], - "models.autoformer": [ - "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "AutoformerConfig", - ], + "models.autoformer": ["AutoformerConfig"], "models.bark": [ "BarkCoarseConfig", "BarkConfig", @@ -193,9 +186,8 @@ "models.bart": ["BartConfig", "BartTokenizer"], "models.barthez": [], "models.bartpho": [], - "models.beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig"], + "models.beit": ["BeitConfig"], "models.bert": [ - "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BasicTokenizer", "BertConfig", "BertTokenizer", @@ -208,77 +200,63 @@ "MecabTokenizer", ], "models.bertweet": ["BertweetTokenizer"], - "models.big_bird": ["BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP", "BigBirdConfig"], - "models.bigbird_pegasus": [ - "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", - "BigBirdPegasusConfig", - ], + "models.big_bird": ["BigBirdConfig"], + "models.bigbird_pegasus": ["BigBirdPegasusConfig"], "models.biogpt": [ - "BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig", "BioGptTokenizer", ], - "models.bit": 
["BIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BitConfig"], + "models.bit": ["BitConfig"], "models.blenderbot": [ - "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotTokenizer", ], "models.blenderbot_small": [ - "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotSmallConfig", "BlenderbotSmallTokenizer", ], "models.blip": [ - "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlipConfig", "BlipProcessor", "BlipTextConfig", "BlipVisionConfig", ], "models.blip_2": [ - "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Blip2Config", "Blip2Processor", "Blip2QFormerConfig", "Blip2VisionConfig", ], - "models.bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig"], + "models.bloom": ["BloomConfig"], "models.bridgetower": [ - "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP", "BridgeTowerConfig", "BridgeTowerProcessor", "BridgeTowerTextConfig", "BridgeTowerVisionConfig", ], "models.bros": [ - "BROS_PRETRAINED_CONFIG_ARCHIVE_MAP", "BrosConfig", "BrosProcessor", ], "models.byt5": ["ByT5Tokenizer"], - "models.camembert": ["CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CamembertConfig"], + "models.camembert": ["CamembertConfig"], "models.canine": [ - "CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig", "CanineTokenizer", ], "models.chinese_clip": [ - "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "ChineseCLIPConfig", "ChineseCLIPProcessor", "ChineseCLIPTextConfig", "ChineseCLIPVisionConfig", ], "models.clap": [ - "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapAudioConfig", "ClapConfig", "ClapProcessor", "ClapTextConfig", ], "models.clip": [ - "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPProcessor", "CLIPTextConfig", @@ -286,14 +264,12 @@ "CLIPVisionConfig", ], "models.clipseg": [ - "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPSegConfig", "CLIPSegProcessor", "CLIPSegTextConfig", "CLIPSegVisionConfig", ], "models.clvp": [ - "CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "ClvpConfig", "ClvpDecoderConfig", "ClvpEncoderConfig", @@ -303,230 +279,164 @@ ], "models.code_llama": 
[], "models.codegen": [ - "CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", "CodeGenConfig", "CodeGenTokenizer", ], - "models.cohere": ["COHERE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CohereConfig"], - "models.conditional_detr": [ - "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", - "ConditionalDetrConfig", - ], + "models.cohere": ["CohereConfig"], + "models.conditional_detr": ["ConditionalDetrConfig"], "models.convbert": [ - "CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertTokenizer", ], - "models.convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig"], - "models.convnextv2": [ - "CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP", - "ConvNextV2Config", - ], + "models.convnext": ["ConvNextConfig"], + "models.convnextv2": ["ConvNextV2Config"], "models.cpm": [], "models.cpmant": [ - "CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig", "CpmAntTokenizer", ], "models.ctrl": [ - "CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig", "CTRLTokenizer", ], - "models.cvt": ["CVT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CvtConfig"], + "models.cvt": ["CvtConfig"], "models.data2vec": [ - "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", - "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig", "Data2VecTextConfig", "Data2VecVisionConfig", ], "models.dbrx": ["DbrxConfig"], "models.deberta": [ - "DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaTokenizer", ], - "models.deberta_v2": [ - "DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", - "DebertaV2Config", - ], - "models.decision_transformer": [ - "DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "DecisionTransformerConfig", - ], - "models.deformable_detr": [ - "DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", - "DeformableDetrConfig", - ], - "models.deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig"], + "models.deberta_v2": ["DebertaV2Config"], + "models.decision_transformer": ["DecisionTransformerConfig"], + "models.deformable_detr": ["DeformableDetrConfig"], + "models.deit": 
["DeiTConfig"], "models.deprecated": [], "models.deprecated.bort": [], "models.deprecated.mctct": [ - "MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig", "MCTCTFeatureExtractor", "MCTCTProcessor", ], "models.deprecated.mmbt": ["MMBTConfig"], - "models.deprecated.open_llama": [ - "OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", - "OpenLlamaConfig", - ], + "models.deprecated.open_llama": ["OpenLlamaConfig"], "models.deprecated.retribert": [ - "RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RetriBertConfig", "RetriBertTokenizer", ], "models.deprecated.tapex": ["TapexTokenizer"], - "models.deprecated.trajectory_transformer": [ - "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "TrajectoryTransformerConfig", - ], + "models.deprecated.trajectory_transformer": ["TrajectoryTransformerConfig"], "models.deprecated.transfo_xl": [ - "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig", "TransfoXLCorpus", "TransfoXLTokenizer", ], - "models.deprecated.van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"], - "models.depth_anything": ["DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP", "DepthAnythingConfig"], - "models.deta": ["DETA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetaConfig"], - "models.detr": ["DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetrConfig"], + "models.deprecated.van": ["VanConfig"], + "models.depth_anything": ["DepthAnythingConfig"], + "models.deta": ["DetaConfig"], + "models.detr": ["DetrConfig"], "models.dialogpt": [], - "models.dinat": ["DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DinatConfig"], - "models.dinov2": ["DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Dinov2Config"], + "models.dinat": ["DinatConfig"], + "models.dinov2": ["Dinov2Config"], "models.distilbert": [ - "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertTokenizer", ], "models.dit": [], "models.donut": [ - "DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "DonutProcessor", "DonutSwinConfig", ], "models.dpr": [ - "DPR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPRConfig", 
"DPRContextEncoderTokenizer", "DPRQuestionEncoderTokenizer", "DPRReaderOutput", "DPRReaderTokenizer", ], - "models.dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"], - "models.efficientformer": [ - "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "EfficientFormerConfig", - ], - "models.efficientnet": [ - "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP", - "EfficientNetConfig", - ], + "models.dpt": ["DPTConfig"], + "models.efficientformer": ["EfficientFormerConfig"], + "models.efficientnet": ["EfficientNetConfig"], "models.electra": [ - "ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraTokenizer", ], "models.encodec": [ - "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", "EncodecFeatureExtractor", ], "models.encoder_decoder": ["EncoderDecoderConfig"], - "models.ernie": [ - "ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", - "ErnieConfig", - ], - "models.ernie_m": ["ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieMConfig"], - "models.esm": ["ESM_PRETRAINED_CONFIG_ARCHIVE_MAP", "EsmConfig", "EsmTokenizer"], - "models.falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"], + "models.ernie": ["ErnieConfig"], + "models.ernie_m": ["ErnieMConfig"], + "models.esm": ["EsmConfig", "EsmTokenizer"], + "models.falcon": ["FalconConfig"], "models.fastspeech2_conformer": [ - "FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP", - "FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "FastSpeech2ConformerConfig", "FastSpeech2ConformerHifiGanConfig", "FastSpeech2ConformerTokenizer", "FastSpeech2ConformerWithHifiGanConfig", ], - "models.flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertTokenizer"], + "models.flaubert": ["FlaubertConfig", "FlaubertTokenizer"], "models.flava": [ - "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlavaConfig", "FlavaImageCodebookConfig", "FlavaImageConfig", "FlavaMultimodalConfig", "FlavaTextConfig", ], - "models.fnet": 
["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"], - "models.focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"], + "models.fnet": ["FNetConfig"], + "models.focalnet": ["FocalNetConfig"], "models.fsmt": [ - "FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FSMTConfig", "FSMTTokenizer", ], "models.funnel": [ - "FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig", "FunnelTokenizer", ], - "models.fuyu": ["FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP", "FuyuConfig"], - "models.gemma": ["GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "GemmaConfig"], + "models.fuyu": ["FuyuConfig"], + "models.gemma": ["GemmaConfig"], "models.git": [ - "GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitProcessor", "GitVisionConfig", ], - "models.glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"], + "models.glpn": ["GLPNConfig"], "models.gpt2": [ - "GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2Tokenizer", ], - "models.gpt_bigcode": [ - "GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", - "GPTBigCodeConfig", - ], - "models.gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig"], - "models.gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"], - "models.gpt_neox_japanese": [ - "GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", - "GPTNeoXJapaneseConfig", - ], + "models.gpt_bigcode": ["GPTBigCodeConfig"], + "models.gpt_neo": ["GPTNeoConfig"], + "models.gpt_neox": ["GPTNeoXConfig"], + "models.gpt_neox_japanese": ["GPTNeoXJapaneseConfig"], "models.gpt_sw3": [], - "models.gptj": ["GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTJConfig"], + "models.gptj": ["GPTJConfig"], "models.gptsan_japanese": [ - "GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTSanJapaneseConfig", "GPTSanJapaneseTokenizer", ], - "models.graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"], + "models.graphormer": ["GraphormerConfig"], "models.grounding_dino": [ - "GROUNDING_DINO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GroundingDinoConfig", "GroundingDinoProcessor", ], 
"models.groupvit": [ - "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GroupViTConfig", "GroupViTTextConfig", "GroupViTVisionConfig", ], "models.herbert": ["HerbertTokenizer"], - "models.hubert": ["HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "HubertConfig"], - "models.ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig"], - "models.idefics": [ - "IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP", - "IdeficsConfig", - ], + "models.hubert": ["HubertConfig"], + "models.ibert": ["IBertConfig"], + "models.idefics": ["IdeficsConfig"], "models.idefics2": ["Idefics2Config"], - "models.imagegpt": ["IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ImageGPTConfig"], - "models.informer": ["INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "InformerConfig"], + "models.imagegpt": ["ImageGPTConfig"], + "models.informer": ["InformerConfig"], "models.instructblip": [ - "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "InstructBlipConfig", "InstructBlipProcessor", "InstructBlipQFormerConfig", @@ -534,24 +444,20 @@ ], "models.jamba": ["JambaConfig"], "models.jukebox": [ - "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "JukeboxConfig", "JukeboxPriorConfig", "JukeboxTokenizer", "JukeboxVQVAEConfig", ], "models.kosmos2": [ - "KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Kosmos2Config", "Kosmos2Processor", ], "models.layoutlm": [ - "LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMConfig", "LayoutLMTokenizer", ], "models.layoutlmv2": [ - "LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config", "LayoutLMv2FeatureExtractor", "LayoutLMv2ImageProcessor", @@ -559,7 +465,6 @@ "LayoutLMv2Tokenizer", ], "models.layoutlmv3": [ - "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv3Config", "LayoutLMv3FeatureExtractor", "LayoutLMv3ImageProcessor", @@ -567,230 +472,171 @@ "LayoutLMv3Tokenizer", ], "models.layoutxlm": ["LayoutXLMProcessor"], - "models.led": ["LED_PRETRAINED_CONFIG_ARCHIVE_MAP", "LEDConfig", "LEDTokenizer"], - "models.levit": ["LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LevitConfig"], - "models.lilt": 
["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"], - "models.llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"], + "models.led": ["LEDConfig", "LEDTokenizer"], + "models.levit": ["LevitConfig"], + "models.lilt": ["LiltConfig"], + "models.llama": ["LlamaConfig"], "models.llava": [ - "LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlavaConfig", "LlavaProcessor", ], "models.llava_next": [ - "LLAVA_NEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlavaNextConfig", "LlavaNextProcessor", ], "models.longformer": [ - "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongformerConfig", "LongformerTokenizer", ], - "models.longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config"], + "models.longt5": ["LongT5Config"], "models.luke": [ - "LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig", "LukeTokenizer", ], "models.lxmert": [ - "LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig", "LxmertTokenizer", ], - "models.m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config"], - "models.mamba": ["MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MambaConfig"], + "models.m2m_100": ["M2M100Config"], + "models.mamba": ["MambaConfig"], "models.marian": ["MarianConfig"], "models.markuplm": [ - "MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "MarkupLMConfig", "MarkupLMFeatureExtractor", "MarkupLMProcessor", "MarkupLMTokenizer", ], - "models.mask2former": [ - "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "Mask2FormerConfig", - ], + "models.mask2former": ["Mask2FormerConfig"], "models.maskformer": [ - "MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig", "MaskFormerSwinConfig", ], "models.mbart": ["MBartConfig"], "models.mbart50": [], - "models.mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig"], - "models.megatron_bert": [ - "MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", - "MegatronBertConfig", - ], + "models.mega": ["MegaConfig"], + "models.megatron_bert": ["MegatronBertConfig"], "models.megatron_gpt2": [], "models.mgp_str": [ - "MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", 
"MgpstrConfig", "MgpstrProcessor", "MgpstrTokenizer", ], - "models.mistral": ["MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP", "MistralConfig"], - "models.mixtral": ["MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP", "MixtralConfig"], + "models.mistral": ["MistralConfig"], + "models.mixtral": ["MixtralConfig"], "models.mluke": [], "models.mobilebert": [ - "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileBertConfig", "MobileBertTokenizer", ], - "models.mobilenet_v1": [ - "MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP", - "MobileNetV1Config", - ], - "models.mobilenet_v2": [ - "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", - "MobileNetV2Config", - ], - "models.mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig"], - "models.mobilevitv2": [ - "MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP", - "MobileViTV2Config", - ], + "models.mobilenet_v1": ["MobileNetV1Config"], + "models.mobilenet_v2": ["MobileNetV2Config"], + "models.mobilevit": ["MobileViTConfig"], + "models.mobilevitv2": ["MobileViTV2Config"], "models.mpnet": [ - "MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "MPNetConfig", "MPNetTokenizer", ], - "models.mpt": ["MPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MptConfig"], - "models.mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"], + "models.mpt": ["MptConfig"], + "models.mra": ["MraConfig"], "models.mt5": ["MT5Config"], "models.musicgen": [ - "MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", "MusicgenConfig", "MusicgenDecoderConfig", ], "models.musicgen_melody": [ - "MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST", "MusicgenMelodyConfig", "MusicgenMelodyDecoderConfig", ], "models.mvp": ["MvpConfig", "MvpTokenizer"], - "models.nat": ["NAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "NatConfig"], - "models.nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"], + "models.nat": ["NatConfig"], + "models.nezha": ["NezhaConfig"], "models.nllb": [], - "models.nllb_moe": ["NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "NllbMoeConfig"], + "models.nllb_moe": ["NllbMoeConfig"], "models.nougat": 
["NougatProcessor"], - "models.nystromformer": [ - "NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "NystromformerConfig", - ], - "models.olmo": ["OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP", "OlmoConfig"], + "models.nystromformer": ["NystromformerConfig"], + "models.olmo": ["OlmoConfig"], "models.oneformer": [ - "ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "OneFormerConfig", "OneFormerProcessor", ], "models.openai": [ - "OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OpenAIGPTConfig", "OpenAIGPTTokenizer", ], "models.opt": ["OPTConfig"], "models.owlv2": [ - "OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Owlv2Config", "Owlv2Processor", "Owlv2TextConfig", "Owlv2VisionConfig", ], "models.owlvit": [ - "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OwlViTConfig", "OwlViTProcessor", "OwlViTTextConfig", "OwlViTVisionConfig", ], - "models.patchtsmixer": [ - "PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "PatchTSMixerConfig", - ], - "models.patchtst": ["PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP", "PatchTSTConfig"], + "models.patchtsmixer": ["PatchTSMixerConfig"], + "models.patchtst": ["PatchTSTConfig"], "models.pegasus": [ - "PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusConfig", "PegasusTokenizer", ], - "models.pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"], + "models.pegasus_x": ["PegasusXConfig"], "models.perceiver": [ - "PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverTokenizer", ], - "models.persimmon": ["PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP", "PersimmonConfig"], - "models.phi": ["PHI_PRETRAINED_CONFIG_ARCHIVE_MAP", "PhiConfig"], - "models.phi3": ["PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP", "Phi3Config"], + "models.persimmon": ["PersimmonConfig"], + "models.phi": ["PhiConfig"], + "models.phi3": ["Phi3Config"], "models.phobert": ["PhobertTokenizer"], "models.pix2struct": [ - "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Pix2StructConfig", "Pix2StructProcessor", "Pix2StructTextConfig", "Pix2StructVisionConfig", ], - "models.plbart": 
["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"], - "models.poolformer": [ - "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "PoolFormerConfig", - ], - "models.pop2piano": [ - "POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP", - "Pop2PianoConfig", - ], + "models.plbart": ["PLBartConfig"], + "models.poolformer": ["PoolFormerConfig"], + "models.pop2piano": ["Pop2PianoConfig"], "models.prophetnet": [ - "PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ProphetNetConfig", "ProphetNetTokenizer", ], - "models.pvt": ["PVT_PRETRAINED_CONFIG_ARCHIVE_MAP", "PvtConfig"], + "models.pvt": ["PvtConfig"], "models.pvt_v2": ["PvtV2Config"], - "models.qdqbert": ["QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "QDQBertConfig"], + "models.qdqbert": ["QDQBertConfig"], "models.qwen2": [ - "QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Qwen2Config", "Qwen2Tokenizer", ], - "models.qwen2_moe": [ - "QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", - "Qwen2MoeConfig", - ], + "models.qwen2_moe": ["Qwen2MoeConfig"], "models.rag": ["RagConfig", "RagRetriever", "RagTokenizer"], "models.realm": [ - "REALM_PRETRAINED_CONFIG_ARCHIVE_MAP", "RealmConfig", "RealmTokenizer", ], "models.recurrent_gemma": ["RecurrentGemmaConfig"], - "models.reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"], - "models.regnet": ["REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "RegNetConfig"], - "models.rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig"], - "models.resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig"], + "models.reformer": ["ReformerConfig"], + "models.regnet": ["RegNetConfig"], + "models.rembert": ["RemBertConfig"], + "models.resnet": ["ResNetConfig"], "models.roberta": [ - "ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaTokenizer", ], - "models.roberta_prelayernorm": [ - "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP", - "RobertaPreLayerNormConfig", - ], + "models.roberta_prelayernorm": ["RobertaPreLayerNormConfig"], "models.roc_bert": [ - 
"ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig", "RoCBertTokenizer", ], "models.roformer": [ - "ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerTokenizer", ], - "models.rwkv": ["RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP", "RwkvConfig"], + "models.rwkv": ["RwkvConfig"], "models.sam": [ - "SAM_PRETRAINED_CONFIG_ARCHIVE_MAP", "SamConfig", "SamMaskDecoderConfig", "SamProcessor", @@ -798,21 +644,16 @@ "SamVisionConfig", ], "models.seamless_m4t": [ - "SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP", "SeamlessM4TConfig", "SeamlessM4TFeatureExtractor", "SeamlessM4TProcessor", ], - "models.seamless_m4t_v2": [ - "SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", - "SeamlessM4Tv2Config", - ], - "models.segformer": ["SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegformerConfig"], - "models.seggpt": ["SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegGptConfig"], - "models.sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"], - "models.sew_d": ["SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWDConfig"], + "models.seamless_m4t_v2": ["SeamlessM4Tv2Config"], + "models.segformer": ["SegformerConfig"], + "models.seggpt": ["SegGptConfig"], + "models.sew": ["SEWConfig"], + "models.sew_d": ["SEWDConfig"], "models.siglip": [ - "SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "SiglipConfig", "SiglipProcessor", "SiglipTextConfig", @@ -820,145 +661,97 @@ ], "models.speech_encoder_decoder": ["SpeechEncoderDecoderConfig"], "models.speech_to_text": [ - "SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig", "Speech2TextFeatureExtractor", "Speech2TextProcessor", ], "models.speech_to_text_2": [ - "SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2Text2Config", "Speech2Text2Processor", "Speech2Text2Tokenizer", ], "models.speecht5": [ - "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP", - "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP", "SpeechT5Config", "SpeechT5FeatureExtractor", "SpeechT5HifiGanConfig", "SpeechT5Processor", ], "models.splinter": [ - 
"SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SplinterConfig", "SplinterTokenizer", ], "models.squeezebert": [ - "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SqueezeBertConfig", "SqueezeBertTokenizer", ], - "models.stablelm": ["STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP", "StableLmConfig"], - "models.starcoder2": ["STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Starcoder2Config"], - "models.superpoint": ["SUPERPOINT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SuperPointConfig"], - "models.swiftformer": [ - "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "SwiftFormerConfig", - ], - "models.swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig"], - "models.swin2sr": ["SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swin2SRConfig"], - "models.swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"], - "models.switch_transformers": [ - "SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP", - "SwitchTransformersConfig", - ], - "models.t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5Config"], - "models.table_transformer": [ - "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "TableTransformerConfig", - ], + "models.stablelm": ["StableLmConfig"], + "models.starcoder2": ["Starcoder2Config"], + "models.superpoint": ["SuperPointConfig"], + "models.swiftformer": ["SwiftFormerConfig"], + "models.swin": ["SwinConfig"], + "models.swin2sr": ["Swin2SRConfig"], + "models.swinv2": ["Swinv2Config"], + "models.switch_transformers": ["SwitchTransformersConfig"], + "models.t5": ["T5Config"], + "models.table_transformer": ["TableTransformerConfig"], "models.tapas": [ - "TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig", "TapasTokenizer", ], - "models.time_series_transformer": [ - "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "TimeSeriesTransformerConfig", - ], - "models.timesformer": [ - "TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "TimesformerConfig", - ], + "models.time_series_transformer": ["TimeSeriesTransformerConfig"], + "models.timesformer": ["TimesformerConfig"], 
"models.timm_backbone": ["TimmBackboneConfig"], "models.trocr": [ - "TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig", "TrOCRProcessor", ], "models.tvlt": [ - "TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP", "TvltConfig", "TvltFeatureExtractor", "TvltProcessor", ], "models.tvp": [ - "TVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "TvpConfig", "TvpProcessor", ], "models.udop": [ - "UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP", "UdopConfig", "UdopProcessor", ], "models.umt5": ["UMT5Config"], - "models.unispeech": [ - "UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", - "UniSpeechConfig", - ], - "models.unispeech_sat": [ - "UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP", - "UniSpeechSatConfig", - ], + "models.unispeech": ["UniSpeechConfig"], + "models.unispeech_sat": ["UniSpeechSatConfig"], "models.univnet": [ - "UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "UnivNetConfig", "UnivNetFeatureExtractor", ], "models.upernet": ["UperNetConfig"], - "models.videomae": ["VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VideoMAEConfig"], + "models.videomae": ["VideoMAEConfig"], "models.vilt": [ - "VILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViltConfig", "ViltFeatureExtractor", "ViltImageProcessor", "ViltProcessor", ], - "models.vipllava": [ - "VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", - "VipLlavaConfig", - ], + "models.vipllava": ["VipLlavaConfig"], "models.vision_encoder_decoder": ["VisionEncoderDecoderConfig"], "models.vision_text_dual_encoder": [ "VisionTextDualEncoderConfig", "VisionTextDualEncoderProcessor", ], - "models.visual_bert": [ - "VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", - "VisualBertConfig", - ], - "models.vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"], - "models.vit_hybrid": [ - "VIT_HYBRID_PRETRAINED_CONFIG_ARCHIVE_MAP", - "ViTHybridConfig", - ], - "models.vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"], - "models.vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"], - "models.vitdet": ["VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP", "VitDetConfig"], - "models.vitmatte": 
["VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VitMatteConfig"], + "models.visual_bert": ["VisualBertConfig"], + "models.vit": ["ViTConfig"], + "models.vit_hybrid": ["ViTHybridConfig"], + "models.vit_mae": ["ViTMAEConfig"], + "models.vit_msn": ["ViTMSNConfig"], + "models.vitdet": ["VitDetConfig"], + "models.vitmatte": ["VitMatteConfig"], "models.vits": [ - "VITS_PRETRAINED_CONFIG_ARCHIVE_MAP", "VitsConfig", "VitsTokenizer", ], - "models.vivit": [ - "VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", - "VivitConfig", - ], + "models.vivit": ["VivitConfig"], "models.wav2vec2": [ - "WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config", "Wav2Vec2CTCTokenizer", "Wav2Vec2FeatureExtractor", @@ -966,52 +759,34 @@ "Wav2Vec2Tokenizer", ], "models.wav2vec2_bert": [ - "WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2BertConfig", "Wav2Vec2BertProcessor", ], - "models.wav2vec2_conformer": [ - "WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "Wav2Vec2ConformerConfig", - ], + "models.wav2vec2_conformer": ["Wav2Vec2ConformerConfig"], "models.wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"], "models.wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"], - "models.wavlm": [ - "WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", - "WavLMConfig", - ], + "models.wavlm": ["WavLMConfig"], "models.whisper": [ - "WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperFeatureExtractor", "WhisperProcessor", "WhisperTokenizer", ], "models.x_clip": [ - "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "XCLIPConfig", "XCLIPProcessor", "XCLIPTextConfig", "XCLIPVisionConfig", ], - "models.xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"], - "models.xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMTokenizer"], - "models.xlm_prophetnet": [ - "XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", - "XLMProphetNetConfig", - ], - "models.xlm_roberta": [ - "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", - "XLMRobertaConfig", - ], - "models.xlm_roberta_xl": [ - "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", 
- "XLMRobertaXLConfig", - ], - "models.xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"], - "models.xmod": ["XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP", "XmodConfig"], - "models.yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig"], - "models.yoso": ["YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP", "YosoConfig"], + "models.xglm": ["XGLMConfig"], + "models.xlm": ["XLMConfig", "XLMTokenizer"], + "models.xlm_prophetnet": ["XLMProphetNetConfig"], + "models.xlm_roberta": ["XLMRobertaConfig"], + "models.xlm_roberta_xl": ["XLMRobertaXLConfig"], + "models.xlnet": ["XLNetConfig"], + "models.xmod": ["XmodConfig"], + "models.yolos": ["YolosConfig"], + "models.yoso": ["YosoConfig"], "onnx": [], "pipelines": [ "AudioClassificationPipeline", @@ -1466,7 +1241,6 @@ _import_structure["models.albert"].extend( [ - "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "AlbertForMaskedLM", "AlbertForMultipleChoice", "AlbertForPreTraining", @@ -1481,7 +1255,6 @@ _import_structure["models.align"].extend( [ - "ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST", "AlignModel", "AlignPreTrainedModel", "AlignTextModel", @@ -1491,7 +1264,6 @@ _import_structure["models.altclip"].extend( [ - "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPModel", "AltCLIPPreTrainedModel", "AltCLIPTextModel", @@ -1500,7 +1272,6 @@ ) _import_structure["models.audio_spectrogram_transformer"].extend( [ - "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ASTForAudioClassification", "ASTModel", "ASTPreTrainedModel", @@ -1592,7 +1363,6 @@ ) _import_structure["models.autoformer"].extend( [ - "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "AutoformerForPrediction", "AutoformerModel", "AutoformerPreTrainedModel", @@ -1600,7 +1370,6 @@ ) _import_structure["models.bark"].extend( [ - "BARK_PRETRAINED_MODEL_ARCHIVE_LIST", "BarkCausalModel", "BarkCoarseModel", "BarkFineModel", @@ -1611,7 +1380,6 @@ ) _import_structure["models.bart"].extend( [ - "BART_PRETRAINED_MODEL_ARCHIVE_LIST", "BartForCausalLM", "BartForConditionalGeneration", 
"BartForQuestionAnswering", @@ -1624,7 +1392,6 @@ ) _import_structure["models.beit"].extend( [ - "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "BeitBackbone", "BeitForImageClassification", "BeitForMaskedImageModeling", @@ -1635,7 +1402,6 @@ ) _import_structure["models.bert"].extend( [ - "BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "BertForMaskedLM", "BertForMultipleChoice", "BertForNextSentencePrediction", @@ -1660,7 +1426,6 @@ ) _import_structure["models.big_bird"].extend( [ - "BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST", "BigBirdForCausalLM", "BigBirdForMaskedLM", "BigBirdForMultipleChoice", @@ -1676,7 +1441,6 @@ ) _import_structure["models.bigbird_pegasus"].extend( [ - "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST", "BigBirdPegasusForCausalLM", "BigBirdPegasusForConditionalGeneration", "BigBirdPegasusForQuestionAnswering", @@ -1687,7 +1451,6 @@ ) _import_structure["models.biogpt"].extend( [ - "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST", "BioGptForCausalLM", "BioGptForSequenceClassification", "BioGptForTokenClassification", @@ -1697,7 +1460,6 @@ ) _import_structure["models.bit"].extend( [ - "BIT_PRETRAINED_MODEL_ARCHIVE_LIST", "BitBackbone", "BitForImageClassification", "BitModel", @@ -1706,7 +1468,6 @@ ) _import_structure["models.blenderbot"].extend( [ - "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", @@ -1715,7 +1476,6 @@ ) _import_structure["models.blenderbot_small"].extend( [ - "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotSmallForCausalLM", "BlenderbotSmallForConditionalGeneration", "BlenderbotSmallModel", @@ -1724,7 +1484,6 @@ ) _import_structure["models.blip"].extend( [ - "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "BlipForConditionalGeneration", "BlipForImageTextRetrieval", "BlipForQuestionAnswering", @@ -1736,7 +1495,6 @@ ) _import_structure["models.blip_2"].extend( [ - "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Blip2ForConditionalGeneration", "Blip2Model", "Blip2PreTrainedModel", @@ -1746,7 
+1504,6 @@ ) _import_structure["models.bloom"].extend( [ - "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", "BloomForCausalLM", "BloomForQuestionAnswering", "BloomForSequenceClassification", @@ -1757,7 +1514,6 @@ ) _import_structure["models.bridgetower"].extend( [ - "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST", "BridgeTowerForContrastiveLearning", "BridgeTowerForImageAndTextRetrieval", "BridgeTowerForMaskedLM", @@ -1767,7 +1523,6 @@ ) _import_structure["models.bros"].extend( [ - "BROS_PRETRAINED_MODEL_ARCHIVE_LIST", "BrosForTokenClassification", "BrosModel", "BrosPreTrainedModel", @@ -1778,7 +1533,6 @@ ) _import_structure["models.camembert"].extend( [ - "CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "CamembertForCausalLM", "CamembertForMaskedLM", "CamembertForMultipleChoice", @@ -1791,7 +1545,6 @@ ) _import_structure["models.canine"].extend( [ - "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST", "CanineForMultipleChoice", "CanineForQuestionAnswering", "CanineForSequenceClassification", @@ -1804,7 +1557,6 @@ ) _import_structure["models.chinese_clip"].extend( [ - "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "ChineseCLIPModel", "ChineseCLIPPreTrainedModel", "ChineseCLIPTextModel", @@ -1813,7 +1565,6 @@ ) _import_structure["models.clap"].extend( [ - "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapAudioModel", "ClapAudioModelWithProjection", "ClapFeatureExtractor", @@ -1825,7 +1576,6 @@ ) _import_structure["models.clip"].extend( [ - "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPForImageClassification", "CLIPModel", "CLIPPreTrainedModel", @@ -1837,7 +1587,6 @@ ) _import_structure["models.clipseg"].extend( [ - "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPSegForImageSegmentation", "CLIPSegModel", "CLIPSegPreTrainedModel", @@ -1847,7 +1596,6 @@ ) _import_structure["models.clvp"].extend( [ - "CLVP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClvpDecoder", "ClvpEncoder", "ClvpForCausalLM", @@ -1858,7 +1606,6 @@ ) _import_structure["models.codegen"].extend( [ - "CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST", 
"CodeGenForCausalLM", "CodeGenModel", "CodeGenPreTrainedModel", @@ -1867,7 +1614,6 @@ _import_structure["models.cohere"].extend(["CohereForCausalLM", "CohereModel", "CoherePreTrainedModel"]) _import_structure["models.conditional_detr"].extend( [ - "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", @@ -1876,7 +1622,6 @@ ) _import_structure["models.convbert"].extend( [ - "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "ConvBertForMaskedLM", "ConvBertForMultipleChoice", "ConvBertForQuestionAnswering", @@ -1890,7 +1635,6 @@ ) _import_structure["models.convnext"].extend( [ - "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "ConvNextBackbone", "ConvNextForImageClassification", "ConvNextModel", @@ -1899,7 +1643,6 @@ ) _import_structure["models.convnextv2"].extend( [ - "CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST", "ConvNextV2Backbone", "ConvNextV2ForImageClassification", "ConvNextV2Model", @@ -1908,7 +1651,6 @@ ) _import_structure["models.cpmant"].extend( [ - "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST", "CpmAntForCausalLM", "CpmAntModel", "CpmAntPreTrainedModel", @@ -1916,7 +1658,6 @@ ) _import_structure["models.ctrl"].extend( [ - "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", @@ -1925,7 +1666,6 @@ ) _import_structure["models.cvt"].extend( [ - "CVT_PRETRAINED_MODEL_ARCHIVE_LIST", "CvtForImageClassification", "CvtModel", "CvtPreTrainedModel", @@ -1933,9 +1673,6 @@ ) _import_structure["models.data2vec"].extend( [ - "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST", - "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", - "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecAudioForAudioFrameClassification", "Data2VecAudioForCTC", "Data2VecAudioForSequenceClassification", @@ -1965,7 +1702,6 @@ ) _import_structure["models.deberta"].extend( [ - "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "DebertaForMaskedLM", "DebertaForQuestionAnswering", 
"DebertaForSequenceClassification", @@ -1976,7 +1712,6 @@ ) _import_structure["models.deberta_v2"].extend( [ - "DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST", "DebertaV2ForMaskedLM", "DebertaV2ForMultipleChoice", "DebertaV2ForQuestionAnswering", @@ -1988,7 +1723,6 @@ ) _import_structure["models.decision_transformer"].extend( [ - "DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "DecisionTransformerGPT2Model", "DecisionTransformerGPT2PreTrainedModel", "DecisionTransformerModel", @@ -1997,7 +1731,6 @@ ) _import_structure["models.deformable_detr"].extend( [ - "DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "DeformableDetrForObjectDetection", "DeformableDetrModel", "DeformableDetrPreTrainedModel", @@ -2005,7 +1738,6 @@ ) _import_structure["models.deit"].extend( [ - "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "DeiTForImageClassification", "DeiTForImageClassificationWithTeacher", "DeiTForMaskedImageModeling", @@ -2015,7 +1747,6 @@ ) _import_structure["models.deprecated.mctct"].extend( [ - "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST", "MCTCTForCTC", "MCTCTModel", "MCTCTPreTrainedModel", @@ -2032,21 +1763,18 @@ ) _import_structure["models.deprecated.retribert"].extend( [ - "RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RetriBertModel", "RetriBertPreTrainedModel", ] ) _import_structure["models.deprecated.trajectory_transformer"].extend( [ - "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TrajectoryTransformerModel", "TrajectoryTransformerPreTrainedModel", ] ) _import_structure["models.deprecated.transfo_xl"].extend( [ - "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "AdaptiveEmbedding", "TransfoXLForSequenceClassification", "TransfoXLLMHeadModel", @@ -2057,7 +1785,6 @@ ) _import_structure["models.deprecated.van"].extend( [ - "VAN_PRETRAINED_MODEL_ARCHIVE_LIST", "VanForImageClassification", "VanModel", "VanPreTrainedModel", @@ -2065,14 +1792,12 @@ ) _import_structure["models.depth_anything"].extend( [ - "DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST", 
"DepthAnythingForDepthEstimation", "DepthAnythingPreTrainedModel", ] ) _import_structure["models.deta"].extend( [ - "DETA_PRETRAINED_MODEL_ARCHIVE_LIST", "DetaForObjectDetection", "DetaModel", "DetaPreTrainedModel", @@ -2080,7 +1805,6 @@ ) _import_structure["models.detr"].extend( [ - "DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "DetrForObjectDetection", "DetrForSegmentation", "DetrModel", @@ -2089,7 +1813,6 @@ ) _import_structure["models.dinat"].extend( [ - "DINAT_PRETRAINED_MODEL_ARCHIVE_LIST", "DinatBackbone", "DinatForImageClassification", "DinatModel", @@ -2098,7 +1821,6 @@ ) _import_structure["models.dinov2"].extend( [ - "DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Dinov2Backbone", "Dinov2ForImageClassification", "Dinov2Model", @@ -2107,7 +1829,6 @@ ) _import_structure["models.distilbert"].extend( [ - "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "DistilBertForMaskedLM", "DistilBertForMultipleChoice", "DistilBertForQuestionAnswering", @@ -2119,16 +1840,12 @@ ) _import_structure["models.donut"].extend( [ - "DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "DonutSwinModel", "DonutSwinPreTrainedModel", ] ) _import_structure["models.dpr"].extend( [ - "DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", - "DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", - "DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST", "DPRContextEncoder", "DPRPretrainedContextEncoder", "DPRPreTrainedModel", @@ -2140,7 +1857,6 @@ ) _import_structure["models.dpt"].extend( [ - "DPT_PRETRAINED_MODEL_ARCHIVE_LIST", "DPTForDepthEstimation", "DPTForSemanticSegmentation", "DPTModel", @@ -2149,7 +1865,6 @@ ) _import_structure["models.efficientformer"].extend( [ - "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientFormerForImageClassification", "EfficientFormerForImageClassificationWithTeacher", "EfficientFormerModel", @@ -2158,7 +1873,6 @@ ) _import_structure["models.efficientnet"].extend( [ - "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientNetForImageClassification", "EfficientNetModel", 
"EfficientNetPreTrainedModel", @@ -2166,7 +1880,6 @@ ) _import_structure["models.electra"].extend( [ - "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST", "ElectraForCausalLM", "ElectraForMaskedLM", "ElectraForMultipleChoice", @@ -2181,7 +1894,6 @@ ) _import_structure["models.encodec"].extend( [ - "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] @@ -2189,7 +1901,6 @@ _import_structure["models.encoder_decoder"].append("EncoderDecoderModel") _import_structure["models.ernie"].extend( [ - "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST", "ErnieForCausalLM", "ErnieForMaskedLM", "ErnieForMultipleChoice", @@ -2204,7 +1915,6 @@ ) _import_structure["models.ernie_m"].extend( [ - "ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST", "ErnieMForInformationExtraction", "ErnieMForMultipleChoice", "ErnieMForQuestionAnswering", @@ -2216,7 +1926,6 @@ ) _import_structure["models.esm"].extend( [ - "ESM_PRETRAINED_MODEL_ARCHIVE_LIST", "EsmFoldPreTrainedModel", "EsmForMaskedLM", "EsmForProteinFolding", @@ -2228,7 +1937,6 @@ ) _import_structure["models.falcon"].extend( [ - "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST", "FalconForCausalLM", "FalconForQuestionAnswering", "FalconForSequenceClassification", @@ -2239,7 +1947,6 @@ ) _import_structure["models.fastspeech2_conformer"].extend( [ - "FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FastSpeech2ConformerHifiGan", "FastSpeech2ConformerModel", "FastSpeech2ConformerPreTrainedModel", @@ -2248,7 +1955,6 @@ ) _import_structure["models.flaubert"].extend( [ - "FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaubertForMultipleChoice", "FlaubertForQuestionAnswering", "FlaubertForQuestionAnsweringSimple", @@ -2261,7 +1967,6 @@ ) _import_structure["models.flava"].extend( [ - "FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST", "FlavaForPreTraining", "FlavaImageCodebook", "FlavaImageModel", @@ -2273,7 +1978,6 @@ ) _import_structure["models.fnet"].extend( [ - "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FNetForMaskedLM", "FNetForMultipleChoice", 
"FNetForNextSentencePrediction", @@ -2288,7 +1992,6 @@ ) _import_structure["models.focalnet"].extend( [ - "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetBackbone", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", @@ -2299,7 +2002,6 @@ _import_structure["models.fsmt"].extend(["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"]) _import_structure["models.funnel"].extend( [ - "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "FunnelBaseModel", "FunnelForMaskedLM", "FunnelForMultipleChoice", @@ -2323,7 +2025,6 @@ ) _import_structure["models.git"].extend( [ - "GIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GitForCausalLM", "GitModel", "GitPreTrainedModel", @@ -2332,7 +2033,6 @@ ) _import_structure["models.glpn"].extend( [ - "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST", "GLPNForDepthEstimation", "GLPNModel", "GLPNPreTrainedModel", @@ -2340,7 +2040,6 @@ ) _import_structure["models.gpt2"].extend( [ - "GPT2_PRETRAINED_MODEL_ARCHIVE_LIST", "GPT2DoubleHeadsModel", "GPT2ForQuestionAnswering", "GPT2ForSequenceClassification", @@ -2353,7 +2052,6 @@ ) _import_structure["models.gpt_bigcode"].extend( [ - "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTBigCodeForCausalLM", "GPTBigCodeForSequenceClassification", "GPTBigCodeForTokenClassification", @@ -2363,7 +2061,6 @@ ) _import_structure["models.gpt_neo"].extend( [ - "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", @@ -2375,7 +2072,6 @@ ) _import_structure["models.gpt_neox"].extend( [ - "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoXForCausalLM", "GPTNeoXForQuestionAnswering", "GPTNeoXForSequenceClassification", @@ -2387,7 +2083,6 @@ ) _import_structure["models.gpt_neox_japanese"].extend( [ - "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoXJapaneseForCausalLM", "GPTNeoXJapaneseLayer", "GPTNeoXJapaneseModel", @@ -2396,7 +2091,6 @@ ) _import_structure["models.gptj"].extend( [ - "GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST", 
"GPTJForCausalLM", "GPTJForQuestionAnswering", "GPTJForSequenceClassification", @@ -2406,7 +2100,6 @@ ) _import_structure["models.gptsan_japanese"].extend( [ - "GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTSanJapaneseForConditionalGeneration", "GPTSanJapaneseModel", "GPTSanJapanesePreTrainedModel", @@ -2414,7 +2107,6 @@ ) _import_structure["models.graphormer"].extend( [ - "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "GraphormerForGraphClassification", "GraphormerModel", "GraphormerPreTrainedModel", @@ -2422,7 +2114,6 @@ ) _import_structure["models.grounding_dino"].extend( [ - "GROUNDING_DINO_PRETRAINED_MODEL_ARCHIVE_LIST", "GroundingDinoForObjectDetection", "GroundingDinoModel", "GroundingDinoPreTrainedModel", @@ -2430,7 +2121,6 @@ ) _import_structure["models.groupvit"].extend( [ - "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GroupViTModel", "GroupViTPreTrainedModel", "GroupViTTextModel", @@ -2439,7 +2129,6 @@ ) _import_structure["models.hubert"].extend( [ - "HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "HubertForCTC", "HubertForSequenceClassification", "HubertModel", @@ -2448,7 +2137,6 @@ ) _import_structure["models.ibert"].extend( [ - "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "IBertForMaskedLM", "IBertForMultipleChoice", "IBertForQuestionAnswering", @@ -2460,7 +2148,6 @@ ) _import_structure["models.idefics"].extend( [ - "IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST", "IdeficsForVisionText2Text", "IdeficsModel", "IdeficsPreTrainedModel", @@ -2469,7 +2156,6 @@ ) _import_structure["models.idefics2"].extend( [ - "IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST", "Idefics2ForConditionalGeneration", "Idefics2Model", "Idefics2PreTrainedModel", @@ -2478,7 +2164,6 @@ ) _import_structure["models.imagegpt"].extend( [ - "IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST", "ImageGPTForCausalImageModeling", "ImageGPTForImageClassification", "ImageGPTModel", @@ -2488,7 +2173,6 @@ ) _import_structure["models.informer"].extend( [ - "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "InformerForPrediction", 
"InformerModel", "InformerPreTrainedModel", @@ -2496,7 +2180,6 @@ ) _import_structure["models.instructblip"].extend( [ - "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "InstructBlipForConditionalGeneration", "InstructBlipPreTrainedModel", "InstructBlipQFormerModel", @@ -2513,7 +2196,6 @@ ) _import_structure["models.jukebox"].extend( [ - "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", "JukeboxModel", "JukeboxPreTrainedModel", "JukeboxPrior", @@ -2522,7 +2204,6 @@ ) _import_structure["models.kosmos2"].extend( [ - "KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST", "Kosmos2ForConditionalGeneration", "Kosmos2Model", "Kosmos2PreTrainedModel", @@ -2530,7 +2211,6 @@ ) _import_structure["models.layoutlm"].extend( [ - "LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST", "LayoutLMForMaskedLM", "LayoutLMForQuestionAnswering", "LayoutLMForSequenceClassification", @@ -2541,7 +2221,6 @@ ) _import_structure["models.layoutlmv2"].extend( [ - "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST", "LayoutLMv2ForQuestionAnswering", "LayoutLMv2ForSequenceClassification", "LayoutLMv2ForTokenClassification", @@ -2551,7 +2230,6 @@ ) _import_structure["models.layoutlmv3"].extend( [ - "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST", "LayoutLMv3ForQuestionAnswering", "LayoutLMv3ForSequenceClassification", "LayoutLMv3ForTokenClassification", @@ -2561,7 +2239,6 @@ ) _import_structure["models.led"].extend( [ - "LED_PRETRAINED_MODEL_ARCHIVE_LIST", "LEDForConditionalGeneration", "LEDForQuestionAnswering", "LEDForSequenceClassification", @@ -2571,7 +2248,6 @@ ) _import_structure["models.levit"].extend( [ - "LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "LevitForImageClassification", "LevitForImageClassificationWithTeacher", "LevitModel", @@ -2580,7 +2256,6 @@ ) _import_structure["models.lilt"].extend( [ - "LILT_PRETRAINED_MODEL_ARCHIVE_LIST", "LiltForQuestionAnswering", "LiltForSequenceClassification", "LiltForTokenClassification", @@ -2599,21 +2274,18 @@ ) _import_structure["models.llava"].extend( [ - "LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST", 
"LlavaForConditionalGeneration", "LlavaPreTrainedModel", ] ) _import_structure["models.llava_next"].extend( [ - "LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "LlavaNextForConditionalGeneration", "LlavaNextPreTrainedModel", ] ) _import_structure["models.longformer"].extend( [ - "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "LongformerForMaskedLM", "LongformerForMultipleChoice", "LongformerForQuestionAnswering", @@ -2626,7 +2298,6 @@ ) _import_structure["models.longt5"].extend( [ - "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST", "LongT5EncoderModel", "LongT5ForConditionalGeneration", "LongT5Model", @@ -2635,7 +2306,6 @@ ) _import_structure["models.luke"].extend( [ - "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST", "LukeForEntityClassification", "LukeForEntityPairClassification", "LukeForEntitySpanClassification", @@ -2661,7 +2331,6 @@ ) _import_structure["models.m2m_100"].extend( [ - "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST", "M2M100ForConditionalGeneration", "M2M100Model", "M2M100PreTrainedModel", @@ -2669,7 +2338,6 @@ ) _import_structure["models.mamba"].extend( [ - "MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST", "MambaForCausalLM", "MambaModel", "MambaPreTrainedModel", @@ -2678,7 +2346,6 @@ _import_structure["models.marian"].extend(["MarianForCausalLM", "MarianModel", "MarianMTModel"]) _import_structure["models.markuplm"].extend( [ - "MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST", "MarkupLMForQuestionAnswering", "MarkupLMForSequenceClassification", "MarkupLMForTokenClassification", @@ -2688,7 +2355,6 @@ ) _import_structure["models.mask2former"].extend( [ - "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Mask2FormerForUniversalSegmentation", "Mask2FormerModel", "Mask2FormerPreTrainedModel", @@ -2696,7 +2362,6 @@ ) _import_structure["models.maskformer"].extend( [ - "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "MaskFormerForInstanceSegmentation", "MaskFormerModel", "MaskFormerPreTrainedModel", @@ -2715,7 +2380,6 @@ ) _import_structure["models.mega"].extend( [ - "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST", 
"MegaForCausalLM", "MegaForMaskedLM", "MegaForMultipleChoice", @@ -2728,7 +2392,6 @@ ) _import_structure["models.megatron_bert"].extend( [ - "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MegatronBertForCausalLM", "MegatronBertForMaskedLM", "MegatronBertForMultipleChoice", @@ -2743,7 +2406,6 @@ ) _import_structure["models.mgp_str"].extend( [ - "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST", "MgpstrForSceneTextRecognition", "MgpstrModel", "MgpstrPreTrainedModel", @@ -2762,7 +2424,6 @@ ) _import_structure["models.mobilebert"].extend( [ - "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileBertForMaskedLM", "MobileBertForMultipleChoice", "MobileBertForNextSentencePrediction", @@ -2778,7 +2439,6 @@ ) _import_structure["models.mobilenet_v1"].extend( [ - "MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileNetV1ForImageClassification", "MobileNetV1Model", "MobileNetV1PreTrainedModel", @@ -2787,7 +2447,6 @@ ) _import_structure["models.mobilenet_v2"].extend( [ - "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileNetV2ForImageClassification", "MobileNetV2ForSemanticSegmentation", "MobileNetV2Model", @@ -2797,7 +2456,6 @@ ) _import_structure["models.mobilevit"].extend( [ - "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTForImageClassification", "MobileViTForSemanticSegmentation", "MobileViTModel", @@ -2806,7 +2464,6 @@ ) _import_structure["models.mobilevitv2"].extend( [ - "MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTV2ForImageClassification", "MobileViTV2ForSemanticSegmentation", "MobileViTV2Model", @@ -2815,7 +2472,6 @@ ) _import_structure["models.mpnet"].extend( [ - "MPNET_PRETRAINED_MODEL_ARCHIVE_LIST", "MPNetForMaskedLM", "MPNetForMultipleChoice", "MPNetForQuestionAnswering", @@ -2828,7 +2484,6 @@ ) _import_structure["models.mpt"].extend( [ - "MPT_PRETRAINED_MODEL_ARCHIVE_LIST", "MptForCausalLM", "MptForQuestionAnswering", "MptForSequenceClassification", @@ -2839,7 +2494,6 @@ ) _import_structure["models.mra"].extend( [ - "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", 
"MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", @@ -2862,7 +2516,6 @@ ) _import_structure["models.musicgen"].extend( [ - "MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST", "MusicgenForCausalLM", "MusicgenForConditionalGeneration", "MusicgenModel", @@ -2872,7 +2525,6 @@ ) _import_structure["models.musicgen_melody"].extend( [ - "MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST", "MusicgenMelodyForCausalLM", "MusicgenMelodyForConditionalGeneration", "MusicgenMelodyModel", @@ -2881,7 +2533,6 @@ ) _import_structure["models.mvp"].extend( [ - "MVP_PRETRAINED_MODEL_ARCHIVE_LIST", "MvpForCausalLM", "MvpForConditionalGeneration", "MvpForQuestionAnswering", @@ -2892,7 +2543,6 @@ ) _import_structure["models.nat"].extend( [ - "NAT_PRETRAINED_MODEL_ARCHIVE_LIST", "NatBackbone", "NatForImageClassification", "NatModel", @@ -2901,7 +2551,6 @@ ) _import_structure["models.nezha"].extend( [ - "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST", "NezhaForMaskedLM", "NezhaForMultipleChoice", "NezhaForNextSentencePrediction", @@ -2915,7 +2564,6 @@ ) _import_structure["models.nllb_moe"].extend( [ - "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST", "NllbMoeForConditionalGeneration", "NllbMoeModel", "NllbMoePreTrainedModel", @@ -2925,7 +2573,6 @@ ) _import_structure["models.nystromformer"].extend( [ - "NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "NystromformerForMaskedLM", "NystromformerForMultipleChoice", "NystromformerForQuestionAnswering", @@ -2945,7 +2592,6 @@ ) _import_structure["models.oneformer"].extend( [ - "ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "OneFormerForUniversalSegmentation", "OneFormerModel", "OneFormerPreTrainedModel", @@ -2953,7 +2599,6 @@ ) _import_structure["models.openai"].extend( [ - "OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST", "OpenAIGPTDoubleHeadsModel", "OpenAIGPTForSequenceClassification", "OpenAIGPTLMHeadModel", @@ -2964,7 +2609,6 @@ ) _import_structure["models.opt"].extend( [ - "OPT_PRETRAINED_MODEL_ARCHIVE_LIST", "OPTForCausalLM", "OPTForQuestionAnswering", 
"OPTForSequenceClassification", @@ -2974,7 +2618,6 @@ ) _import_structure["models.owlv2"].extend( [ - "OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Owlv2ForObjectDetection", "Owlv2Model", "Owlv2PreTrainedModel", @@ -2984,7 +2627,6 @@ ) _import_structure["models.owlvit"].extend( [ - "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "OwlViTForObjectDetection", "OwlViTModel", "OwlViTPreTrainedModel", @@ -2994,7 +2636,6 @@ ) _import_structure["models.patchtsmixer"].extend( [ - "PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST", "PatchTSMixerForPrediction", "PatchTSMixerForPretraining", "PatchTSMixerForRegression", @@ -3005,7 +2646,6 @@ ) _import_structure["models.patchtst"].extend( [ - "PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST", "PatchTSTForClassification", "PatchTSTForPrediction", "PatchTSTForPretraining", @@ -3024,7 +2664,6 @@ ) _import_structure["models.pegasus_x"].extend( [ - "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST", "PegasusXForConditionalGeneration", "PegasusXModel", "PegasusXPreTrainedModel", @@ -3032,7 +2671,6 @@ ) _import_structure["models.perceiver"].extend( [ - "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST", "PerceiverForImageClassificationConvProcessing", "PerceiverForImageClassificationFourier", "PerceiverForImageClassificationLearned", @@ -3055,7 +2693,6 @@ ) _import_structure["models.phi"].extend( [ - "PHI_PRETRAINED_MODEL_ARCHIVE_LIST", "PhiForCausalLM", "PhiForSequenceClassification", "PhiForTokenClassification", @@ -3065,7 +2702,6 @@ ) _import_structure["models.phi3"].extend( [ - "PHI3_PRETRAINED_MODEL_ARCHIVE_LIST", "Phi3ForCausalLM", "Phi3ForSequenceClassification", "Phi3ForTokenClassification", @@ -3075,7 +2711,6 @@ ) _import_structure["models.pix2struct"].extend( [ - "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST", "Pix2StructForConditionalGeneration", "Pix2StructPreTrainedModel", "Pix2StructTextModel", @@ -3084,7 +2719,6 @@ ) _import_structure["models.plbart"].extend( [ - "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST", "PLBartForCausalLM", "PLBartForConditionalGeneration", 
"PLBartForSequenceClassification", @@ -3094,7 +2728,6 @@ ) _import_structure["models.poolformer"].extend( [ - "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "PoolFormerForImageClassification", "PoolFormerModel", "PoolFormerPreTrainedModel", @@ -3102,14 +2735,12 @@ ) _import_structure["models.pop2piano"].extend( [ - "POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST", "Pop2PianoForConditionalGeneration", "Pop2PianoPreTrainedModel", ] ) _import_structure["models.prophetnet"].extend( [ - "PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST", "ProphetNetDecoder", "ProphetNetEncoder", "ProphetNetForCausalLM", @@ -3120,7 +2751,6 @@ ) _import_structure["models.pvt"].extend( [ - "PVT_PRETRAINED_MODEL_ARCHIVE_LIST", "PvtForImageClassification", "PvtModel", "PvtPreTrainedModel", @@ -3136,7 +2766,6 @@ ) _import_structure["models.qdqbert"].extend( [ - "QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "QDQBertForMaskedLM", "QDQBertForMultipleChoice", "QDQBertForNextSentencePrediction", @@ -3176,7 +2805,6 @@ ) _import_structure["models.realm"].extend( [ - "REALM_PRETRAINED_MODEL_ARCHIVE_LIST", "RealmEmbedder", "RealmForOpenQA", "RealmKnowledgeAugEncoder", @@ -3196,7 +2824,6 @@ ) _import_structure["models.reformer"].extend( [ - "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", @@ -3209,7 +2836,6 @@ ) _import_structure["models.regnet"].extend( [ - "REGNET_PRETRAINED_MODEL_ARCHIVE_LIST", "RegNetForImageClassification", "RegNetModel", "RegNetPreTrainedModel", @@ -3217,7 +2843,6 @@ ) _import_structure["models.rembert"].extend( [ - "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RemBertForCausalLM", "RemBertForMaskedLM", "RemBertForMultipleChoice", @@ -3232,7 +2857,6 @@ ) _import_structure["models.resnet"].extend( [ - "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", "ResNetBackbone", "ResNetForImageClassification", "ResNetModel", @@ -3241,7 +2865,6 @@ ) _import_structure["models.roberta"].extend( [ - "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaForCausalLM", 
"RobertaForMaskedLM", "RobertaForMultipleChoice", @@ -3254,7 +2877,6 @@ ) _import_structure["models.roberta_prelayernorm"].extend( [ - "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaPreLayerNormForCausalLM", "RobertaPreLayerNormForMaskedLM", "RobertaPreLayerNormForMultipleChoice", @@ -3267,7 +2889,6 @@ ) _import_structure["models.roc_bert"].extend( [ - "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RoCBertForCausalLM", "RoCBertForMaskedLM", "RoCBertForMultipleChoice", @@ -3283,7 +2904,6 @@ ) _import_structure["models.roformer"].extend( [ - "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", @@ -3298,7 +2918,6 @@ ) _import_structure["models.rwkv"].extend( [ - "RWKV_PRETRAINED_MODEL_ARCHIVE_LIST", "RwkvForCausalLM", "RwkvModel", "RwkvPreTrainedModel", @@ -3306,14 +2925,12 @@ ) _import_structure["models.sam"].extend( [ - "SAM_PRETRAINED_MODEL_ARCHIVE_LIST", "SamModel", "SamPreTrainedModel", ] ) _import_structure["models.seamless_m4t"].extend( [ - "SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST", "SeamlessM4TCodeHifiGan", "SeamlessM4TForSpeechToSpeech", "SeamlessM4TForSpeechToText", @@ -3328,7 +2945,6 @@ ) _import_structure["models.seamless_m4t_v2"].extend( [ - "SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST", "SeamlessM4Tv2ForSpeechToSpeech", "SeamlessM4Tv2ForSpeechToText", "SeamlessM4Tv2ForTextToSpeech", @@ -3339,7 +2955,6 @@ ) _import_structure["models.segformer"].extend( [ - "SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "SegformerDecodeHead", "SegformerForImageClassification", "SegformerForSemanticSegmentation", @@ -3350,7 +2965,6 @@ ) _import_structure["models.seggpt"].extend( [ - "SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST", "SegGptForImageSegmentation", "SegGptModel", "SegGptPreTrainedModel", @@ -3358,7 +2972,6 @@ ) _import_structure["models.sew"].extend( [ - "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", @@ -3367,7 +2980,6 @@ ) 
_import_structure["models.sew_d"].extend( [ - "SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWDForCTC", "SEWDForSequenceClassification", "SEWDModel", @@ -3376,7 +2988,6 @@ ) _import_structure["models.siglip"].extend( [ - "SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "SiglipForImageClassification", "SiglipModel", "SiglipPreTrainedModel", @@ -3387,7 +2998,6 @@ _import_structure["models.speech_encoder_decoder"].extend(["SpeechEncoderDecoderModel"]) _import_structure["models.speech_to_text"].extend( [ - "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Speech2TextForConditionalGeneration", "Speech2TextModel", "Speech2TextPreTrainedModel", @@ -3396,7 +3006,6 @@ _import_structure["models.speech_to_text_2"].extend(["Speech2Text2ForCausalLM", "Speech2Text2PreTrainedModel"]) _import_structure["models.speecht5"].extend( [ - "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST", "SpeechT5ForSpeechToSpeech", "SpeechT5ForSpeechToText", "SpeechT5ForTextToSpeech", @@ -3407,7 +3016,6 @@ ) _import_structure["models.splinter"].extend( [ - "SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST", "SplinterForPreTraining", "SplinterForQuestionAnswering", "SplinterLayer", @@ -3417,7 +3025,6 @@ ) _import_structure["models.squeezebert"].extend( [ - "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "SqueezeBertForMaskedLM", "SqueezeBertForMultipleChoice", "SqueezeBertForQuestionAnswering", @@ -3446,14 +3053,12 @@ ) _import_structure["models.superpoint"].extend( [ - "SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST", "SuperPointForKeypointDetection", "SuperPointPreTrainedModel", ] ) _import_structure["models.swiftformer"].extend( [ - "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "SwiftFormerForImageClassification", "SwiftFormerModel", "SwiftFormerPreTrainedModel", @@ -3461,7 +3066,6 @@ ) _import_structure["models.swin"].extend( [ - "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "SwinBackbone", "SwinForImageClassification", "SwinForMaskedImageModeling", @@ -3471,7 +3075,6 @@ ) _import_structure["models.swin2sr"].extend( [ - 
"SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST", "Swin2SRForImageSuperResolution", "Swin2SRModel", "Swin2SRPreTrainedModel", @@ -3479,7 +3082,6 @@ ) _import_structure["models.swinv2"].extend( [ - "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Swinv2Backbone", "Swinv2ForImageClassification", "Swinv2ForMaskedImageModeling", @@ -3489,7 +3091,6 @@ ) _import_structure["models.switch_transformers"].extend( [ - "SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST", "SwitchTransformersEncoderModel", "SwitchTransformersForConditionalGeneration", "SwitchTransformersModel", @@ -3500,7 +3101,6 @@ ) _import_structure["models.t5"].extend( [ - "T5_PRETRAINED_MODEL_ARCHIVE_LIST", "T5EncoderModel", "T5ForConditionalGeneration", "T5ForQuestionAnswering", @@ -3513,7 +3113,6 @@ ) _import_structure["models.table_transformer"].extend( [ - "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TableTransformerForObjectDetection", "TableTransformerModel", "TableTransformerPreTrainedModel", @@ -3521,7 +3120,6 @@ ) _import_structure["models.tapas"].extend( [ - "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TapasForMaskedLM", "TapasForQuestionAnswering", "TapasForSequenceClassification", @@ -3532,7 +3130,6 @@ ) _import_structure["models.time_series_transformer"].extend( [ - "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TimeSeriesTransformerForPrediction", "TimeSeriesTransformerModel", "TimeSeriesTransformerPreTrainedModel", @@ -3540,7 +3137,6 @@ ) _import_structure["models.timesformer"].extend( [ - "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TimesformerForVideoClassification", "TimesformerModel", "TimesformerPreTrainedModel", @@ -3549,14 +3145,12 @@ _import_structure["models.timm_backbone"].extend(["TimmBackbone"]) _import_structure["models.trocr"].extend( [ - "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] ) _import_structure["models.tvlt"].extend( [ - "TVLT_PRETRAINED_MODEL_ARCHIVE_LIST", "TvltForAudioVisualClassification", "TvltForPreTraining", "TvltModel", @@ 
-3565,7 +3159,6 @@ ) _import_structure["models.tvp"].extend( [ - "TVP_PRETRAINED_MODEL_ARCHIVE_LIST", "TvpForVideoGrounding", "TvpModel", "TvpPreTrainedModel", @@ -3573,7 +3166,6 @@ ) _import_structure["models.udop"].extend( [ - "UDOP_PRETRAINED_MODEL_ARCHIVE_LIST", "UdopEncoderModel", "UdopForConditionalGeneration", "UdopModel", @@ -3593,7 +3185,6 @@ ) _import_structure["models.unispeech"].extend( [ - "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST", "UniSpeechForCTC", "UniSpeechForPreTraining", "UniSpeechForSequenceClassification", @@ -3603,7 +3194,6 @@ ) _import_structure["models.unispeech_sat"].extend( [ - "UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST", "UniSpeechSatForAudioFrameClassification", "UniSpeechSatForCTC", "UniSpeechSatForPreTraining", @@ -3615,7 +3205,6 @@ ) _import_structure["models.univnet"].extend( [ - "UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST", "UnivNetModel", ] ) @@ -3627,7 +3216,6 @@ ) _import_structure["models.videomae"].extend( [ - "VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST", "VideoMAEForPreTraining", "VideoMAEForVideoClassification", "VideoMAEModel", @@ -3636,7 +3224,6 @@ ) _import_structure["models.vilt"].extend( [ - "VILT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViltForImageAndTextRetrieval", "ViltForImagesAndTextClassification", "ViltForMaskedLM", @@ -3649,7 +3236,6 @@ ) _import_structure["models.vipllava"].extend( [ - "VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST", "VipLlavaForConditionalGeneration", "VipLlavaPreTrainedModel", ] @@ -3658,7 +3244,6 @@ _import_structure["models.vision_text_dual_encoder"].extend(["VisionTextDualEncoderModel"]) _import_structure["models.visual_bert"].extend( [ - "VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "VisualBertForMultipleChoice", "VisualBertForPreTraining", "VisualBertForQuestionAnswering", @@ -3671,7 +3256,6 @@ ) _import_structure["models.vit"].extend( [ - "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", @@ -3680,7 +3264,6 @@ ) 
_import_structure["models.vit_hybrid"].extend( [ - "VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTHybridForImageClassification", "ViTHybridModel", "ViTHybridPreTrainedModel", @@ -3688,7 +3271,6 @@ ) _import_structure["models.vit_mae"].extend( [ - "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTMAEForPreTraining", "ViTMAELayer", "ViTMAEModel", @@ -3697,7 +3279,6 @@ ) _import_structure["models.vit_msn"].extend( [ - "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTMSNForImageClassification", "ViTMSNModel", "ViTMSNPreTrainedModel", @@ -3705,7 +3286,6 @@ ) _import_structure["models.vitdet"].extend( [ - "VITDET_PRETRAINED_MODEL_ARCHIVE_LIST", "VitDetBackbone", "VitDetModel", "VitDetPreTrainedModel", @@ -3713,21 +3293,18 @@ ) _import_structure["models.vitmatte"].extend( [ - "VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST", "VitMatteForImageMatting", "VitMattePreTrainedModel", ] ) _import_structure["models.vits"].extend( [ - "VITS_PRETRAINED_MODEL_ARCHIVE_LIST", "VitsModel", "VitsPreTrainedModel", ] ) _import_structure["models.vivit"].extend( [ - "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "VivitForVideoClassification", "VivitModel", "VivitPreTrainedModel", @@ -3735,7 +3312,6 @@ ) _import_structure["models.wav2vec2"].extend( [ - "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ForAudioFrameClassification", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", @@ -3748,7 +3324,6 @@ ) _import_structure["models.wav2vec2_bert"].extend( [ - "WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2BertForAudioFrameClassification", "Wav2Vec2BertForCTC", "Wav2Vec2BertForSequenceClassification", @@ -3759,7 +3334,6 @@ ) _import_structure["models.wav2vec2_conformer"].extend( [ - "WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ConformerForAudioFrameClassification", "Wav2Vec2ConformerForCTC", "Wav2Vec2ConformerForPreTraining", @@ -3771,7 +3345,6 @@ ) _import_structure["models.wavlm"].extend( [ - "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST", "WavLMForAudioFrameClassification", "WavLMForCTC", 
"WavLMForSequenceClassification", @@ -3782,7 +3355,6 @@ ) _import_structure["models.whisper"].extend( [ - "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", "WhisperForAudioClassification", "WhisperForCausalLM", "WhisperForConditionalGeneration", @@ -3792,7 +3364,6 @@ ) _import_structure["models.x_clip"].extend( [ - "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "XCLIPModel", "XCLIPPreTrainedModel", "XCLIPTextModel", @@ -3801,7 +3372,6 @@ ) _import_structure["models.xglm"].extend( [ - "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XGLMForCausalLM", "XGLMModel", "XGLMPreTrainedModel", @@ -3809,7 +3379,6 @@ ) _import_structure["models.xlm"].extend( [ - "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", @@ -3822,7 +3391,6 @@ ) _import_structure["models.xlm_prophetnet"].extend( [ - "XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMProphetNetDecoder", "XLMProphetNetEncoder", "XLMProphetNetForCausalLM", @@ -3833,7 +3401,6 @@ ) _import_structure["models.xlm_roberta"].extend( [ - "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMRobertaForCausalLM", "XLMRobertaForMaskedLM", "XLMRobertaForMultipleChoice", @@ -3846,7 +3413,6 @@ ) _import_structure["models.xlm_roberta_xl"].extend( [ - "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMRobertaXLForCausalLM", "XLMRobertaXLForMaskedLM", "XLMRobertaXLForMultipleChoice", @@ -3859,7 +3425,6 @@ ) _import_structure["models.xlnet"].extend( [ - "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "XLNetForMultipleChoice", "XLNetForQuestionAnswering", "XLNetForQuestionAnsweringSimple", @@ -3873,7 +3438,6 @@ ) _import_structure["models.xmod"].extend( [ - "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST", "XmodForCausalLM", "XmodForMaskedLM", "XmodForMultipleChoice", @@ -3886,7 +3450,6 @@ ) _import_structure["models.yolos"].extend( [ - "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST", "YolosForObjectDetection", "YolosModel", "YolosPreTrainedModel", @@ -3894,7 +3457,6 @@ ) _import_structure["models.yoso"].extend( [ - 
"YOSO_PRETRAINED_MODEL_ARCHIVE_LIST", "YosoForMaskedLM", "YosoForMultipleChoice", "YosoForQuestionAnswering", @@ -3972,7 +3534,6 @@ # TensorFlow models structure _import_structure["models.albert"].extend( [ - "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAlbertForMaskedLM", "TFAlbertForMultipleChoice", "TFAlbertForPreTraining", @@ -4042,7 +3603,6 @@ ) _import_structure["models.bert"].extend( [ - "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFBertEmbeddings", "TFBertForMaskedLM", "TFBertForMultipleChoice", @@ -4073,7 +3633,6 @@ ) _import_structure["models.blip"].extend( [ - "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFBlipForConditionalGeneration", "TFBlipForImageTextRetrieval", "TFBlipForQuestionAnswering", @@ -4085,7 +3644,6 @@ ) _import_structure["models.camembert"].extend( [ - "TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCamembertForCausalLM", "TFCamembertForMaskedLM", "TFCamembertForMultipleChoice", @@ -4098,7 +3656,6 @@ ) _import_structure["models.clip"].extend( [ - "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", @@ -4107,7 +3664,6 @@ ) _import_structure["models.convbert"].extend( [ - "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFConvBertForMaskedLM", "TFConvBertForMultipleChoice", "TFConvBertForQuestionAnswering", @@ -4134,7 +3690,6 @@ ) _import_structure["models.ctrl"].extend( [ - "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", @@ -4143,7 +3698,6 @@ ) _import_structure["models.cvt"].extend( [ - "TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCvtForImageClassification", "TFCvtModel", "TFCvtPreTrainedModel", @@ -4159,7 +3713,6 @@ ) _import_structure["models.deberta"].extend( [ - "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDebertaForMaskedLM", "TFDebertaForQuestionAnswering", "TFDebertaForSequenceClassification", @@ -4170,7 +3723,6 @@ ) _import_structure["models.deberta_v2"].extend( [ - "TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST", 
"TFDebertaV2ForMaskedLM", "TFDebertaV2ForMultipleChoice", "TFDebertaV2ForQuestionAnswering", @@ -4182,7 +3734,6 @@ ) _import_structure["models.deit"].extend( [ - "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", @@ -4192,7 +3743,6 @@ ) _import_structure["models.deprecated.transfo_xl"].extend( [ - "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAdaptiveEmbedding", "TFTransfoXLForSequenceClassification", "TFTransfoXLLMHeadModel", @@ -4203,7 +3753,6 @@ ) _import_structure["models.distilbert"].extend( [ - "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDistilBertForMaskedLM", "TFDistilBertForMultipleChoice", "TFDistilBertForQuestionAnswering", @@ -4216,9 +3765,6 @@ ) _import_structure["models.dpr"].extend( [ - "TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", - "TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", - "TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDPRContextEncoder", "TFDPRPretrainedContextEncoder", "TFDPRPretrainedQuestionEncoder", @@ -4229,7 +3775,6 @@ ) _import_structure["models.efficientformer"].extend( [ - "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerModel", @@ -4238,7 +3783,6 @@ ) _import_structure["models.electra"].extend( [ - "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFElectraForMaskedLM", "TFElectraForMultipleChoice", "TFElectraForPreTraining", @@ -4252,7 +3796,6 @@ _import_structure["models.encoder_decoder"].append("TFEncoderDecoderModel") _import_structure["models.esm"].extend( [ - "ESM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFEsmForMaskedLM", "TFEsmForSequenceClassification", "TFEsmForTokenClassification", @@ -4262,7 +3805,6 @@ ) _import_structure["models.flaubert"].extend( [ - "TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFlaubertForMultipleChoice", "TFFlaubertForQuestionAnsweringSimple", 
"TFFlaubertForSequenceClassification", @@ -4274,7 +3816,6 @@ ) _import_structure["models.funnel"].extend( [ - "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFunnelBaseModel", "TFFunnelForMaskedLM", "TFFunnelForMultipleChoice", @@ -4288,7 +3829,6 @@ ) _import_structure["models.gpt2"].extend( [ - "TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFGPT2DoubleHeadsModel", "TFGPT2ForSequenceClassification", "TFGPT2LMHeadModel", @@ -4308,7 +3848,6 @@ ) _import_structure["models.groupvit"].extend( [ - "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFGroupViTModel", "TFGroupViTPreTrainedModel", "TFGroupViTTextModel", @@ -4317,7 +3856,6 @@ ) _import_structure["models.hubert"].extend( [ - "TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFHubertForCTC", "TFHubertModel", "TFHubertPreTrainedModel", @@ -4325,7 +3863,6 @@ ) _import_structure["models.layoutlm"].extend( [ - "TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFLayoutLMForMaskedLM", "TFLayoutLMForQuestionAnswering", "TFLayoutLMForSequenceClassification", @@ -4337,7 +3874,6 @@ ) _import_structure["models.layoutlmv3"].extend( [ - "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST", "TFLayoutLMv3ForQuestionAnswering", "TFLayoutLMv3ForSequenceClassification", "TFLayoutLMv3ForTokenClassification", @@ -4348,7 +3884,6 @@ _import_structure["models.led"].extend(["TFLEDForConditionalGeneration", "TFLEDModel", "TFLEDPreTrainedModel"]) _import_structure["models.longformer"].extend( [ - "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFLongformerForMaskedLM", "TFLongformerForMultipleChoice", "TFLongformerForQuestionAnswering", @@ -4361,7 +3896,6 @@ ) _import_structure["models.lxmert"].extend( [ - "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFLxmertForPreTraining", "TFLxmertMainLayer", "TFLxmertModel", @@ -4375,7 +3909,6 @@ ) _import_structure["models.mobilebert"].extend( [ - "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileBertForMaskedLM", "TFMobileBertForMultipleChoice", "TFMobileBertForNextSentencePrediction", @@ -4390,7 +3923,6 @@ ) 
_import_structure["models.mobilevit"].extend( [ - "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", @@ -4399,7 +3931,6 @@ ) _import_structure["models.mpnet"].extend( [ - "TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMPNetForMaskedLM", "TFMPNetForMultipleChoice", "TFMPNetForQuestionAnswering", @@ -4413,7 +3944,6 @@ _import_structure["models.mt5"].extend(["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]) _import_structure["models.openai"].extend( [ - "TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFOpenAIGPTDoubleHeadsModel", "TFOpenAIGPTForSequenceClassification", "TFOpenAIGPTLMHeadModel", @@ -4446,7 +3976,6 @@ ) _import_structure["models.regnet"].extend( [ - "TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRegNetForImageClassification", "TFRegNetModel", "TFRegNetPreTrainedModel", @@ -4454,7 +3983,6 @@ ) _import_structure["models.rembert"].extend( [ - "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRemBertForCausalLM", "TFRemBertForMaskedLM", "TFRemBertForMultipleChoice", @@ -4468,7 +3996,6 @@ ) _import_structure["models.resnet"].extend( [ - "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFResNetForImageClassification", "TFResNetModel", "TFResNetPreTrainedModel", @@ -4476,7 +4003,6 @@ ) _import_structure["models.roberta"].extend( [ - "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaForCausalLM", "TFRobertaForMaskedLM", "TFRobertaForMultipleChoice", @@ -4490,7 +4016,6 @@ ) _import_structure["models.roberta_prelayernorm"].extend( [ - "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaPreLayerNormForCausalLM", "TFRobertaPreLayerNormForMaskedLM", "TFRobertaPreLayerNormForMultipleChoice", @@ -4504,7 +4029,6 @@ ) _import_structure["models.roformer"].extend( [ - "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", @@ -4518,14 +4042,12 @@ ) _import_structure["models.sam"].extend( 
[ - "TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSamModel", "TFSamPreTrainedModel", ] ) _import_structure["models.segformer"].extend( [ - "TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSegformerDecodeHead", "TFSegformerForImageClassification", "TFSegformerForSemanticSegmentation", @@ -4535,7 +4057,6 @@ ) _import_structure["models.speech_to_text"].extend( [ - "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSpeech2TextForConditionalGeneration", "TFSpeech2TextModel", "TFSpeech2TextPreTrainedModel", @@ -4543,7 +4064,6 @@ ) _import_structure["models.swiftformer"].extend( [ - "TF_SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSwiftFormerForImageClassification", "TFSwiftFormerModel", "TFSwiftFormerPreTrainedModel", @@ -4551,7 +4071,6 @@ ) _import_structure["models.swin"].extend( [ - "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSwinForImageClassification", "TFSwinForMaskedImageModeling", "TFSwinModel", @@ -4560,7 +4079,6 @@ ) _import_structure["models.t5"].extend( [ - "TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST", "TFT5EncoderModel", "TFT5ForConditionalGeneration", "TFT5Model", @@ -4569,7 +4087,6 @@ ) _import_structure["models.tapas"].extend( [ - "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TFTapasForMaskedLM", "TFTapasForQuestionAnswering", "TFTapasForSequenceClassification", @@ -4595,7 +4112,6 @@ ) _import_structure["models.wav2vec2"].extend( [ - "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWav2Vec2ForCTC", "TFWav2Vec2ForSequenceClassification", "TFWav2Vec2Model", @@ -4604,7 +4120,6 @@ ) _import_structure["models.whisper"].extend( [ - "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWhisperForConditionalGeneration", "TFWhisperModel", "TFWhisperPreTrainedModel", @@ -4612,7 +4127,6 @@ ) _import_structure["models.xglm"].extend( [ - "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXGLMForCausalLM", "TFXGLMModel", "TFXGLMPreTrainedModel", @@ -4620,7 +4134,6 @@ ) _import_structure["models.xlm"].extend( [ - "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", 
"TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", @@ -4633,7 +4146,6 @@ ) _import_structure["models.xlm_roberta"].extend( [ - "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMRobertaForCausalLM", "TFXLMRobertaForMaskedLM", "TFXLMRobertaForMultipleChoice", @@ -4646,7 +4158,6 @@ ) _import_structure["models.xlnet"].extend( [ - "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLNetForMultipleChoice", "TFXLNetForQuestionAnsweringSimple", "TFXLNetForSequenceClassification", @@ -5025,7 +4536,6 @@ ) _import_structure["models.xlm_roberta"].extend( [ - "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxXLMRobertaForMaskedLM", "FlaxXLMRobertaForMultipleChoice", "FlaxXLMRobertaForQuestionAnswering", @@ -5127,28 +4637,24 @@ load_tf2_model_in_pytorch_model, load_tf2_weights_in_pytorch_model, ) - from .models.albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig + from .models.albert import AlbertConfig from .models.align import ( - ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP, AlignConfig, AlignProcessor, AlignTextConfig, AlignVisionConfig, ) from .models.altclip import ( - ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPProcessor, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .models.audio_spectrogram_transformer import ( - AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ASTFeatureExtractor, ) from .models.auto import ( - ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, IMAGE_PROCESSOR_MAPPING, @@ -5162,7 +4668,6 @@ AutoTokenizer, ) from .models.autoformer import ( - AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) from .models.bark import ( @@ -5173,9 +4678,8 @@ BarkSemanticConfig, ) from .models.bart import BartConfig, BartTokenizer - from .models.beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig + from .models.beit import BeitConfig from .models.bert import ( - BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BasicTokenizer, BertConfig, BertTokenizer, @@ -5188,80 +4692,67 @@ 
MecabTokenizer, ) from .models.bertweet import BertweetTokenizer - from .models.big_bird import BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdConfig + from .models.big_bird import BigBirdConfig from .models.bigbird_pegasus import ( - BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, ) from .models.biogpt import ( - BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig, BioGptTokenizer, ) - from .models.bit import BIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BitConfig + from .models.bit import BitConfig from .models.blenderbot import ( - BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotTokenizer, ) from .models.blenderbot_small import ( - BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallTokenizer, ) from .models.blip import ( - BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipProcessor, BlipTextConfig, BlipVisionConfig, ) from .models.blip_2 import ( - BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Blip2Config, Blip2Processor, Blip2QFormerConfig, Blip2VisionConfig, ) - from .models.bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig + from .models.bloom import BloomConfig from .models.bridgetower import ( - BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerProcessor, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .models.bros import ( - BROS_PRETRAINED_CONFIG_ARCHIVE_MAP, BrosConfig, BrosProcessor, ) from .models.byt5 import ByT5Tokenizer from .models.camembert import ( - CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig, ) from .models.canine import ( - CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig, CanineTokenizer, ) from .models.chinese_clip import ( - CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPProcessor, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .models.clap import ( - CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapProcessor, ClapTextConfig, ) from .models.clip import ( - 
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPProcessor, CLIPTextConfig, @@ -5269,14 +4760,12 @@ CLIPVisionConfig, ) from .models.clipseg import ( - CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegProcessor, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .models.clvp import ( - CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP, ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig, @@ -5285,221 +4774,182 @@ ClvpTokenizer, ) from .models.codegen import ( - CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, CodeGenConfig, CodeGenTokenizer, ) - from .models.cohere import COHERE_PRETRAINED_CONFIG_ARCHIVE_MAP, CohereConfig + from .models.cohere import CohereConfig from .models.conditional_detr import ( - CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ) from .models.convbert import ( - CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertTokenizer, ) - from .models.convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig + from .models.convnext import ConvNextConfig from .models.convnextv2 import ( - CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextV2Config, ) from .models.cpmant import ( - CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig, CpmAntTokenizer, ) from .models.ctrl import ( - CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig, CTRLTokenizer, ) - from .models.cvt import CVT_PRETRAINED_CONFIG_ARCHIVE_MAP, CvtConfig + from .models.cvt import CvtConfig from .models.data2vec import ( - DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, - DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig, Data2VecTextConfig, Data2VecVisionConfig, ) from .models.dbrx import DbrxConfig from .models.deberta import ( - DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaTokenizer, ) from .models.deberta_v2 import ( - DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaV2Config, ) from .models.decision_transformer import ( - DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, DecisionTransformerConfig, ) from .models.deformable_detr 
import ( - DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DeformableDetrConfig, ) - from .models.deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig + from .models.deit import DeiTConfig from .models.deprecated.mctct import ( - MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig, MCTCTFeatureExtractor, MCTCTProcessor, ) from .models.deprecated.mmbt import MMBTConfig from .models.deprecated.open_llama import ( - OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenLlamaConfig, ) from .models.deprecated.retribert import ( - RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig, RetriBertTokenizer, ) from .models.deprecated.tapex import TapexTokenizer from .models.deprecated.trajectory_transformer import ( - TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) from .models.deprecated.transfo_xl import ( - TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig, TransfoXLCorpus, TransfoXLTokenizer, ) - from .models.deprecated.van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig - from .models.depth_anything import DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP, DepthAnythingConfig - from .models.deta import DETA_PRETRAINED_CONFIG_ARCHIVE_MAP, DetaConfig - from .models.detr import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DetrConfig - from .models.dinat import DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP, DinatConfig - from .models.dinov2 import DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Dinov2Config + from .models.deprecated.van import VanConfig + from .models.depth_anything import DepthAnythingConfig + from .models.deta import DetaConfig + from .models.detr import DetrConfig + from .models.dinat import DinatConfig + from .models.dinov2 import Dinov2Config from .models.distilbert import ( - DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertTokenizer, ) from .models.donut import ( - DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, DonutProcessor, DonutSwinConfig, ) from .models.dpr import ( - DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig, 
DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderOutput, DPRReaderTokenizer, ) - from .models.dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig + from .models.dpt import DPTConfig from .models.efficientformer import ( - EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig, ) from .models.efficientnet import ( - EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, ) from .models.electra import ( - ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraTokenizer, ) from .models.encodec import ( - ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, EncodecFeatureExtractor, ) from .models.encoder_decoder import EncoderDecoderConfig - from .models.ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig - from .models.ernie_m import ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieMConfig - from .models.esm import ESM_PRETRAINED_CONFIG_ARCHIVE_MAP, EsmConfig, EsmTokenizer - from .models.falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig + from .models.ernie import ErnieConfig + from .models.ernie_m import ErnieMConfig + from .models.esm import EsmConfig, EsmTokenizer + from .models.falcon import FalconConfig from .models.fastspeech2_conformer import ( - FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP, - FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, - FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP, FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerTokenizer, FastSpeech2ConformerWithHifiGanConfig, ) - from .models.flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertTokenizer + from .models.flaubert import FlaubertConfig, FlaubertTokenizer from .models.flava import ( - FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig, ) - from .models.fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig - from .models.focalnet import 
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig + from .models.fnet import FNetConfig + from .models.focalnet import FocalNetConfig from .models.fsmt import ( - FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig, FSMTTokenizer, ) from .models.funnel import ( - FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig, FunnelTokenizer, ) - from .models.fuyu import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP, FuyuConfig - from .models.gemma import GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP, GemmaConfig + from .models.fuyu import FuyuConfig + from .models.gemma import GemmaConfig from .models.git import ( - GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitProcessor, GitVisionConfig, ) - from .models.glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig + from .models.glpn import GLPNConfig from .models.gpt2 import ( - GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2Tokenizer, ) from .models.gpt_bigcode import ( - GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig, ) - from .models.gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig - from .models.gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig + from .models.gpt_neo import GPTNeoConfig + from .models.gpt_neox import GPTNeoXConfig from .models.gpt_neox_japanese import ( - GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig, ) - from .models.gptj import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTJConfig + from .models.gptj import GPTJConfig from .models.gptsan_japanese import ( - GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTSanJapaneseConfig, GPTSanJapaneseTokenizer, ) - from .models.graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig + from .models.graphormer import GraphormerConfig from .models.grounding_dino import ( - GROUNDING_DINO_PRETRAINED_CONFIG_ARCHIVE_MAP, GroundingDinoConfig, GroundingDinoProcessor, ) from .models.groupvit import ( - GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig, 
) from .models.herbert import HerbertTokenizer - from .models.hubert import HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, HubertConfig - from .models.ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig + from .models.hubert import HubertConfig + from .models.ibert import IBertConfig from .models.idefics import ( - IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP, IdeficsConfig, ) from .models.idefics2 import Idefics2Config - from .models.imagegpt import IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ImageGPTConfig - from .models.informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig + from .models.imagegpt import ImageGPTConfig + from .models.informer import InformerConfig from .models.instructblip import ( - INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipProcessor, InstructBlipQFormerConfig, @@ -5507,24 +4957,20 @@ ) from .models.jamba import JambaConfig from .models.jukebox import ( - JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxTokenizer, JukeboxVQVAEConfig, ) from .models.kosmos2 import ( - KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP, Kosmos2Config, Kosmos2Processor, ) from .models.layoutlm import ( - LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig, LayoutLMTokenizer, ) from .models.layoutlmv2 import ( - LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config, LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor, @@ -5532,7 +4978,6 @@ LayoutLMv2Tokenizer, ) from .models.layoutlmv3 import ( - LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv3Config, LayoutLMv3FeatureExtractor, LayoutLMv3ImageProcessor, @@ -5540,228 +4985,190 @@ LayoutLMv3Tokenizer, ) from .models.layoutxlm import LayoutXLMProcessor - from .models.led import LED_PRETRAINED_CONFIG_ARCHIVE_MAP, LEDConfig, LEDTokenizer - from .models.levit import LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, LevitConfig - from .models.lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig - from .models.llama import 
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig + from .models.led import LEDConfig, LEDTokenizer + from .models.levit import LevitConfig + from .models.lilt import LiltConfig + from .models.llama import LlamaConfig from .models.llava import ( - LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlavaConfig, LlavaProcessor, ) from .models.llava_next import ( - LLAVA_NEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, LlavaNextConfig, LlavaNextProcessor, ) from .models.longformer import ( - LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerTokenizer, ) - from .models.longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config + from .models.longt5 import LongT5Config from .models.luke import ( - LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig, LukeTokenizer, ) from .models.lxmert import ( - LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig, LxmertTokenizer, ) - from .models.m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config - from .models.mamba import MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP, MambaConfig + from .models.m2m_100 import M2M100Config + from .models.mamba import MambaConfig from .models.marian import MarianConfig from .models.markuplm import ( - MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP, MarkupLMConfig, MarkupLMFeatureExtractor, MarkupLMProcessor, MarkupLMTokenizer, ) from .models.mask2former import ( - MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig, ) from .models.maskformer import ( - MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig, MaskFormerSwinConfig, ) from .models.mbart import MBartConfig - from .models.mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig + from .models.mega import MegaConfig from .models.megatron_bert import ( - MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig, ) from .models.mgp_str import ( - MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig, MgpstrProcessor, MgpstrTokenizer, ) - from .models.mistral import MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP, MistralConfig - from 
.models.mixtral import MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP, MixtralConfig + from .models.mistral import MistralConfig + from .models.mixtral import MixtralConfig from .models.mobilebert import ( - MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertTokenizer, ) from .models.mobilenet_v1 import ( - MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetV1Config, ) from .models.mobilenet_v2 import ( - MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetV2Config, ) from .models.mobilevit import ( - MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, ) from .models.mobilevitv2 import ( - MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTV2Config, ) from .models.mpnet import ( - MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP, MPNetConfig, MPNetTokenizer, ) - from .models.mpt import MPT_PRETRAINED_CONFIG_ARCHIVE_MAP, MptConfig - from .models.mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig + from .models.mpt import MptConfig + from .models.mra import MraConfig from .models.mt5 import MT5Config from .models.musicgen import ( - MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, MusicgenConfig, MusicgenDecoderConfig, ) from .models.musicgen_melody import ( - MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST, MusicgenMelodyConfig, MusicgenMelodyDecoderConfig, ) from .models.mvp import MvpConfig, MvpTokenizer - from .models.nat import NAT_PRETRAINED_CONFIG_ARCHIVE_MAP, NatConfig - from .models.nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig - from .models.nllb_moe import NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig + from .models.nat import NatConfig + from .models.nezha import NezhaConfig + from .models.nllb_moe import NllbMoeConfig from .models.nougat import NougatProcessor from .models.nystromformer import ( - NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, NystromformerConfig, ) - from .models.olmo import OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP, OlmoConfig + from .models.olmo import OlmoConfig from .models.oneformer import ( - 
ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, OneFormerConfig, OneFormerProcessor, ) from .models.openai import ( - OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig, OpenAIGPTTokenizer, ) from .models.opt import OPTConfig from .models.owlv2 import ( - OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Owlv2Config, Owlv2Processor, Owlv2TextConfig, Owlv2VisionConfig, ) from .models.owlvit import ( - OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTProcessor, OwlViTTextConfig, OwlViTVisionConfig, ) from .models.patchtsmixer import ( - PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP, PatchTSMixerConfig, ) - from .models.patchtst import PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP, PatchTSTConfig + from .models.patchtst import PatchTSTConfig from .models.pegasus import ( - PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusConfig, PegasusTokenizer, ) from .models.pegasus_x import ( - PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig, ) from .models.perceiver import ( - PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverTokenizer, ) from .models.persimmon import ( - PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP, PersimmonConfig, ) - from .models.phi import PHI_PRETRAINED_CONFIG_ARCHIVE_MAP, PhiConfig - from .models.phi3 import PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP, Phi3Config + from .models.phi import PhiConfig + from .models.phi3 import Phi3Config from .models.phobert import PhobertTokenizer from .models.pix2struct import ( - PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, Pix2StructConfig, Pix2StructProcessor, Pix2StructTextConfig, Pix2StructVisionConfig, ) - from .models.plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig + from .models.plbart import PLBartConfig from .models.poolformer import ( - POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, ) from .models.pop2piano import ( - POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP, Pop2PianoConfig, ) from .models.prophetnet import ( - PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig, ProphetNetTokenizer, 
) - from .models.pvt import PVT_PRETRAINED_CONFIG_ARCHIVE_MAP, PvtConfig + from .models.pvt import PvtConfig from .models.pvt_v2 import PvtV2Config - from .models.qdqbert import QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, QDQBertConfig - from .models.qwen2 import QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP, Qwen2Config, Qwen2Tokenizer - from .models.qwen2_moe import QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, Qwen2MoeConfig + from .models.qdqbert import QDQBertConfig + from .models.qwen2 import Qwen2Config, Qwen2Tokenizer + from .models.qwen2_moe import Qwen2MoeConfig from .models.rag import RagConfig, RagRetriever, RagTokenizer from .models.realm import ( - REALM_PRETRAINED_CONFIG_ARCHIVE_MAP, RealmConfig, RealmTokenizer, ) from .models.recurrent_gemma import RecurrentGemmaConfig - from .models.reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig - from .models.regnet import REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP, RegNetConfig - from .models.rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig - from .models.resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig + from .models.reformer import ReformerConfig + from .models.regnet import RegNetConfig + from .models.rembert import RemBertConfig + from .models.resnet import ResNetConfig from .models.roberta import ( - ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaTokenizer, ) from .models.roberta_prelayernorm import ( - ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, ) from .models.roc_bert import ( - ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig, RoCBertTokenizer, ) from .models.roformer import ( - ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerTokenizer, ) - from .models.rwkv import RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP, RwkvConfig + from .models.rwkv import RwkvConfig from .models.sam import ( - SAM_PRETRAINED_CONFIG_ARCHIVE_MAP, SamConfig, SamMaskDecoderConfig, SamProcessor, @@ -5769,21 +5176,18 @@ SamVisionConfig, ) from 
.models.seamless_m4t import ( - SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP, SeamlessM4TConfig, SeamlessM4TFeatureExtractor, SeamlessM4TProcessor, ) from .models.seamless_m4t_v2 import ( - SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, SeamlessM4Tv2Config, ) - from .models.segformer import SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SegformerConfig - from .models.seggpt import SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, SegGptConfig - from .models.sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig - from .models.sew_d import SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWDConfig + from .models.segformer import SegformerConfig + from .models.seggpt import SegGptConfig + from .models.sew import SEWConfig + from .models.sew_d import SEWDConfig from .models.siglip import ( - SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, SiglipConfig, SiglipProcessor, SiglipTextConfig, @@ -5791,110 +5195,90 @@ ) from .models.speech_encoder_decoder import SpeechEncoderDecoderConfig from .models.speech_to_text import ( - SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig, Speech2TextFeatureExtractor, Speech2TextProcessor, ) from .models.speech_to_text_2 import ( - SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2Text2Config, Speech2Text2Processor, Speech2Text2Tokenizer, ) from .models.speecht5 import ( - SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, - SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechT5Config, SpeechT5FeatureExtractor, SpeechT5HifiGanConfig, SpeechT5Processor, ) from .models.splinter import ( - SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP, SplinterConfig, SplinterTokenizer, ) from .models.squeezebert import ( - SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertTokenizer, ) - from .models.stablelm import STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP, StableLmConfig - from .models.starcoder2 import STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP, Starcoder2Config - from .models.superpoint import SUPERPOINT_PRETRAINED_CONFIG_ARCHIVE_MAP, SuperPointConfig + from 
.models.stablelm import StableLmConfig + from .models.starcoder2 import Starcoder2Config + from .models.superpoint import SuperPointConfig from .models.swiftformer import ( - SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, ) - from .models.swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig - from .models.swin2sr import SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP, Swin2SRConfig - from .models.swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config + from .models.swin import SwinConfig + from .models.swin2sr import Swin2SRConfig + from .models.swinv2 import Swinv2Config from .models.switch_transformers import ( - SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP, SwitchTransformersConfig, ) - from .models.t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config + from .models.t5 import T5Config from .models.table_transformer import ( - TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TableTransformerConfig, ) from .models.tapas import ( - TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig, TapasTokenizer, ) from .models.time_series_transformer import ( - TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) from .models.timesformer import ( - TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig, ) from .models.timm_backbone import TimmBackboneConfig from .models.trocr import ( - TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig, TrOCRProcessor, ) from .models.tvlt import ( - TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP, TvltConfig, TvltFeatureExtractor, TvltProcessor, ) from .models.tvp import ( - TVP_PRETRAINED_CONFIG_ARCHIVE_MAP, TvpConfig, TvpProcessor, ) - from .models.udop import UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP, UdopConfig, UdopProcessor + from .models.udop import UdopConfig, UdopProcessor from .models.umt5 import UMT5Config from .models.unispeech import ( - UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig, ) from .models.unispeech_sat import ( - UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP, 
UniSpeechSatConfig, ) from .models.univnet import ( - UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP, UnivNetConfig, UnivNetFeatureExtractor, ) from .models.upernet import UperNetConfig - from .models.videomae import VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP, VideoMAEConfig + from .models.videomae import VideoMAEConfig from .models.vilt import ( - VILT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViltConfig, ViltFeatureExtractor, ViltImageProcessor, ViltProcessor, ) from .models.vipllava import ( - VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, VipLlavaConfig, ) from .models.vision_encoder_decoder import VisionEncoderDecoderConfig @@ -5903,26 +5287,22 @@ VisionTextDualEncoderProcessor, ) from .models.visual_bert import ( - VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, VisualBertConfig, ) - from .models.vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig + from .models.vit import ViTConfig from .models.vit_hybrid import ( - VIT_HYBRID_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTHybridConfig, ) - from .models.vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig - from .models.vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig - from .models.vitdet import VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP, VitDetConfig - from .models.vitmatte import VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP, VitMatteConfig + from .models.vit_mae import ViTMAEConfig + from .models.vit_msn import ViTMSNConfig + from .models.vitdet import VitDetConfig + from .models.vitmatte import VitMatteConfig from .models.vits import ( - VITS_PRETRAINED_CONFIG_ARCHIVE_MAP, VitsConfig, VitsTokenizer, ) - from .models.vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig + from .models.vivit import VivitConfig from .models.wav2vec2 import ( - WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, @@ -5930,49 +5310,42 @@ Wav2Vec2Tokenizer, ) from .models.wav2vec2_bert import ( - WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2BertConfig, Wav2Vec2BertProcessor, ) from 
.models.wav2vec2_conformer import ( - WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2ConformerConfig, ) from .models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer from .models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM - from .models.wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig + from .models.wavlm import WavLMConfig from .models.whisper import ( - WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperFeatureExtractor, WhisperProcessor, WhisperTokenizer, ) from .models.x_clip import ( - XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) - from .models.xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig - from .models.xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMTokenizer + from .models.xglm import XGLMConfig + from .models.xlm import XLMConfig, XLMTokenizer from .models.xlm_prophetnet import ( - XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig, ) from .models.xlm_roberta import ( - XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, ) from .models.xlm_roberta_xl import ( - XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, ) - from .models.xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig - from .models.xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig - from .models.yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig - from .models.yoso import YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP, YosoConfig + from .models.xlnet import XLNetConfig + from .models.xmod import XmodConfig + from .models.yolos import YolosConfig + from .models.yoso import YosoConfig # Pipelines from .pipelines import ( @@ -6417,7 +5790,6 @@ ) from .modeling_utils import PreTrainedModel from .models.albert import ( - ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, @@ -6429,21 +5801,18 @@ load_tf_weights_in_albert, ) from .models.align import ( - 
ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST, AlignModel, AlignPreTrainedModel, AlignTextModel, AlignVisionModel, ) from .models.altclip import ( - ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) from .models.audio_spectrogram_transformer import ( - AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, @@ -6531,13 +5900,11 @@ AutoModelWithLMHead, ) from .models.autoformer import ( - AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) from .models.bark import ( - BARK_PRETRAINED_MODEL_ARCHIVE_LIST, BarkCausalModel, BarkCoarseModel, BarkFineModel, @@ -6546,7 +5913,6 @@ BarkSemanticModel, ) from .models.bart import ( - BART_PRETRAINED_MODEL_ARCHIVE_LIST, BartForCausalLM, BartForConditionalGeneration, BartForQuestionAnswering, @@ -6557,7 +5923,6 @@ PretrainedBartModel, ) from .models.beit import ( - BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitBackbone, BeitForImageClassification, BeitForMaskedImageModeling, @@ -6566,7 +5931,6 @@ BeitPreTrainedModel, ) from .models.bert import ( - BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, @@ -6587,7 +5951,6 @@ load_tf_weights_in_bert_generation, ) from .models.big_bird import ( - BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdForCausalLM, BigBirdForMaskedLM, BigBirdForMultipleChoice, @@ -6601,7 +5964,6 @@ load_tf_weights_in_big_bird, ) from .models.bigbird_pegasus import ( - BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, @@ -6610,7 +5972,6 @@ BigBirdPegasusPreTrainedModel, ) from .models.biogpt import ( - BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, @@ -6618,28 +5979,24 @@ BioGptPreTrainedModel, ) from .models.bit import 
( - BIT_PRETRAINED_MODEL_ARCHIVE_LIST, BitBackbone, BitForImageClassification, BitModel, BitPreTrainedModel, ) from .models.blenderbot import ( - BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) from .models.blenderbot_small import ( - BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) from .models.blip import ( - BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, @@ -6649,7 +6006,6 @@ BlipVisionModel, ) from .models.blip_2 import ( - BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, Blip2ForConditionalGeneration, Blip2Model, Blip2PreTrainedModel, @@ -6657,7 +6013,6 @@ Blip2VisionModel, ) from .models.bloom import ( - BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, @@ -6666,7 +6021,6 @@ BloomPreTrainedModel, ) from .models.bridgetower import ( - BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, @@ -6674,7 +6028,6 @@ BridgeTowerPreTrainedModel, ) from .models.bros import ( - BROS_PRETRAINED_MODEL_ARCHIVE_LIST, BrosForTokenClassification, BrosModel, BrosPreTrainedModel, @@ -6683,7 +6036,6 @@ BrosSpadeELForTokenClassification, ) from .models.camembert import ( - CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, CamembertForCausalLM, CamembertForMaskedLM, CamembertForMultipleChoice, @@ -6694,7 +6046,6 @@ CamembertPreTrainedModel, ) from .models.canine import ( - CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, @@ -6705,14 +6056,12 @@ load_tf_weights_in_canine, ) from .models.chinese_clip import ( - CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, 
ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) from .models.clap import ( - CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapFeatureExtractor, @@ -6722,7 +6071,6 @@ ClapTextModelWithProjection, ) from .models.clip import ( - CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPForImageClassification, CLIPModel, CLIPPreTrainedModel, @@ -6732,7 +6080,6 @@ CLIPVisionModelWithProjection, ) from .models.clipseg import ( - CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, @@ -6740,7 +6087,6 @@ CLIPSegVisionModel, ) from .models.clvp import ( - CLVP_PRETRAINED_MODEL_ARCHIVE_LIST, ClvpDecoder, ClvpEncoder, ClvpForCausalLM, @@ -6749,7 +6095,6 @@ ClvpPreTrainedModel, ) from .models.codegen import ( - CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST, CodeGenForCausalLM, CodeGenModel, CodeGenPreTrainedModel, @@ -6760,14 +6105,12 @@ CoherePreTrainedModel, ) from .models.conditional_detr import ( - CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) from .models.convbert import ( - CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, @@ -6779,42 +6122,34 @@ load_tf_weights_in_convbert, ) from .models.convnext import ( - CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) from .models.convnextv2 import ( - CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model, ConvNextV2PreTrainedModel, ) from .models.cpmant import ( - CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) from .models.ctrl import ( - CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) from 
.models.cvt import ( - CVT_PRETRAINED_MODEL_ARCHIVE_LIST, CvtForImageClassification, CvtModel, CvtPreTrainedModel, ) from .models.data2vec import ( - DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, - DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, - DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, Data2VecAudioForAudioFrameClassification, Data2VecAudioForCTC, Data2VecAudioForSequenceClassification, @@ -6842,7 +6177,6 @@ DbrxPreTrainedModel, ) from .models.deberta import ( - DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, @@ -6851,7 +6185,6 @@ DebertaPreTrainedModel, ) from .models.deberta_v2 import ( - DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaV2ForMaskedLM, DebertaV2ForMultipleChoice, DebertaV2ForQuestionAnswering, @@ -6861,20 +6194,17 @@ DebertaV2PreTrainedModel, ) from .models.decision_transformer import ( - DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, DecisionTransformerGPT2Model, DecisionTransformerGPT2PreTrainedModel, DecisionTransformerModel, DecisionTransformerPreTrainedModel, ) from .models.deformable_detr import ( - DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, DeformableDetrForObjectDetection, DeformableDetrModel, DeformableDetrPreTrainedModel, ) from .models.deit import ( - DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, @@ -6882,7 +6212,6 @@ DeiTPreTrainedModel, ) from .models.deprecated.mctct import ( - MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel, @@ -6899,17 +6228,14 @@ OpenLlamaPreTrainedModel, ) from .models.deprecated.retribert import ( - RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RetriBertModel, RetriBertPreTrainedModel, ) from .models.deprecated.trajectory_transformer import ( - TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, ) from .models.deprecated.transfo_xl import ( - 
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, @@ -6918,45 +6244,38 @@ load_tf_weights_in_transfo_xl, ) from .models.deprecated.van import ( - VAN_PRETRAINED_MODEL_ARCHIVE_LIST, VanForImageClassification, VanModel, VanPreTrainedModel, ) from .models.depth_anything import ( - DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST, DepthAnythingForDepthEstimation, DepthAnythingPreTrainedModel, ) from .models.deta import ( - DETA_PRETRAINED_MODEL_ARCHIVE_LIST, DetaForObjectDetection, DetaModel, DetaPreTrainedModel, ) from .models.detr import ( - DETR_PRETRAINED_MODEL_ARCHIVE_LIST, DetrForObjectDetection, DetrForSegmentation, DetrModel, DetrPreTrainedModel, ) from .models.dinat import ( - DINAT_PRETRAINED_MODEL_ARCHIVE_LIST, DinatBackbone, DinatForImageClassification, DinatModel, DinatPreTrainedModel, ) from .models.dinov2 import ( - DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST, Dinov2Backbone, Dinov2ForImageClassification, Dinov2Model, Dinov2PreTrainedModel, ) from .models.distilbert import ( - DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, @@ -6966,14 +6285,10 @@ DistilBertPreTrainedModel, ) from .models.donut import ( - DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, DonutSwinModel, DonutSwinPreTrainedModel, ) from .models.dpr import ( - DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, - DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, - DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, DPRContextEncoder, DPRPretrainedContextEncoder, DPRPreTrainedModel, @@ -6983,27 +6298,23 @@ DPRReader, ) from .models.dpt import ( - DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) from .models.efficientformer import ( - EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, 
EfficientFormerPreTrainedModel, ) from .models.efficientnet import ( - EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) from .models.electra import ( - ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, @@ -7016,13 +6327,11 @@ load_tf_weights_in_electra, ) from .models.encodec import ( - ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) from .models.encoder_decoder import EncoderDecoderModel from .models.ernie import ( - ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, @@ -7035,7 +6344,6 @@ ErniePreTrainedModel, ) from .models.ernie_m import ( - ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieMForInformationExtraction, ErnieMForMultipleChoice, ErnieMForQuestionAnswering, @@ -7045,7 +6353,6 @@ ErnieMPreTrainedModel, ) from .models.esm import ( - ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmFoldPreTrainedModel, EsmForMaskedLM, EsmForProteinFolding, @@ -7055,7 +6362,6 @@ EsmPreTrainedModel, ) from .models.falcon import ( - FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, @@ -7064,14 +6370,12 @@ FalconPreTrainedModel, ) from .models.fastspeech2_conformer import ( - FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FastSpeech2ConformerHifiGan, FastSpeech2ConformerModel, FastSpeech2ConformerPreTrainedModel, FastSpeech2ConformerWithHifiGan, ) from .models.flaubert import ( - FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, @@ -7082,7 +6386,6 @@ FlaubertWithLMHeadModel, ) from .models.flava import ( - FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST, FlavaForPreTraining, FlavaImageCodebook, FlavaImageModel, @@ -7092,7 +6395,6 @@ FlavaTextModel, ) from .models.fnet import ( - FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, 
FNetForMultipleChoice, FNetForNextSentencePrediction, @@ -7105,7 +6407,6 @@ FNetPreTrainedModel, ) from .models.focalnet import ( - FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, @@ -7118,7 +6419,6 @@ PretrainedFSMTModel, ) from .models.funnel import ( - FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, @@ -7141,20 +6441,17 @@ GemmaPreTrainedModel, ) from .models.git import ( - GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) from .models.glpn import ( - GLPN_PRETRAINED_MODEL_ARCHIVE_LIST, GLPNForDepthEstimation, GLPNModel, GLPNPreTrainedModel, ) from .models.gpt2 import ( - GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, GPT2DoubleHeadsModel, GPT2ForQuestionAnswering, GPT2ForSequenceClassification, @@ -7165,7 +6462,6 @@ load_tf_weights_in_gpt2, ) from .models.gpt_bigcode import ( - GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, @@ -7173,7 +6469,6 @@ GPTBigCodePreTrainedModel, ) from .models.gpt_neo import ( - GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, @@ -7183,7 +6478,6 @@ load_tf_weights_in_gpt_neo, ) from .models.gpt_neox import ( - GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, @@ -7193,14 +6487,12 @@ GPTNeoXPreTrainedModel, ) from .models.gpt_neox_japanese import ( - GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) from .models.gptj import ( - GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST, GPTJForCausalLM, GPTJForQuestionAnswering, GPTJForSequenceClassification, @@ -7208,39 +6500,33 @@ GPTJPreTrainedModel, ) from .models.gptsan_japanese import ( - 
GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTSanJapaneseForConditionalGeneration, GPTSanJapaneseModel, GPTSanJapanesePreTrainedModel, ) from .models.graphormer import ( - GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) from .models.grounding_dino import ( - GROUNDING_DINO_PRETRAINED_MODEL_ARCHIVE_LIST, GroundingDinoForObjectDetection, GroundingDinoModel, GroundingDinoPreTrainedModel, ) from .models.groupvit import ( - GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) from .models.hubert import ( - HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, HubertForCTC, HubertForSequenceClassification, HubertModel, HubertPreTrainedModel, ) from .models.ibert import ( - IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, @@ -7250,21 +6536,18 @@ IBertPreTrainedModel, ) from .models.idefics import ( - IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST, IdeficsForVisionText2Text, IdeficsModel, IdeficsPreTrainedModel, IdeficsProcessor, ) from .models.idefics2 import ( - IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST, Idefics2ForConditionalGeneration, Idefics2Model, Idefics2PreTrainedModel, Idefics2Processor, ) from .models.imagegpt import ( - IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST, ImageGPTForCausalImageModeling, ImageGPTForImageClassification, ImageGPTModel, @@ -7272,13 +6555,11 @@ load_tf_weights_in_imagegpt, ) from .models.informer import ( - INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) from .models.instructblip import ( - INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, @@ -7291,20 +6572,17 @@ JambaPreTrainedModel, ) from .models.jukebox import ( - JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) from 
.models.kosmos2 import ( - KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST, Kosmos2ForConditionalGeneration, Kosmos2Model, Kosmos2PreTrainedModel, ) from .models.layoutlm import ( - LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMForMaskedLM, LayoutLMForQuestionAnswering, LayoutLMForSequenceClassification, @@ -7313,7 +6591,6 @@ LayoutLMPreTrainedModel, ) from .models.layoutlmv2 import ( - LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMv2ForQuestionAnswering, LayoutLMv2ForSequenceClassification, LayoutLMv2ForTokenClassification, @@ -7321,7 +6598,6 @@ LayoutLMv2PreTrainedModel, ) from .models.layoutlmv3 import ( - LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMv3ForQuestionAnswering, LayoutLMv3ForSequenceClassification, LayoutLMv3ForTokenClassification, @@ -7329,7 +6605,6 @@ LayoutLMv3PreTrainedModel, ) from .models.led import ( - LED_PRETRAINED_MODEL_ARCHIVE_LIST, LEDForConditionalGeneration, LEDForQuestionAnswering, LEDForSequenceClassification, @@ -7337,14 +6612,12 @@ LEDPreTrainedModel, ) from .models.levit import ( - LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, LevitPreTrainedModel, ) from .models.lilt import ( - LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, @@ -7359,17 +6632,14 @@ LlamaPreTrainedModel, ) from .models.llava import ( - LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST, LlavaForConditionalGeneration, LlavaPreTrainedModel, ) from .models.llava_next import ( - LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST, LlavaNextForConditionalGeneration, LlavaNextPreTrainedModel, ) from .models.longformer import ( - LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, @@ -7380,14 +6650,12 @@ LongformerSelfAttention, ) from .models.longt5 import ( - LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongT5EncoderModel, LongT5ForConditionalGeneration, LongT5Model, 
LongT5PreTrainedModel, ) from .models.luke import ( - LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, @@ -7409,20 +6677,17 @@ LxmertXLayer, ) from .models.m2m_100 import ( - M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, M2M100ForConditionalGeneration, M2M100Model, M2M100PreTrainedModel, ) from .models.mamba import ( - MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST, MambaForCausalLM, MambaModel, MambaPreTrainedModel, ) from .models.marian import MarianForCausalLM, MarianModel, MarianMTModel from .models.markuplm import ( - MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST, MarkupLMForQuestionAnswering, MarkupLMForSequenceClassification, MarkupLMForTokenClassification, @@ -7430,13 +6695,11 @@ MarkupLMPreTrainedModel, ) from .models.mask2former import ( - MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, Mask2FormerForUniversalSegmentation, Mask2FormerModel, Mask2FormerPreTrainedModel, ) from .models.maskformer import ( - MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, @@ -7451,7 +6714,6 @@ MBartPreTrainedModel, ) from .models.mega import ( - MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, @@ -7462,7 +6724,6 @@ MegaPreTrainedModel, ) from .models.megatron_bert import ( - MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, @@ -7475,7 +6736,6 @@ MegatronBertPreTrainedModel, ) from .models.mgp_str import ( - MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, @@ -7493,7 +6753,6 @@ MixtralPreTrainedModel, ) from .models.mobilebert import ( - MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, @@ -7507,14 +6766,12 @@ load_tf_weights_in_mobilebert, ) from .models.mobilenet_v1 import ( - 
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetV1ForImageClassification, MobileNetV1Model, MobileNetV1PreTrainedModel, load_tf_weights_in_mobilenet_v1, ) from .models.mobilenet_v2 import ( - MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model, @@ -7522,21 +6779,18 @@ load_tf_weights_in_mobilenet_v2, ) from .models.mobilevit import ( - MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) from .models.mobilevitv2 import ( - MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model, MobileViTV2PreTrainedModel, ) from .models.mpnet import ( - MPNET_PRETRAINED_MODEL_ARCHIVE_LIST, MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, @@ -7547,7 +6801,6 @@ MPNetPreTrainedModel, ) from .models.mpt import ( - MPT_PRETRAINED_MODEL_ARCHIVE_LIST, MptForCausalLM, MptForQuestionAnswering, MptForSequenceClassification, @@ -7556,7 +6809,6 @@ MptPreTrainedModel, ) from .models.mra import ( - MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, @@ -7575,7 +6827,6 @@ MT5PreTrainedModel, ) from .models.musicgen import ( - MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST, MusicgenForCausalLM, MusicgenForConditionalGeneration, MusicgenModel, @@ -7583,14 +6834,12 @@ MusicgenProcessor, ) from .models.musicgen_melody import ( - MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST, MusicgenMelodyForCausalLM, MusicgenMelodyForConditionalGeneration, MusicgenMelodyModel, MusicgenMelodyPreTrainedModel, ) from .models.mvp import ( - MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, @@ -7599,14 +6848,12 @@ MvpPreTrainedModel, ) from .models.nat import ( - NAT_PRETRAINED_MODEL_ARCHIVE_LIST, NatBackbone, NatForImageClassification, NatModel, 
NatPreTrainedModel, ) from .models.nezha import ( - NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, @@ -7618,7 +6865,6 @@ NezhaPreTrainedModel, ) from .models.nllb_moe import ( - NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, @@ -7626,7 +6872,6 @@ NllbMoeTop2Router, ) from .models.nystromformer import ( - NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, @@ -7642,13 +6887,11 @@ OlmoPreTrainedModel, ) from .models.oneformer import ( - ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, OneFormerForUniversalSegmentation, OneFormerModel, OneFormerPreTrainedModel, ) from .models.openai import ( - OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, @@ -7657,7 +6900,6 @@ load_tf_weights_in_openai_gpt, ) from .models.opt import ( - OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, @@ -7665,7 +6907,6 @@ OPTPreTrainedModel, ) from .models.owlv2 import ( - OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST, Owlv2ForObjectDetection, Owlv2Model, Owlv2PreTrainedModel, @@ -7673,7 +6914,6 @@ Owlv2VisionModel, ) from .models.owlvit import ( - OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, @@ -7681,7 +6921,6 @@ OwlViTVisionModel, ) from .models.patchtsmixer import ( - PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST, PatchTSMixerForPrediction, PatchTSMixerForPretraining, PatchTSMixerForRegression, @@ -7690,7 +6929,6 @@ PatchTSMixerPreTrainedModel, ) from .models.patchtst import ( - PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST, PatchTSTForClassification, PatchTSTForPrediction, PatchTSTForPretraining, @@ -7705,13 +6943,11 @@ PegasusPreTrainedModel, ) from .models.pegasus_x import ( - PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, 
PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) from .models.perceiver import ( - PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, @@ -7730,7 +6966,6 @@ PersimmonPreTrainedModel, ) from .models.phi import ( - PHI_PRETRAINED_MODEL_ARCHIVE_LIST, PhiForCausalLM, PhiForSequenceClassification, PhiForTokenClassification, @@ -7738,7 +6973,6 @@ PhiPreTrainedModel, ) from .models.phi3 import ( - PHI3_PRETRAINED_MODEL_ARCHIVE_LIST, Phi3ForCausalLM, Phi3ForSequenceClassification, Phi3ForTokenClassification, @@ -7746,14 +6980,12 @@ Phi3PreTrainedModel, ) from .models.pix2struct import ( - PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, Pix2StructForConditionalGeneration, Pix2StructPreTrainedModel, Pix2StructTextModel, Pix2StructVisionModel, ) from .models.plbart import ( - PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, @@ -7761,18 +6993,15 @@ PLBartPreTrainedModel, ) from .models.poolformer import ( - POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) from .models.pop2piano import ( - POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST, Pop2PianoForConditionalGeneration, Pop2PianoPreTrainedModel, ) from .models.prophetnet import ( - PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, ProphetNetDecoder, ProphetNetEncoder, ProphetNetForCausalLM, @@ -7781,7 +7010,6 @@ ProphetNetPreTrainedModel, ) from .models.pvt import ( - PVT_PRETRAINED_MODEL_ARCHIVE_LIST, PvtForImageClassification, PvtModel, PvtPreTrainedModel, @@ -7793,7 +7021,6 @@ PvtV2PreTrainedModel, ) from .models.qdqbert import ( - QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST, QDQBertForMaskedLM, QDQBertForMultipleChoice, QDQBertForNextSentencePrediction, @@ -7825,7 +7052,6 @@ RagTokenForGeneration, ) from .models.realm import ( - REALM_PRETRAINED_MODEL_ARCHIVE_LIST, 
RealmEmbedder, RealmForOpenQA, RealmKnowledgeAugEncoder, @@ -7841,7 +7067,6 @@ RecurrentGemmaPreTrainedModel, ) from .models.reformer import ( - REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, @@ -7852,13 +7077,11 @@ ReformerPreTrainedModel, ) from .models.regnet import ( - REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, RegNetForImageClassification, RegNetModel, RegNetPreTrainedModel, ) from .models.rembert import ( - REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, @@ -7871,14 +7094,12 @@ load_tf_weights_in_rembert, ) from .models.resnet import ( - RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) from .models.roberta import ( - ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, @@ -7889,7 +7110,6 @@ RobertaPreTrainedModel, ) from .models.roberta_prelayernorm import ( - ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, @@ -7900,7 +7120,6 @@ RobertaPreLayerNormPreTrainedModel, ) from .models.roc_bert import ( - ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, @@ -7914,7 +7133,6 @@ load_tf_weights_in_roc_bert, ) from .models.roformer import ( - ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, @@ -7927,18 +7145,15 @@ load_tf_weights_in_roformer, ) from .models.rwkv import ( - RWKV_PRETRAINED_MODEL_ARCHIVE_LIST, RwkvForCausalLM, RwkvModel, RwkvPreTrainedModel, ) from .models.sam import ( - SAM_PRETRAINED_MODEL_ARCHIVE_LIST, SamModel, SamPreTrainedModel, ) from .models.seamless_m4t import ( - SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST, SeamlessM4TCodeHifiGan, SeamlessM4TForSpeechToSpeech, SeamlessM4TForSpeechToText, @@ 
-7951,7 +7166,6 @@ SeamlessM4TTextToUnitModel, ) from .models.seamless_m4t_v2 import ( - SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST, SeamlessM4Tv2ForSpeechToSpeech, SeamlessM4Tv2ForSpeechToText, SeamlessM4Tv2ForTextToSpeech, @@ -7960,7 +7174,6 @@ SeamlessM4Tv2PreTrainedModel, ) from .models.segformer import ( - SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SegformerDecodeHead, SegformerForImageClassification, SegformerForSemanticSegmentation, @@ -7969,27 +7182,23 @@ SegformerPreTrainedModel, ) from .models.seggpt import ( - SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST, SegGptForImageSegmentation, SegGptModel, SegGptPreTrainedModel, ) from .models.sew import ( - SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) from .models.sew_d import ( - SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST, SEWDForCTC, SEWDForSequenceClassification, SEWDModel, SEWDPreTrainedModel, ) from .models.siglip import ( - SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST, SiglipForImageClassification, SiglipModel, SiglipPreTrainedModel, @@ -7998,7 +7207,6 @@ ) from .models.speech_encoder_decoder import SpeechEncoderDecoderModel from .models.speech_to_text import ( - SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, Speech2TextForConditionalGeneration, Speech2TextModel, Speech2TextPreTrainedModel, @@ -8008,7 +7216,6 @@ Speech2Text2PreTrainedModel, ) from .models.speecht5 import ( - SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechT5ForSpeechToSpeech, SpeechT5ForSpeechToText, SpeechT5ForTextToSpeech, @@ -8017,7 +7224,6 @@ SpeechT5PreTrainedModel, ) from .models.splinter import ( - SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST, SplinterForPreTraining, SplinterForQuestionAnswering, SplinterLayer, @@ -8025,7 +7231,6 @@ SplinterPreTrainedModel, ) from .models.squeezebert import ( - SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, @@ -8048,18 +7253,15 @@ Starcoder2PreTrainedModel, ) from .models.superpoint 
import ( - SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST, SuperPointForKeypointDetection, SuperPointPreTrainedModel, ) from .models.swiftformer import ( - SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) from .models.swin import ( - SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, @@ -8067,13 +7269,11 @@ SwinPreTrainedModel, ) from .models.swin2sr import ( - SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST, Swin2SRForImageSuperResolution, Swin2SRModel, Swin2SRPreTrainedModel, ) from .models.swinv2 import ( - SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, Swinv2Backbone, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, @@ -8081,7 +7281,6 @@ Swinv2PreTrainedModel, ) from .models.switch_transformers import ( - SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST, SwitchTransformersEncoderModel, SwitchTransformersForConditionalGeneration, SwitchTransformersModel, @@ -8090,7 +7289,6 @@ SwitchTransformersTop1Router, ) from .models.t5 import ( - T5_PRETRAINED_MODEL_ARCHIVE_LIST, T5EncoderModel, T5ForConditionalGeneration, T5ForQuestionAnswering, @@ -8101,13 +7299,11 @@ load_tf_weights_in_t5, ) from .models.table_transformer import ( - TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TableTransformerForObjectDetection, TableTransformerModel, TableTransformerPreTrainedModel, ) from .models.tapas import ( - TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, @@ -8116,38 +7312,32 @@ load_tf_weights_in_tapas, ) from .models.time_series_transformer import ( - TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) from .models.timesformer import ( - TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) from .models.timm_backbone 
import TimmBackbone from .models.trocr import ( - TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel, ) from .models.tvlt import ( - TVLT_PRETRAINED_MODEL_ARCHIVE_LIST, TvltForAudioVisualClassification, TvltForPreTraining, TvltModel, TvltPreTrainedModel, ) from .models.tvp import ( - TVP_PRETRAINED_MODEL_ARCHIVE_LIST, TvpForVideoGrounding, TvpModel, TvpPreTrainedModel, ) from .models.udop import ( - UDOP_PRETRAINED_MODEL_ARCHIVE_LIST, UdopEncoderModel, UdopForConditionalGeneration, UdopModel, @@ -8163,7 +7353,6 @@ UMT5PreTrainedModel, ) from .models.unispeech import ( - UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, @@ -8171,7 +7360,6 @@ UniSpeechPreTrainedModel, ) from .models.unispeech_sat import ( - UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechSatForAudioFrameClassification, UniSpeechSatForCTC, UniSpeechSatForPreTraining, @@ -8180,20 +7368,18 @@ UniSpeechSatModel, UniSpeechSatPreTrainedModel, ) - from .models.univnet import UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST, UnivNetModel + from .models.univnet import UnivNetModel from .models.upernet import ( UperNetForSemanticSegmentation, UperNetPreTrainedModel, ) from .models.videomae import ( - VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, VideoMAEPreTrainedModel, ) from .models.vilt import ( - VILT_PRETRAINED_MODEL_ARCHIVE_LIST, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, @@ -8204,14 +7390,12 @@ ViltPreTrainedModel, ) from .models.vipllava import ( - VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST, VipLlavaForConditionalGeneration, VipLlavaPreTrainedModel, ) from .models.vision_encoder_decoder import VisionEncoderDecoderModel from .models.vision_text_dual_encoder import VisionTextDualEncoderModel from .models.visual_bert import ( - VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, VisualBertForMultipleChoice, 
VisualBertForPreTraining, VisualBertForQuestionAnswering, @@ -8222,55 +7406,46 @@ VisualBertPreTrainedModel, ) from .models.vit import ( - VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) from .models.vit_hybrid import ( - VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST, ViTHybridForImageClassification, ViTHybridModel, ViTHybridPreTrainedModel, ) from .models.vit_mae import ( - VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) from .models.vit_msn import ( - VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) from .models.vitdet import ( - VITDET_PRETRAINED_MODEL_ARCHIVE_LIST, VitDetBackbone, VitDetModel, VitDetPreTrainedModel, ) from .models.vitmatte import ( - VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST, VitMatteForImageMatting, VitMattePreTrainedModel, ) from .models.vits import ( - VITS_PRETRAINED_MODEL_ARCHIVE_LIST, VitsModel, VitsPreTrainedModel, ) from .models.vivit import ( - VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) from .models.wav2vec2 import ( - WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2ForAudioFrameClassification, Wav2Vec2ForCTC, Wav2Vec2ForMaskedLM, @@ -8281,7 +7456,6 @@ Wav2Vec2PreTrainedModel, ) from .models.wav2vec2_bert import ( - WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2BertForAudioFrameClassification, Wav2Vec2BertForCTC, Wav2Vec2BertForSequenceClassification, @@ -8290,7 +7464,6 @@ Wav2Vec2BertPreTrainedModel, ) from .models.wav2vec2_conformer import ( - WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2ConformerForAudioFrameClassification, Wav2Vec2ConformerForCTC, Wav2Vec2ConformerForPreTraining, @@ -8300,7 +7473,6 @@ Wav2Vec2ConformerPreTrainedModel, ) from .models.wavlm import ( - WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, 
WavLMForSequenceClassification, @@ -8309,7 +7481,6 @@ WavLMPreTrainedModel, ) from .models.whisper import ( - WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForCausalLM, WhisperForConditionalGeneration, @@ -8317,20 +7488,17 @@ WhisperPreTrainedModel, ) from .models.x_clip import ( - XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) from .models.xglm import ( - XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel, ) from .models.xlm import ( - XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, @@ -8341,7 +7509,6 @@ XLMWithLMHeadModel, ) from .models.xlm_prophetnet import ( - XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLMProphetNetDecoder, XLMProphetNetEncoder, XLMProphetNetForCausalLM, @@ -8350,7 +7517,6 @@ XLMProphetNetPreTrainedModel, ) from .models.xlm_roberta import ( - XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, @@ -8361,7 +7527,6 @@ XLMRobertaPreTrainedModel, ) from .models.xlm_roberta_xl import ( - XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, @@ -8372,7 +7537,6 @@ XLMRobertaXLPreTrainedModel, ) from .models.xlnet import ( - XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, @@ -8384,7 +7548,6 @@ load_tf_weights_in_xlnet, ) from .models.xmod import ( - XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, @@ -8395,13 +7558,11 @@ XmodPreTrainedModel, ) from .models.yolos import ( - YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) from .models.yoso import ( - YOSO_PRETRAINED_MODEL_ARCHIVE_LIST, YosoForMaskedLM, YosoForMultipleChoice, YosoForQuestionAnswering, @@ -8474,7 
+7635,6 @@ # TensorFlow model imports from .models.albert import ( - TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, @@ -8538,7 +7698,6 @@ TFBartPretrainedModel, ) from .models.bert import ( - TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, @@ -8563,7 +7722,6 @@ TFBlenderbotSmallPreTrainedModel, ) from .models.blip import ( - TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, @@ -8573,7 +7731,6 @@ TFBlipVisionModel, ) from .models.camembert import ( - TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFCamembertForCausalLM, TFCamembertForMaskedLM, TFCamembertForMultipleChoice, @@ -8584,14 +7741,12 @@ TFCamembertPreTrainedModel, ) from .models.clip import ( - TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) from .models.convbert import ( - TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, @@ -8612,14 +7767,12 @@ TFConvNextV2PreTrainedModel, ) from .models.ctrl import ( - TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) from .models.cvt import ( - TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST, TFCvtForImageClassification, TFCvtModel, TFCvtPreTrainedModel, @@ -8631,7 +7784,6 @@ TFData2VecVisionPreTrainedModel, ) from .models.deberta import ( - TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, @@ -8640,7 +7792,6 @@ TFDebertaPreTrainedModel, ) from .models.deberta_v2 import ( - TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaV2ForMaskedLM, TFDebertaV2ForMultipleChoice, TFDebertaV2ForQuestionAnswering, @@ -8650,7 +7801,6 @@ TFDebertaV2PreTrainedModel, ) from .models.deit import ( - 
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, @@ -8658,7 +7808,6 @@ TFDeiTPreTrainedModel, ) from .models.deprecated.transfo_xl import ( - TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, @@ -8667,7 +7816,6 @@ TFTransfoXLPreTrainedModel, ) from .models.distilbert import ( - TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, @@ -8678,9 +7826,6 @@ TFDistilBertPreTrainedModel, ) from .models.dpr import ( - TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, - TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, - TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, TFDPRContextEncoder, TFDPRPretrainedContextEncoder, TFDPRPretrainedQuestionEncoder, @@ -8689,14 +7834,12 @@ TFDPRReader, ) from .models.efficientformer import ( - TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) from .models.electra import ( - TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, @@ -8708,7 +7851,6 @@ ) from .models.encoder_decoder import TFEncoderDecoderModel from .models.esm import ( - ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, @@ -8716,7 +7858,6 @@ TFEsmPreTrainedModel, ) from .models.flaubert import ( - TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, @@ -8726,7 +7867,6 @@ TFFlaubertWithLMHeadModel, ) from .models.funnel import ( - TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, @@ -8738,7 +7878,6 @@ 
TFFunnelPreTrainedModel, ) from .models.gpt2 import ( - TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, TFGPT2DoubleHeadsModel, TFGPT2ForSequenceClassification, TFGPT2LMHeadModel, @@ -8754,20 +7893,17 @@ TFGPTJPreTrainedModel, ) from .models.groupvit import ( - TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) from .models.hubert import ( - TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFHubertForCTC, TFHubertModel, TFHubertPreTrainedModel, ) from .models.layoutlm import ( - TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, @@ -8777,7 +7913,6 @@ TFLayoutLMPreTrainedModel, ) from .models.layoutlmv3 import ( - TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMv3ForQuestionAnswering, TFLayoutLMv3ForSequenceClassification, TFLayoutLMv3ForTokenClassification, @@ -8790,7 +7925,6 @@ TFLEDPreTrainedModel, ) from .models.longformer import ( - TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, @@ -8801,7 +7935,6 @@ TFLongformerSelfAttention, ) from .models.lxmert import ( - TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, @@ -8819,7 +7952,6 @@ TFMBartPreTrainedModel, ) from .models.mobilebert import ( - TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, @@ -8832,14 +7964,12 @@ TFMobileBertPreTrainedModel, ) from .models.mobilevit import ( - TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) from .models.mpnet import ( - TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFMPNetForMaskedLM, TFMPNetForMultipleChoice, TFMPNetForQuestionAnswering, @@ -8855,7 +7985,6 @@ TFMT5Model, ) from .models.openai 
import ( - TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTForSequenceClassification, TFOpenAIGPTLMHeadModel, @@ -8876,13 +8005,11 @@ TFRagTokenForGeneration, ) from .models.regnet import ( - TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel, TFRegNetPreTrainedModel, ) from .models.rembert import ( - TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, @@ -8894,13 +8021,11 @@ TFRemBertPreTrainedModel, ) from .models.resnet import ( - TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) from .models.roberta import ( - TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, @@ -8912,7 +8037,6 @@ TFRobertaPreTrainedModel, ) from .models.roberta_prelayernorm import ( - TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, @@ -8924,7 +8048,6 @@ TFRobertaPreLayerNormPreTrainedModel, ) from .models.roformer import ( - TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, @@ -8936,12 +8059,10 @@ TFRoFormerPreTrainedModel, ) from .models.sam import ( - TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST, TFSamModel, TFSamPreTrainedModel, ) from .models.segformer import ( - TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFSegformerDecodeHead, TFSegformerForImageClassification, TFSegformerForSemanticSegmentation, @@ -8949,33 +8070,28 @@ TFSegformerPreTrainedModel, ) from .models.speech_to_text import ( - TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeech2TextForConditionalGeneration, TFSpeech2TextModel, TFSpeech2TextPreTrainedModel, ) from .models.swiftformer import ( - TF_SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwiftFormerForImageClassification, 
TFSwiftFormerModel, TFSwiftFormerPreTrainedModel, ) from .models.swin import ( - TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) from .models.t5 import ( - TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST, TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model, TFT5PreTrainedModel, ) from .models.tapas import ( - TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, @@ -8995,26 +8111,22 @@ TFViTMAEPreTrainedModel, ) from .models.wav2vec2 import ( - TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWav2Vec2ForCTC, TFWav2Vec2ForSequenceClassification, TFWav2Vec2Model, TFWav2Vec2PreTrainedModel, ) from .models.whisper import ( - TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) from .models.xglm import ( - TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) from .models.xlm import ( - TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, @@ -9025,7 +8137,6 @@ TFXLMWithLMHeadModel, ) from .models.xlm_roberta import ( - TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, @@ -9036,7 +8147,6 @@ TFXLMRobertaPreTrainedModel, ) from .models.xlnet import ( - TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, @@ -9358,7 +8468,6 @@ FlaxXGLMPreTrainedModel, ) from .models.xlm_roberta import ( - FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, diff --git a/src/transformers/models/albert/__init__.py b/src/transformers/models/albert/__init__.py index 168c68db837d08..1d0a4a4d02845c 100644 --- 
a/src/transformers/models/albert/__init__.py +++ b/src/transformers/models/albert/__init__.py @@ -26,7 +26,7 @@ _import_structure = { - "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"], + "configuration_albert": ["AlbertConfig", "AlbertOnnxConfig"], } try: @@ -52,7 +52,6 @@ pass else: _import_structure["modeling_albert"] = [ - "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "AlbertForMaskedLM", "AlbertForMultipleChoice", "AlbertForPreTraining", @@ -71,7 +70,6 @@ pass else: _import_structure["modeling_tf_albert"] = [ - "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAlbertForMaskedLM", "TFAlbertForMultipleChoice", "TFAlbertForPreTraining", @@ -101,7 +99,7 @@ ] if TYPE_CHECKING: - from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig + from .configuration_albert import AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): @@ -126,7 +124,6 @@ pass else: from .modeling_albert import ( - ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, @@ -145,7 +142,6 @@ pass else: from .modeling_tf_albert import ( - TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, diff --git a/src/transformers/models/albert/configuration_albert.py b/src/transformers/models/albert/configuration_albert.py index c5ddded4833481..492ca2f65b33ee 100644 --- a/src/transformers/models/albert/configuration_albert.py +++ b/src/transformers/models/albert/configuration_albert.py @@ -19,7 +19,6 @@ from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig -from ..deprecated._archive_maps import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class AlbertConfig(PretrainedConfig): diff --git a/src/transformers/models/albert/modeling_albert.py b/src/transformers/models/albert/modeling_albert.py index ff50f2f1293e17..ac4958798b2cdd 100755 --- 
a/src/transformers/models/albert/modeling_albert.py +++ b/src/transformers/models/albert/modeling_albert.py @@ -52,9 +52,6 @@ _CONFIG_FOR_DOC = "AlbertConfig" -from ..deprecated._archive_maps import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def load_tf_weights_in_albert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: diff --git a/src/transformers/models/albert/modeling_tf_albert.py b/src/transformers/models/albert/modeling_tf_albert.py index 5aa521bb73dea7..ad2c2d637a0143 100644 --- a/src/transformers/models/albert/modeling_tf_albert.py +++ b/src/transformers/models/albert/modeling_tf_albert.py @@ -66,9 +66,6 @@ _CONFIG_FOR_DOC = "AlbertConfig" -from ..deprecated._archive_maps import TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFAlbertPreTrainingLoss: """ Loss function suitable for ALBERT pretraining, that is, the task of pretraining a language model by combining SOP + diff --git a/src/transformers/models/align/__init__.py b/src/transformers/models/align/__init__.py index 8f9a6c40a7169f..650b25c3e5d1ee 100644 --- a/src/transformers/models/align/__init__.py +++ b/src/transformers/models/align/__init__.py @@ -22,7 +22,6 @@ _import_structure = { "configuration_align": [ - "ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlignConfig", "AlignTextConfig", "AlignVisionConfig", @@ -37,7 +36,6 @@ pass else: _import_structure["modeling_align"] = [ - "ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST", "AlignModel", "AlignPreTrainedModel", "AlignTextModel", @@ -46,7 +44,6 @@ if TYPE_CHECKING: from .configuration_align import ( - ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP, AlignConfig, AlignTextConfig, AlignVisionConfig, @@ -60,7 +57,6 @@ pass else: from .modeling_align import ( - ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST, AlignModel, AlignPreTrainedModel, AlignTextModel, diff --git a/src/transformers/models/align/configuration_align.py b/src/transformers/models/align/configuration_align.py index a4b3149d971a15..9e96f5d15a1d35 
100644 --- a/src/transformers/models/align/configuration_align.py +++ b/src/transformers/models/align/configuration_align.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class AlignTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AlignTextModel`]. It is used to instantiate a diff --git a/src/transformers/models/align/modeling_align.py b/src/transformers/models/align/modeling_align.py index 4fa128a5f67fa8..08de9aa14f7f38 100644 --- a/src/transformers/models/align/modeling_align.py +++ b/src/transformers/models/align/modeling_align.py @@ -47,9 +47,6 @@ _CONFIG_FOR_DOC = "AlignConfig" -from ..deprecated._archive_maps import ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - ALIGN_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads diff --git a/src/transformers/models/altclip/__init__.py b/src/transformers/models/altclip/__init__.py index 5fc02b192b256b..4e3cb99bbb16c9 100755 --- a/src/transformers/models/altclip/__init__.py +++ b/src/transformers/models/altclip/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_altclip": [ - "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPTextConfig", "AltCLIPVisionConfig", @@ -33,7 +32,6 @@ pass else: _import_structure["modeling_altclip"] = [ - "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPPreTrainedModel", "AltCLIPModel", "AltCLIPTextModel", @@ -43,7 +41,6 @@ if TYPE_CHECKING: from .configuration_altclip import ( - ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, @@ -57,7 +54,6 @@ pass else: from .modeling_altclip import ( - ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, 
AltCLIPPreTrainedModel, AltCLIPTextModel, diff --git a/src/transformers/models/altclip/configuration_altclip.py b/src/transformers/models/altclip/configuration_altclip.py index 590f2b526e8c4b..d6e533e1f8101d 100755 --- a/src/transformers/models/altclip/configuration_altclip.py +++ b/src/transformers/models/altclip/configuration_altclip.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class AltCLIPTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AltCLIPTextModel`]. It is used to instantiate a diff --git a/src/transformers/models/altclip/modeling_altclip.py b/src/transformers/models/altclip/modeling_altclip.py index 3e184085331720..dfccab53ea307c 100755 --- a/src/transformers/models/altclip/modeling_altclip.py +++ b/src/transformers/models/altclip/modeling_altclip.py @@ -41,9 +41,6 @@ _CONFIG_FOR_DOC = "AltCLIPConfig" -from ..deprecated._archive_maps import ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - ALTCLIP_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads diff --git a/src/transformers/models/audio_spectrogram_transformer/__init__.py b/src/transformers/models/audio_spectrogram_transformer/__init__.py index 2b48fe07311c1e..9f1d65e1aac839 100644 --- a/src/transformers/models/audio_spectrogram_transformer/__init__.py +++ b/src/transformers/models/audio_spectrogram_transformer/__init__.py @@ -17,10 +17,7 @@ _import_structure = { - "configuration_audio_spectrogram_transformer": [ - "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "ASTConfig", - ], + "configuration_audio_spectrogram_transformer": ["ASTConfig"], "feature_extraction_audio_spectrogram_transformer": ["ASTFeatureExtractor"], } @@ -31,7 +28,6 @@ pass else: _import_structure["modeling_audio_spectrogram_transformer"] = [ - "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ASTForAudioClassification", "ASTModel", "ASTPreTrainedModel", @@ -40,7 +36,6 @@ if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( - AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor @@ -52,7 +47,6 @@ pass else: from .modeling_audio_spectrogram_transformer import ( - AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, diff --git a/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py b/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py index 94a7af6006fd7d..158f1ee5d97034 100644 --- a/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +++ b/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py @@ -22,9 +22,6 
@@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ASTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ASTModel`]. It is used to instantiate an AST diff --git a/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py b/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py index 5ec18e2c7f16b2..1d70e57c2fd128 100644 --- a/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +++ b/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py @@ -45,9 +45,6 @@ _SEQ_CLASS_EXPECTED_LOSS = 0.17 -from ..deprecated._archive_maps import AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class ASTEmbeddings(nn.Module): """ Construct the CLS token, position and patch embeddings. 
diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py index 96a159133cc005..3bb2b8e9d4c199 100644 --- a/src/transformers/models/auto/__init__.py +++ b/src/transformers/models/auto/__init__.py @@ -25,7 +25,7 @@ _import_structure = { "auto_factory": ["get_values"], - "configuration_auto": ["ALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CONFIG_MAPPING", "MODEL_NAMES_MAPPING", "AutoConfig"], + "configuration_auto": ["CONFIG_MAPPING", "MODEL_NAMES_MAPPING", "AutoConfig"], "feature_extraction_auto": ["FEATURE_EXTRACTOR_MAPPING", "AutoFeatureExtractor"], "image_processing_auto": ["IMAGE_PROCESSOR_MAPPING", "AutoImageProcessor"], "processing_auto": ["PROCESSOR_MAPPING", "AutoProcessor"], @@ -213,7 +213,7 @@ if TYPE_CHECKING: from .auto_factory import get_values - from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, MODEL_NAMES_MAPPING, AutoConfig + from .configuration_auto import CONFIG_MAPPING, MODEL_NAMES_MAPPING, AutoConfig from .feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor from .image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor from .processing_auto import PROCESSOR_MAPPING, AutoProcessor diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index c655689d46c172..f5569eb1cb71a6 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import CONFIG_ARCHIVE_MAP_MAPPING_NAMES # noqa: F401, E402 - - CONFIG_MAPPING_NAMES = OrderedDict( [ # Add configs here @@ -982,6 +979,3 @@ def register(model_type, config, exist_ok=False): "match!" 
) CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok) - - -ALL_PRETRAINED_CONFIG_ARCHIVE_MAP = _LazyLoadAllMappings(CONFIG_ARCHIVE_MAP_MAPPING_NAMES) diff --git a/src/transformers/models/autoformer/__init__.py b/src/transformers/models/autoformer/__init__.py index f87bfdea532d61..1ef70173e30a43 100644 --- a/src/transformers/models/autoformer/__init__.py +++ b/src/transformers/models/autoformer/__init__.py @@ -18,10 +18,7 @@ _import_structure = { - "configuration_autoformer": [ - "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "AutoformerConfig", - ], + "configuration_autoformer": ["AutoformerConfig"], } try: @@ -31,7 +28,6 @@ pass else: _import_structure["modeling_autoformer"] = [ - "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "AutoformerForPrediction", "AutoformerModel", "AutoformerPreTrainedModel", @@ -40,7 +36,6 @@ if TYPE_CHECKING: from .configuration_autoformer import ( - AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) @@ -51,7 +46,6 @@ pass else: from .modeling_autoformer import ( - AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, diff --git a/src/transformers/models/autoformer/configuration_autoformer.py b/src/transformers/models/autoformer/configuration_autoformer.py index 11909ac5c38c4c..4f499fd1cb6340 100644 --- a/src/transformers/models/autoformer/configuration_autoformer.py +++ b/src/transformers/models/autoformer/configuration_autoformer.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class AutoformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`AutoformerModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/autoformer/modeling_autoformer.py b/src/transformers/models/autoformer/modeling_autoformer.py index 8a993fad32785f..d7e1d7a4a33a70 100644 --- a/src/transformers/models/autoformer/modeling_autoformer.py +++ b/src/transformers/models/autoformer/modeling_autoformer.py @@ -167,9 +167,6 @@ class AutoformerModelOutput(ModelOutput): static_features: Optional[torch.FloatTensor] = None -from ..deprecated._archive_maps import AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesFeatureEmbedder with TimeSeries->Autoformer class AutoformerFeatureEmbedder(nn.Module): """ diff --git a/src/transformers/models/bark/__init__.py b/src/transformers/models/bark/__init__.py index 03e5865ca4a483..4cb1a606cf6567 100644 --- a/src/transformers/models/bark/__init__.py +++ b/src/transformers/models/bark/__init__.py @@ -22,7 +22,6 @@ _import_structure = { "configuration_bark": [ - "BARK_PRETRAINED_CONFIG_ARCHIVE_MAP", "BarkCoarseConfig", "BarkConfig", "BarkFineConfig", @@ -38,7 +37,6 @@ pass else: _import_structure["modeling_bark"] = [ - "BARK_PRETRAINED_MODEL_ARCHIVE_LIST", "BarkFineModel", "BarkSemanticModel", "BarkCoarseModel", @@ -49,7 +47,6 @@ if TYPE_CHECKING: from .configuration_bark import ( - BARK_PRETRAINED_CONFIG_ARCHIVE_MAP, BarkCoarseConfig, BarkConfig, BarkFineConfig, @@ -64,7 +61,6 @@ pass else: from .modeling_bark import ( - BARK_PRETRAINED_MODEL_ARCHIVE_LIST, BarkCausalModel, BarkCoarseModel, BarkFineModel, diff --git a/src/transformers/models/bark/modeling_bark.py b/src/transformers/models/bark/modeling_bark.py index a40ce794105024..0690c96e6aee53 100644 --- a/src/transformers/models/bark/modeling_bark.py +++ b/src/transformers/models/bark/modeling_bark.py @@ -64,9 +64,6 @@ _CONFIG_FOR_DOC = "BarkConfig" -from ..deprecated._archive_maps import BARK_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - 
# Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/bart/__init__.py b/src/transformers/models/bart/__init__.py index 4f104efce1a4d2..d538fbb7d34304 100644 --- a/src/transformers/models/bart/__init__.py +++ b/src/transformers/models/bart/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_bart": ["BART_PRETRAINED_CONFIG_ARCHIVE_MAP", "BartConfig", "BartOnnxConfig"], + "configuration_bart": ["BartConfig", "BartOnnxConfig"], "tokenization_bart": ["BartTokenizer"], } @@ -43,7 +43,6 @@ pass else: _import_structure["modeling_bart"] = [ - "BART_PRETRAINED_MODEL_ARCHIVE_LIST", "BartForCausalLM", "BartForConditionalGeneration", "BartForQuestionAnswering", @@ -84,7 +83,7 @@ ] if TYPE_CHECKING: - from .configuration_bart import BART_PRETRAINED_CONFIG_ARCHIVE_MAP, BartConfig, BartOnnxConfig + from .configuration_bart import BartConfig, BartOnnxConfig from .tokenization_bart import BartTokenizer try: @@ -102,7 +101,6 @@ pass else: from .modeling_bart import ( - BART_PRETRAINED_MODEL_ARCHIVE_LIST, BartForCausalLM, BartForConditionalGeneration, BartForQuestionAnswering, diff --git a/src/transformers/models/bart/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py index f44286bb08c59b..33fea9a0183afe 100755 --- a/src/transformers/models/bart/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -78,9 +78,6 @@ _QA_EXPECTED_OUTPUT = "' nice puppet'" -from ..deprecated._archive_maps import BART_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/beit/__init__.py b/src/transformers/models/beit/__init__.py index ce399f92e0fa4d..c2f49240d6e64c 100644 --- 
a/src/transformers/models/beit/__init__.py +++ b/src/transformers/models/beit/__init__.py @@ -23,7 +23,7 @@ ) -_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]} +_import_structure = {"configuration_beit": ["BeitConfig", "BeitOnnxConfig"]} try: if not is_vision_available(): @@ -41,7 +41,6 @@ pass else: _import_structure["modeling_beit"] = [ - "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "BeitForImageClassification", "BeitForMaskedImageModeling", "BeitForSemanticSegmentation", @@ -65,7 +64,7 @@ ] if TYPE_CHECKING: - from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig + from .configuration_beit import BeitConfig, BeitOnnxConfig try: if not is_vision_available(): @@ -83,7 +82,6 @@ pass else: from .modeling_beit import ( - BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitBackbone, BeitForImageClassification, BeitForMaskedImageModeling, diff --git a/src/transformers/models/beit/configuration_beit.py b/src/transformers/models/beit/configuration_beit.py index dbb1e755e94b36..4664bdc898fba3 100644 --- a/src/transformers/models/beit/configuration_beit.py +++ b/src/transformers/models/beit/configuration_beit.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class BeitConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BeitModel`]. 
It is used to instantiate an BEiT diff --git a/src/transformers/models/beit/modeling_beit.py b/src/transformers/models/beit/modeling_beit.py index c23d4f4ea4cdee..d6c5a164d73ebe 100755 --- a/src/transformers/models/beit/modeling_beit.py +++ b/src/transformers/models/beit/modeling_beit.py @@ -61,9 +61,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class BeitModelOutputWithPooling(BaseModelOutputWithPooling): """ diff --git a/src/transformers/models/bert/__init__.py b/src/transformers/models/bert/__init__.py index 882655f394e9c9..17048a5d1c967a 100644 --- a/src/transformers/models/bert/__init__.py +++ b/src/transformers/models/bert/__init__.py @@ -26,7 +26,7 @@ _import_structure = { - "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"], + "configuration_bert": ["BertConfig", "BertOnnxConfig"], "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"], } @@ -45,7 +45,6 @@ pass else: _import_structure["modeling_bert"] = [ - "BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "BertForMaskedLM", "BertForMultipleChoice", "BertForNextSentencePrediction", @@ -67,7 +66,6 @@ pass else: _import_structure["modeling_tf_bert"] = [ - "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFBertEmbeddings", "TFBertForMaskedLM", "TFBertForMultipleChoice", @@ -109,7 +107,7 @@ ] if TYPE_CHECKING: - from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig + from .configuration_bert import BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: @@ -127,7 +125,6 @@ pass else: from .modeling_bert import ( - BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, @@ -149,7 +146,6 @@ pass else: from .modeling_tf_bert import ( - TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, 
TFBertForMaskedLM, TFBertForMultipleChoice, diff --git a/src/transformers/models/bert/configuration_bert.py b/src/transformers/models/bert/configuration_bert.py index e692f8284c2bac..7ad3536c96e789 100644 --- a/src/transformers/models/bert/configuration_bert.py +++ b/src/transformers/models/bert/configuration_bert.py @@ -25,9 +25,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class BertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BertModel`] or a [`TFBertModel`]. It is used to diff --git a/src/transformers/models/bert/modeling_bert.py b/src/transformers/models/bert/modeling_bert.py index 129336cc5280c0..b516a97187b8bc 100755 --- a/src/transformers/models/bert/modeling_bert.py +++ b/src/transformers/models/bert/modeling_bert.py @@ -82,9 +82,6 @@ _SEQ_CLASS_EXPECTED_LOSS = 0.01 -from ..deprecated._archive_maps import BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def load_tf_weights_in_bert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: diff --git a/src/transformers/models/bert/modeling_tf_bert.py b/src/transformers/models/bert/modeling_tf_bert.py index 9d027d84316582..89885887c96a4c 100644 --- a/src/transformers/models/bert/modeling_tf_bert.py +++ b/src/transformers/models/bert/modeling_tf_bert.py @@ -90,9 +90,6 @@ _SEQ_CLASS_EXPECTED_LOSS = 0.01 -from ..deprecated._archive_maps import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFBertPreTrainingLoss: """ Loss function suitable for BERT-like pretraining, that is, the task of pretraining a language model by combining diff --git a/src/transformers/models/big_bird/__init__.py b/src/transformers/models/big_bird/__init__.py index ef8ad80aa6b5e6..8eda33d9ee6608 100644 --- a/src/transformers/models/big_bird/__init__.py +++ b/src/transformers/models/big_bird/__init__.py @@ -25,7 +25,7 @@ _import_structure = 
{ - "configuration_big_bird": ["BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP", "BigBirdConfig", "BigBirdOnnxConfig"], + "configuration_big_bird": ["BigBirdConfig", "BigBirdOnnxConfig"], } try: @@ -51,7 +51,6 @@ pass else: _import_structure["modeling_big_bird"] = [ - "BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST", "BigBirdForCausalLM", "BigBirdForMaskedLM", "BigBirdForMultipleChoice", @@ -84,7 +83,7 @@ ] if TYPE_CHECKING: - from .configuration_big_bird import BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdConfig, BigBirdOnnxConfig + from .configuration_big_bird import BigBirdConfig, BigBirdOnnxConfig try: if not is_sentencepiece_available(): @@ -109,7 +108,6 @@ pass else: from .modeling_big_bird import ( - BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdForCausalLM, BigBirdForMaskedLM, BigBirdForMultipleChoice, diff --git a/src/transformers/models/big_bird/configuration_big_bird.py b/src/transformers/models/big_bird/configuration_big_bird.py index f803d56839d744..dfd36d82c37c3a 100644 --- a/src/transformers/models/big_bird/configuration_big_bird.py +++ b/src/transformers/models/big_bird/configuration_big_bird.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class BigBirdConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BigBirdModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/big_bird/modeling_big_bird.py b/src/transformers/models/big_bird/modeling_big_bird.py index 6e5363f0bc6e57..39144c77e22771 100755 --- a/src/transformers/models/big_bird/modeling_big_bird.py +++ b/src/transformers/models/big_bird/modeling_big_bird.py @@ -55,9 +55,6 @@ _CONFIG_FOR_DOC = "BigBirdConfig" -from ..deprecated._archive_maps import BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - _TRIVIA_QA_MAPPING = { "big_bird_attention": "attention/self", "output_layer_norm": "output/LayerNorm", diff --git a/src/transformers/models/bigbird_pegasus/__init__.py b/src/transformers/models/bigbird_pegasus/__init__.py index c4245496e73dc2..85621ce76d902b 100644 --- a/src/transformers/models/bigbird_pegasus/__init__.py +++ b/src/transformers/models/bigbird_pegasus/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_bigbird_pegasus": [ - "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", "BigBirdPegasusConfig", "BigBirdPegasusOnnxConfig", ], @@ -31,7 +30,6 @@ pass else: _import_structure["modeling_bigbird_pegasus"] = [ - "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST", "BigBirdPegasusForCausalLM", "BigBirdPegasusForConditionalGeneration", "BigBirdPegasusForQuestionAnswering", @@ -43,7 +41,6 @@ if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( - BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) @@ -55,7 +52,6 @@ pass else: from .modeling_bigbird_pegasus import ( - BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, diff --git a/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py index 5cdcbca775bf4d..c548573f322d36 100644 --- a/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py +++ 
b/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class BigBirdPegasusConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BigBirdPegasusModel`]. It is used to instantiate diff --git a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py index 6ea7a822d75e75..74ec4432a57a66 100755 --- a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py @@ -54,9 +54,6 @@ _EXPECTED_OUTPUT_SHAPE = [1, 7, 1024] -from ..deprecated._archive_maps import BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. 
diff --git a/src/transformers/models/biogpt/__init__.py b/src/transformers/models/biogpt/__init__.py index ec3d6966ac419d..355c87e67ba2b7 100644 --- a/src/transformers/models/biogpt/__init__.py +++ b/src/transformers/models/biogpt/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"], + "configuration_biogpt": ["BioGptConfig"], "tokenization_biogpt": ["BioGptTokenizer"], } @@ -28,7 +28,6 @@ pass else: _import_structure["modeling_biogpt"] = [ - "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST", "BioGptForCausalLM", "BioGptForTokenClassification", "BioGptForSequenceClassification", @@ -38,7 +37,7 @@ if TYPE_CHECKING: - from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig + from .configuration_biogpt import BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: @@ -48,7 +47,6 @@ pass else: from .modeling_biogpt import ( - BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, diff --git a/src/transformers/models/biogpt/configuration_biogpt.py b/src/transformers/models/biogpt/configuration_biogpt.py index 1b4155c0aea3bb..936fee76328f4d 100644 --- a/src/transformers/models/biogpt/configuration_biogpt.py +++ b/src/transformers/models/biogpt/configuration_biogpt.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class BioGptConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BioGptModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/biogpt/modeling_biogpt.py b/src/transformers/models/biogpt/modeling_biogpt.py index 8a94105081a9dd..ad1a5df71541ff 100755 --- a/src/transformers/models/biogpt/modeling_biogpt.py +++ b/src/transformers/models/biogpt/modeling_biogpt.py @@ -47,9 +47,6 @@ _CONFIG_FOR_DOC = "BioGptConfig" -from ..deprecated._archive_maps import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding with OPT->BioGpt class BioGptLearnedPositionalEmbedding(nn.Embedding): """ diff --git a/src/transformers/models/bit/__init__.py b/src/transformers/models/bit/__init__.py index fc50659d9fa068..8f298a9adf6535 100644 --- a/src/transformers/models/bit/__init__.py +++ b/src/transformers/models/bit/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available -_import_structure = {"configuration_bit": ["BIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BitConfig", "BitOnnxConfig"]} +_import_structure = {"configuration_bit": ["BitConfig", "BitOnnxConfig"]} try: if not is_torch_available(): @@ -25,7 +25,6 @@ pass else: _import_structure["modeling_bit"] = [ - "BIT_PRETRAINED_MODEL_ARCHIVE_LIST", "BitForImageClassification", "BitModel", "BitPreTrainedModel", @@ -43,7 +42,7 @@ if TYPE_CHECKING: - from .configuration_bit import BIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BitConfig, BitOnnxConfig + from .configuration_bit import BitConfig, BitOnnxConfig try: if not is_torch_available(): @@ -52,7 +51,6 @@ pass else: from .modeling_bit import ( - BIT_PRETRAINED_MODEL_ARCHIVE_LIST, BitBackbone, BitForImageClassification, BitModel, diff --git a/src/transformers/models/bit/configuration_bit.py b/src/transformers/models/bit/configuration_bit.py index 2ec6307421bfaa..f1532a74b9ae0f 100644 --- a/src/transformers/models/bit/configuration_bit.py +++ b/src/transformers/models/bit/configuration_bit.py @@ -22,9 +22,6 
@@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import BIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class BitConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BitModel`]. It is used to instantiate an BiT diff --git a/src/transformers/models/bit/modeling_bit.py b/src/transformers/models/bit/modeling_bit.py index 5906aae5e5e481..af96150dc99338 100644 --- a/src/transformers/models/bit/modeling_bit.py +++ b/src/transformers/models/bit/modeling_bit.py @@ -57,9 +57,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat" -from ..deprecated._archive_maps import BIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def get_padding_value(padding=None, kernel_size=7, stride=1, dilation=1) -> Tuple[Tuple, bool]: r""" Utility function to get the tuple padding value given the kernel_size and padding. diff --git a/src/transformers/models/blenderbot/__init__.py b/src/transformers/models/blenderbot/__init__.py index 86d857b1e9a26d..8b53b9100a4af1 100644 --- a/src/transformers/models/blenderbot/__init__.py +++ b/src/transformers/models/blenderbot/__init__.py @@ -26,7 +26,6 @@ _import_structure = { "configuration_blenderbot": [ - "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], @@ -48,7 +47,6 @@ pass else: _import_structure["modeling_blenderbot"] = [ - "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", @@ -84,7 +82,6 @@ if TYPE_CHECKING: from .configuration_blenderbot import ( - BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) @@ -105,7 +102,6 @@ pass else: from .modeling_blenderbot import ( - BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, diff --git a/src/transformers/models/blenderbot/configuration_blenderbot.py 
b/src/transformers/models/blenderbot/configuration_blenderbot.py index 00608710592998..6b9a12e02e35b1 100644 --- a/src/transformers/models/blenderbot/configuration_blenderbot.py +++ b/src/transformers/models/blenderbot/configuration_blenderbot.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class BlenderbotConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BlenderbotModel`]. It is used to instantiate an diff --git a/src/transformers/models/blenderbot/modeling_blenderbot.py b/src/transformers/models/blenderbot/modeling_blenderbot.py index 1f61fe62a65afa..6fc86bcec45723 100755 --- a/src/transformers/models/blenderbot/modeling_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_blenderbot.py @@ -53,9 +53,6 @@ _CHECKPOINT_FOR_DOC = "facebook/blenderbot-400M-distill" -from ..deprecated._archive_maps import BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ diff --git a/src/transformers/models/blenderbot_small/__init__.py b/src/transformers/models/blenderbot_small/__init__.py index 5622ab70de6429..e6cab05c0cae02 100644 --- a/src/transformers/models/blenderbot_small/__init__.py +++ b/src/transformers/models/blenderbot_small/__init__.py @@ -25,7 +25,6 @@ _import_structure = { "configuration_blenderbot_small": [ - "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotSmallConfig", "BlenderbotSmallOnnxConfig", ], @@ -47,7 +46,6 @@ pass else: _import_structure["modeling_blenderbot_small"] = [ - "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotSmallForCausalLM", "BlenderbotSmallForConditionalGeneration", "BlenderbotSmallModel", @@ -80,7 +78,6 @@ if TYPE_CHECKING: from .configuration_blenderbot_small import 
( - BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) @@ -101,7 +98,6 @@ pass else: from .modeling_blenderbot_small import ( - BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, diff --git a/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py b/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py index 8b54bd3760feea..667db5bd55bc40 100644 --- a/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py @@ -27,8 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - class BlenderbotSmallConfig(PretrainedConfig): r""" diff --git a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py index da07669a4e777d..504f073ed0bec6 100755 --- a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py @@ -49,9 +49,6 @@ _CONFIG_FOR_DOC = "BlenderbotSmallConfig" -from ..deprecated._archive_maps import BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ diff --git a/src/transformers/models/blip/__init__.py b/src/transformers/models/blip/__init__.py index a7001788e62916..f78c2500bd64f4 100644 --- a/src/transformers/models/blip/__init__.py +++ b/src/transformers/models/blip/__init__.py @@ -24,7 +24,6 @@ _import_structure = { "configuration_blip": [ - "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlipConfig", "BlipTextConfig", "BlipVisionConfig", @@ -48,7 
+47,6 @@ pass else: _import_structure["modeling_blip"] = [ - "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "BlipModel", "BlipPreTrainedModel", "BlipForConditionalGeneration", @@ -65,7 +63,6 @@ pass else: _import_structure["modeling_tf_blip"] = [ - "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFBlipModel", "TFBlipPreTrainedModel", "TFBlipForConditionalGeneration", @@ -76,7 +73,7 @@ ] if TYPE_CHECKING: - from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig + from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: @@ -94,7 +91,6 @@ pass else: from .modeling_blip import ( - BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, @@ -111,7 +107,6 @@ pass else: from .modeling_tf_blip import ( - TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, diff --git a/src/transformers/models/blip/configuration_blip.py b/src/transformers/models/blip/configuration_blip.py index 2a76660c0f8ead..1a6fe37aa4f278 100644 --- a/src/transformers/models/blip/configuration_blip.py +++ b/src/transformers/models/blip/configuration_blip.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class BlipTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BlipTextModel`]. 
It is used to instantiate a BLIP diff --git a/src/transformers/models/blip/modeling_blip.py b/src/transformers/models/blip/modeling_blip.py index bd61a1cbd781e7..c99c3c06b9dd5b 100644 --- a/src/transformers/models/blip/modeling_blip.py +++ b/src/transformers/models/blip/modeling_blip.py @@ -42,9 +42,6 @@ _CHECKPOINT_FOR_DOC = "Salesforce/blip-vqa-base" -from ..deprecated._archive_maps import BLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.clip.modeling_clip.contrastive_loss def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) diff --git a/src/transformers/models/blip/modeling_tf_blip.py b/src/transformers/models/blip/modeling_tf_blip.py index 37098467a7ad6c..5312cf2323b2e1 100644 --- a/src/transformers/models/blip/modeling_tf_blip.py +++ b/src/transformers/models/blip/modeling_tf_blip.py @@ -49,9 +49,6 @@ _CHECKPOINT_FOR_DOC = "Salesforce/blip-vqa-base" -from ..deprecated._archive_maps import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.clip.modeling_tf_clip.contrastive_loss def contrastive_loss(logits: tf.Tensor) -> tf.Tensor: return tf.math.reduce_mean( diff --git a/src/transformers/models/blip_2/__init__.py b/src/transformers/models/blip_2/__init__.py index 6fbfd53b3703fd..6897dd35c89bd4 100644 --- a/src/transformers/models/blip_2/__init__.py +++ b/src/transformers/models/blip_2/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_blip_2": [ - "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Blip2Config", "Blip2QFormerConfig", "Blip2VisionConfig", @@ -33,7 +32,6 @@ pass else: _import_structure["modeling_blip_2"] = [ - "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Blip2Model", "Blip2QFormerModel", "Blip2PreTrainedModel", @@ -43,7 +41,6 @@ if TYPE_CHECKING: from .configuration_blip_2 import ( - BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Blip2Config, Blip2QFormerConfig, 
Blip2VisionConfig, @@ -57,7 +54,6 @@ pass else: from .modeling_blip_2 import ( - BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, Blip2ForConditionalGeneration, Blip2Model, Blip2PreTrainedModel, diff --git a/src/transformers/models/blip_2/configuration_blip_2.py b/src/transformers/models/blip_2/configuration_blip_2.py index f5645f5deed57c..70dea87d352b27 100644 --- a/src/transformers/models/blip_2/configuration_blip_2.py +++ b/src/transformers/models/blip_2/configuration_blip_2.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Blip2VisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Blip2VisionModel`]. It is used to instantiate a diff --git a/src/transformers/models/blip_2/modeling_blip_2.py b/src/transformers/models/blip_2/modeling_blip_2.py index edd0d9a6d76133..12396bf286eaaf 100644 --- a/src/transformers/models/blip_2/modeling_blip_2.py +++ b/src/transformers/models/blip_2/modeling_blip_2.py @@ -48,9 +48,6 @@ _CHECKPOINT_FOR_DOC = "Salesforce/blip2-opt-2.7b" -from ..deprecated._archive_maps import BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class Blip2ForConditionalGenerationModelOutput(ModelOutput): """ diff --git a/src/transformers/models/bloom/__init__.py b/src/transformers/models/bloom/__init__.py index 32e8617e8270e9..3c903b39dca23f 100644 --- a/src/transformers/models/bloom/__init__.py +++ b/src/transformers/models/bloom/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"], + "configuration_bloom": ["BloomConfig", "BloomOnnxConfig"], } try: if not is_tokenizers_available(): @@ -41,7 +41,6 @@ pass else: _import_structure["modeling_bloom"] = [ - "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", "BloomForCausalLM", "BloomModel", "BloomPreTrainedModel", @@ -64,7 +63,7 @@ if TYPE_CHECKING: - from 
.configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig + from .configuration_bloom import BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): @@ -81,7 +80,6 @@ pass else: from .modeling_bloom import ( - BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, diff --git a/src/transformers/models/bloom/configuration_bloom.py b/src/transformers/models/bloom/configuration_bloom.py index e04877485e3f54..ddea3f720a4d19 100644 --- a/src/transformers/models/bloom/configuration_bloom.py +++ b/src/transformers/models/bloom/configuration_bloom.py @@ -30,9 +30,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class BloomConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`BloomModel`]. It is used to instantiate a Bloom diff --git a/src/transformers/models/bloom/modeling_bloom.py b/src/transformers/models/bloom/modeling_bloom.py index 05b18f5938106e..0ef158b1f85f11 100644 --- a/src/transformers/models/bloom/modeling_bloom.py +++ b/src/transformers/models/bloom/modeling_bloom.py @@ -44,9 +44,6 @@ _CONFIG_FOR_DOC = "BloomConfig" -from ..deprecated._archive_maps import BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: """ Link to paper: https://arxiv.org/abs/2108.12409 Alibi tensor is not causal as the original paper mentions, it diff --git a/src/transformers/models/bridgetower/__init__.py b/src/transformers/models/bridgetower/__init__.py index cbd5bd4a366aed..3120ca9f2a163a 100644 --- a/src/transformers/models/bridgetower/__init__.py +++ b/src/transformers/models/bridgetower/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_bridgetower": [ - "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP", "BridgeTowerConfig", 
"BridgeTowerTextConfig", "BridgeTowerVisionConfig", @@ -41,7 +40,6 @@ pass else: _import_structure["modeling_bridgetower"] = [ - "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST", "BridgeTowerForContrastiveLearning", "BridgeTowerForImageAndTextRetrieval", "BridgeTowerForMaskedLM", @@ -52,7 +50,6 @@ if TYPE_CHECKING: from .configuration_bridgetower import ( - BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, @@ -74,7 +71,6 @@ pass else: from .modeling_bridgetower import ( - BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, diff --git a/src/transformers/models/bridgetower/configuration_bridgetower.py b/src/transformers/models/bridgetower/configuration_bridgetower.py index 2d3340ad62ab67..8513ce21f7606e 100644 --- a/src/transformers/models/bridgetower/configuration_bridgetower.py +++ b/src/transformers/models/bridgetower/configuration_bridgetower.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class BridgeTowerVisionConfig(PretrainedConfig): r""" This is the configuration class to store the vision configuration of a [`BridgeTowerModel`]. Instantiating a diff --git a/src/transformers/models/bridgetower/modeling_bridgetower.py b/src/transformers/models/bridgetower/modeling_bridgetower.py index 6bbb0435464af3..91cbda9b72edbb 100644 --- a/src/transformers/models/bridgetower/modeling_bridgetower.py +++ b/src/transformers/models/bridgetower/modeling_bridgetower.py @@ -45,9 +45,6 @@ _TOKENIZER_FOR_DOC = "RobertaTokenizer" -from ..deprecated._archive_maps import BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - BRIDGETOWER_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module `_ subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and diff --git a/src/transformers/models/bros/__init__.py b/src/transformers/models/bros/__init__.py index b08d55836488a0..516c6349cd120c 100644 --- a/src/transformers/models/bros/__init__.py +++ b/src/transformers/models/bros/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_bros": ["BROS_PRETRAINED_CONFIG_ARCHIVE_MAP", "BrosConfig"], + "configuration_bros": ["BrosConfig"], } try: @@ -35,7 +35,6 @@ pass else: _import_structure["modeling_bros"] = [ - "BROS_PRETRAINED_MODEL_ARCHIVE_LIST", "BrosPreTrainedModel", "BrosModel", "BrosForTokenClassification", @@ -45,7 +44,7 @@ if TYPE_CHECKING: - from .configuration_bros import BROS_PRETRAINED_CONFIG_ARCHIVE_MAP, BrosConfig + from .configuration_bros import BrosConfig try: if not is_tokenizers_available(): @@ -62,7 +61,6 @@ pass else: from .modeling_bros import ( - BROS_PRETRAINED_MODEL_ARCHIVE_LIST, BrosForTokenClassification, BrosModel, BrosPreTrainedModel, diff --git a/src/transformers/models/bros/configuration_bros.py b/src/transformers/models/bros/configuration_bros.py index 547bbf39ad2ccd..6a1ef6d948e9eb 100644 --- a/src/transformers/models/bros/configuration_bros.py +++ b/src/transformers/models/bros/configuration_bros.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import BROS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class BrosConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BrosModel`] or a [`TFBrosModel`]. 
It is used to diff --git a/src/transformers/models/bros/modeling_bros.py b/src/transformers/models/bros/modeling_bros.py index 32f0338f0ec061..0f80cec4049ef5 100755 --- a/src/transformers/models/bros/modeling_bros.py +++ b/src/transformers/models/bros/modeling_bros.py @@ -48,9 +48,6 @@ _CONFIG_FOR_DOC = "BrosConfig" -from ..deprecated._archive_maps import BROS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - BROS_START_DOCSTRING = r""" This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage diff --git a/src/transformers/models/camembert/__init__.py b/src/transformers/models/camembert/__init__.py index 9882fc2b973355..1759762f47f1a1 100644 --- a/src/transformers/models/camembert/__init__.py +++ b/src/transformers/models/camembert/__init__.py @@ -25,7 +25,7 @@ _import_structure = { - "configuration_camembert": ["CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CamembertConfig", "CamembertOnnxConfig"], + "configuration_camembert": ["CamembertConfig", "CamembertOnnxConfig"], } try: @@ -51,7 +51,6 @@ pass else: _import_structure["modeling_camembert"] = [ - "CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "CamembertForCausalLM", "CamembertForMaskedLM", "CamembertForMultipleChoice", @@ -69,7 +68,6 @@ pass else: _import_structure["modeling_tf_camembert"] = [ - "TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCamembertForCausalLM", "TFCamembertForMaskedLM", "TFCamembertForMultipleChoice", @@ -82,7 +80,7 @@ if TYPE_CHECKING: - from .configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig, CamembertOnnxConfig + from .configuration_camembert import CamembertConfig, CamembertOnnxConfig try: if not is_sentencepiece_available(): @@ -107,7 +105,6 @@ pass else: from .modeling_camembert import ( - CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, CamembertForCausalLM, CamembertForMaskedLM, 
CamembertForMultipleChoice, @@ -125,7 +122,6 @@ pass else: from .modeling_tf_camembert import ( - TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFCamembertForCausalLM, TFCamembertForMaskedLM, TFCamembertForMultipleChoice, diff --git a/src/transformers/models/camembert/configuration_camembert.py b/src/transformers/models/camembert/configuration_camembert.py index d29ca067db2790..124d14abec147b 100644 --- a/src/transformers/models/camembert/configuration_camembert.py +++ b/src/transformers/models/camembert/configuration_camembert.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class CamembertConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`CamembertModel`] or a [`TFCamembertModel`]. It is diff --git a/src/transformers/models/camembert/modeling_camembert.py b/src/transformers/models/camembert/modeling_camembert.py index f399fb3f5cfb9b..368b3fccaceb08 100644 --- a/src/transformers/models/camembert/modeling_camembert.py +++ b/src/transformers/models/camembert/modeling_camembert.py @@ -52,9 +52,6 @@ _CONFIG_FOR_DOC = "CamembertConfig" -from ..deprecated._archive_maps import CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - CAMEMBERT_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the diff --git a/src/transformers/models/camembert/modeling_tf_camembert.py b/src/transformers/models/camembert/modeling_tf_camembert.py index 9ec998593d51b9..9e66f124689808 100644 --- a/src/transformers/models/camembert/modeling_tf_camembert.py +++ b/src/transformers/models/camembert/modeling_tf_camembert.py @@ -66,9 +66,6 @@ _CONFIG_FOR_DOC = "CamembertConfig" -from ..deprecated._archive_maps import TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - CAMEMBERT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. 
Check the superclass documentation for the generic methods the diff --git a/src/transformers/models/canine/__init__.py b/src/transformers/models/canine/__init__.py index d036045e2f2156..93f103344d476b 100644 --- a/src/transformers/models/canine/__init__.py +++ b/src/transformers/models/canine/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"], + "configuration_canine": ["CanineConfig"], "tokenization_canine": ["CanineTokenizer"], } @@ -28,7 +28,6 @@ pass else: _import_structure["modeling_canine"] = [ - "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST", "CanineForMultipleChoice", "CanineForQuestionAnswering", "CanineForSequenceClassification", @@ -41,7 +40,7 @@ if TYPE_CHECKING: - from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig + from .configuration_canine import CanineConfig from .tokenization_canine import CanineTokenizer try: @@ -51,7 +50,6 @@ pass else: from .modeling_canine import ( - CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, diff --git a/src/transformers/models/canine/configuration_canine.py b/src/transformers/models/canine/configuration_canine.py index c5a77a5c4b47bc..e3d2d1373b9983 100644 --- a/src/transformers/models/canine/configuration_canine.py +++ b/src/transformers/models/canine/configuration_canine.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class CanineConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`CanineModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/canine/modeling_canine.py b/src/transformers/models/canine/modeling_canine.py index 39d89c6e0b3da8..fda0ae72e6fce8 100644 --- a/src/transformers/models/canine/modeling_canine.py +++ b/src/transformers/models/canine/modeling_canine.py @@ -53,9 +53,6 @@ _CONFIG_FOR_DOC = "CanineConfig" -from ..deprecated._archive_maps import CANINE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Support up to 16 hash functions. _PRIMES = [31, 43, 59, 61, 73, 97, 103, 113, 137, 149, 157, 173, 181, 193, 211, 223] diff --git a/src/transformers/models/chinese_clip/__init__.py b/src/transformers/models/chinese_clip/__init__.py index dbc0a57e8324f3..03c9665ab0d09f 100644 --- a/src/transformers/models/chinese_clip/__init__.py +++ b/src/transformers/models/chinese_clip/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_chinese_clip": [ - "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "ChineseCLIPConfig", "ChineseCLIPOnnxConfig", "ChineseCLIPTextConfig", @@ -43,7 +42,6 @@ pass else: _import_structure["modeling_chinese_clip"] = [ - "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "ChineseCLIPModel", "ChineseCLIPPreTrainedModel", "ChineseCLIPTextModel", @@ -52,7 +50,6 @@ if TYPE_CHECKING: from .configuration_chinese_clip import ( - CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, @@ -75,7 +72,6 @@ pass else: from .modeling_chinese_clip import ( - CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, diff --git a/src/transformers/models/chinese_clip/configuration_chinese_clip.py b/src/transformers/models/chinese_clip/configuration_chinese_clip.py index 349833d1f2c335..0cd73f67f2d121 100644 --- a/src/transformers/models/chinese_clip/configuration_chinese_clip.py +++ b/src/transformers/models/chinese_clip/configuration_chinese_clip.py @@ -31,9 +31,6 @@ logger = logging.get_logger(__name__) -from 
..deprecated._archive_maps import CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ChineseCLIPTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate a diff --git a/src/transformers/models/chinese_clip/modeling_chinese_clip.py b/src/transformers/models/chinese_clip/modeling_chinese_clip.py index 7d5c8f2fcc855d..573a39fb1c2946 100644 --- a/src/transformers/models/chinese_clip/modeling_chinese_clip.py +++ b/src/transformers/models/chinese_clip/modeling_chinese_clip.py @@ -49,9 +49,6 @@ _CONFIG_FOR_DOC = "ChineseCLIPConfig" -from ..deprecated._archive_maps import CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html # Copied from transformers.models.clip.modeling_clip.contrastive_loss def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: diff --git a/src/transformers/models/clap/__init__.py b/src/transformers/models/clap/__init__.py index 57e39b6e1fa660..4d3d3ba04e136f 100644 --- a/src/transformers/models/clap/__init__.py +++ b/src/transformers/models/clap/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_clap": [ - "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapAudioConfig", "ClapConfig", "ClapTextConfig", @@ -33,7 +32,6 @@ pass else: _import_structure["modeling_clap"] = [ - "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapModel", "ClapPreTrainedModel", "ClapTextModel", @@ -45,7 +43,6 @@ if TYPE_CHECKING: from .configuration_clap import ( - CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, @@ -60,7 +57,6 @@ else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( - CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, diff --git a/src/transformers/models/clap/modeling_clap.py b/src/transformers/models/clap/modeling_clap.py 
index c21e173133a17f..5b86ca657b973b 100644 --- a/src/transformers/models/clap/modeling_clap.py +++ b/src/transformers/models/clap/modeling_clap.py @@ -45,9 +45,6 @@ _CHECKPOINT_FOR_DOC = "laion/clap-htsat-fused" -from ..deprecated._archive_maps import CLAP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Adapted from: https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/utils.py#L191 def interpolate(hidden_states, ratio): """ diff --git a/src/transformers/models/clip/__init__.py b/src/transformers/models/clip/__init__.py index 868c46616e9b33..36247e943ecaf7 100644 --- a/src/transformers/models/clip/__init__.py +++ b/src/transformers/models/clip/__init__.py @@ -26,7 +26,6 @@ _import_structure = { "configuration_clip": [ - "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", @@ -60,7 +59,6 @@ pass else: _import_structure["modeling_clip"] = [ - "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", @@ -77,7 +75,6 @@ pass else: _import_structure["modeling_tf_clip"] = [ - "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", @@ -103,7 +100,6 @@ if TYPE_CHECKING: from .configuration_clip import ( - CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, @@ -136,7 +132,6 @@ pass else: from .modeling_clip import ( - CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPForImageClassification, CLIPModel, CLIPPreTrainedModel, @@ -153,7 +148,6 @@ pass else: from .modeling_tf_clip import ( - TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, diff --git a/src/transformers/models/clip/configuration_clip.py b/src/transformers/models/clip/configuration_clip.py index a48cb73a9715ba..827fe31d5b4533 100644 --- a/src/transformers/models/clip/configuration_clip.py +++ b/src/transformers/models/clip/configuration_clip.py @@ -31,9 +31,6 @@ logger = logging.get_logger(__name__) 
-from ..deprecated._archive_maps import CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class CLIPTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`CLIPTextModel`]. It is used to instantiate a CLIP diff --git a/src/transformers/models/clip/modeling_clip.py b/src/transformers/models/clip/modeling_clip.py index 03e2fceb0e5b83..c2fc3424a9ce33 100644 --- a/src/transformers/models/clip/modeling_clip.py +++ b/src/transformers/models/clip/modeling_clip.py @@ -49,9 +49,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "LABEL_0" -from ..deprecated._archive_maps import CLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # contrastive loss function, adapted from # https://sachinruk.github.io/blog/2021-03-07-clip.html def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: diff --git a/src/transformers/models/clip/modeling_tf_clip.py b/src/transformers/models/clip/modeling_tf_clip.py index c7e8ba7f5c954e..142141fdc4df4d 100644 --- a/src/transformers/models/clip/modeling_tf_clip.py +++ b/src/transformers/models/clip/modeling_tf_clip.py @@ -52,9 +52,6 @@ _CHECKPOINT_FOR_DOC = "openai/clip-vit-base-patch32" -from ..deprecated._archive_maps import TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - LARGE_NEGATIVE = -1e8 diff --git a/src/transformers/models/clipseg/__init__.py b/src/transformers/models/clipseg/__init__.py index 0e2e250e507a81..cb7daf11553efd 100644 --- a/src/transformers/models/clipseg/__init__.py +++ b/src/transformers/models/clipseg/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_clipseg": [ - "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPSegConfig", "CLIPSegTextConfig", "CLIPSegVisionConfig", @@ -33,7 +32,6 @@ pass else: _import_structure["modeling_clipseg"] = [ - "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPSegModel", "CLIPSegPreTrainedModel", "CLIPSegTextModel", @@ -43,7 +41,6 @@ if TYPE_CHECKING: from .configuration_clipseg import ( - CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, 
CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, @@ -57,7 +54,6 @@ pass else: from .modeling_clipseg import ( - CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, diff --git a/src/transformers/models/clipseg/configuration_clipseg.py b/src/transformers/models/clipseg/configuration_clipseg.py index 07ba08f4759c93..7df10bfe8cf771 100644 --- a/src/transformers/models/clipseg/configuration_clipseg.py +++ b/src/transformers/models/clipseg/configuration_clipseg.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class CLIPSegTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`CLIPSegModel`]. It is used to instantiate an diff --git a/src/transformers/models/clipseg/modeling_clipseg.py b/src/transformers/models/clipseg/modeling_clipseg.py index 59d6c1ba1ea329..00dcecff2d26f7 100644 --- a/src/transformers/models/clipseg/modeling_clipseg.py +++ b/src/transformers/models/clipseg/modeling_clipseg.py @@ -43,9 +43,6 @@ _CHECKPOINT_FOR_DOC = "CIDAS/clipseg-rd64-refined" -from ..deprecated._archive_maps import CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: diff --git a/src/transformers/models/clvp/__init__.py b/src/transformers/models/clvp/__init__.py index fb88e24171c369..6ef4bc60e32148 100644 --- a/src/transformers/models/clvp/__init__.py +++ b/src/transformers/models/clvp/__init__.py @@ -22,7 +22,6 @@ _import_structure = { "configuration_clvp": [ - "CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "ClvpConfig", "ClvpDecoderConfig", "ClvpEncoderConfig", @@ -40,7 +39,6 @@ pass else: _import_structure["modeling_clvp"] = [ - "CLVP_PRETRAINED_MODEL_ARCHIVE_LIST", 
"ClvpModelForConditionalGeneration", "ClvpForCausalLM", "ClvpModel", @@ -52,7 +50,6 @@ if TYPE_CHECKING: from .configuration_clvp import ( - CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP, ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig, @@ -68,7 +65,6 @@ pass else: from .modeling_clvp import ( - CLVP_PRETRAINED_MODEL_ARCHIVE_LIST, ClvpDecoder, ClvpEncoder, ClvpForCausalLM, diff --git a/src/transformers/models/clvp/configuration_clvp.py b/src/transformers/models/clvp/configuration_clvp.py index 00906e7d7f86b6..505238b2a8a7fb 100644 --- a/src/transformers/models/clvp/configuration_clvp.py +++ b/src/transformers/models/clvp/configuration_clvp.py @@ -29,9 +29,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ClvpEncoderConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ClvpEncoder`]. It is used to instantiate a CLVP diff --git a/src/transformers/models/clvp/modeling_clvp.py b/src/transformers/models/clvp/modeling_clvp.py index 654989dcbd6039..a36e9822421ecc 100644 --- a/src/transformers/models/clvp/modeling_clvp.py +++ b/src/transformers/models/clvp/modeling_clvp.py @@ -56,9 +56,6 @@ _CHECKPOINT_FOR_DOC = "susnato/clvp_dev" -from ..deprecated._archive_maps import CLVP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.clip.modeling_clip.contrastive_loss def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) diff --git a/src/transformers/models/codegen/__init__.py b/src/transformers/models/codegen/__init__.py index a1ce89620035d5..7d4cb05adb20e9 100644 --- a/src/transformers/models/codegen/__init__.py +++ b/src/transformers/models/codegen/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_codegen": ["CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", "CodeGenConfig", "CodeGenOnnxConfig"], + 
"configuration_codegen": ["CodeGenConfig", "CodeGenOnnxConfig"], "tokenization_codegen": ["CodeGenTokenizer"], } @@ -36,14 +36,13 @@ pass else: _import_structure["modeling_codegen"] = [ - "CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST", "CodeGenForCausalLM", "CodeGenModel", "CodeGenPreTrainedModel", ] if TYPE_CHECKING: - from .configuration_codegen import CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, CodeGenConfig, CodeGenOnnxConfig + from .configuration_codegen import CodeGenConfig, CodeGenOnnxConfig from .tokenization_codegen import CodeGenTokenizer try: @@ -61,7 +60,6 @@ pass else: from .modeling_codegen import ( - CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST, CodeGenForCausalLM, CodeGenModel, CodeGenPreTrainedModel, diff --git a/src/transformers/models/codegen/configuration_codegen.py b/src/transformers/models/codegen/configuration_codegen.py index e16dd1fadcf74a..db0008a033312b 100644 --- a/src/transformers/models/codegen/configuration_codegen.py +++ b/src/transformers/models/codegen/configuration_codegen.py @@ -25,9 +25,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class CodeGenConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`CodeGenModel`]. 
It is used to instantiate a diff --git a/src/transformers/models/codegen/modeling_codegen.py b/src/transformers/models/codegen/modeling_codegen.py index c14e33bd1261dd..0987bbc3053fa6 100644 --- a/src/transformers/models/codegen/modeling_codegen.py +++ b/src/transformers/models/codegen/modeling_codegen.py @@ -34,9 +34,6 @@ _CONFIG_FOR_DOC = "CodeGenConfig" -from ..deprecated._archive_maps import CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.gptj.modeling_gptj.create_sinusoidal_positions def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor: inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim)) diff --git a/src/transformers/models/cohere/__init__.py b/src/transformers/models/cohere/__init__.py index d6f69d1e496d0e..f92e8b68a50a72 100644 --- a/src/transformers/models/cohere/__init__.py +++ b/src/transformers/models/cohere/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_cohere": ["COHERE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CohereConfig"], + "configuration_cohere": ["CohereConfig"], } @@ -49,7 +49,7 @@ if TYPE_CHECKING: - from .configuration_cohere import COHERE_PRETRAINED_CONFIG_ARCHIVE_MAP, CohereConfig + from .configuration_cohere import CohereConfig try: if not is_tokenizers_available(): diff --git a/src/transformers/models/cohere/configuration_cohere.py b/src/transformers/models/cohere/configuration_cohere.py index 7ceca2b887af7d..f0b0e50b9177f4 100644 --- a/src/transformers/models/cohere/configuration_cohere.py +++ b/src/transformers/models/cohere/configuration_cohere.py @@ -25,8 +25,6 @@ logger = logging.get_logger(__name__) -COHERE_PRETRAINED_CONFIG_ARCHIVE_MAP = {} - class CohereConfig(PretrainedConfig): r""" diff --git a/src/transformers/models/conditional_detr/__init__.py b/src/transformers/models/conditional_detr/__init__.py index 565323321160ff..c7d5c5261d6e67 100644 --- a/src/transformers/models/conditional_detr/__init__.py +++ 
b/src/transformers/models/conditional_detr/__init__.py @@ -19,7 +19,6 @@ _import_structure = { "configuration_conditional_detr": [ - "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] @@ -41,7 +40,6 @@ pass else: _import_structure["modeling_conditional_detr"] = [ - "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", @@ -51,7 +49,6 @@ if TYPE_CHECKING: from .configuration_conditional_detr import ( - CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) @@ -72,7 +69,6 @@ pass else: from .modeling_conditional_detr import ( - CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, diff --git a/src/transformers/models/conditional_detr/configuration_conditional_detr.py b/src/transformers/models/conditional_detr/configuration_conditional_detr.py index 4f95de3582f082..e5dc3930d37337 100644 --- a/src/transformers/models/conditional_detr/configuration_conditional_detr.py +++ b/src/transformers/models/conditional_detr/configuration_conditional_detr.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ConditionalDetrConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ConditionalDetrModel`]. 
It is used to instantiate diff --git a/src/transformers/models/conditional_detr/modeling_conditional_detr.py b/src/transformers/models/conditional_detr/modeling_conditional_detr.py index d723d3866ea416..2eb0ea885cfa0c 100644 --- a/src/transformers/models/conditional_detr/modeling_conditional_detr.py +++ b/src/transformers/models/conditional_detr/modeling_conditional_detr.py @@ -61,9 +61,6 @@ _CHECKPOINT_FOR_DOC = "microsoft/conditional-detr-resnet-50" -from ..deprecated._archive_maps import CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class ConditionalDetrDecoderOutput(BaseModelOutputWithCrossAttentions): """ diff --git a/src/transformers/models/convbert/__init__.py b/src/transformers/models/convbert/__init__.py index f1b19a949abbef..15c6bb51767af1 100644 --- a/src/transformers/models/convbert/__init__.py +++ b/src/transformers/models/convbert/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"], + "configuration_convbert": ["ConvBertConfig", "ConvBertOnnxConfig"], "tokenization_convbert": ["ConvBertTokenizer"], } @@ -42,7 +42,6 @@ pass else: _import_structure["modeling_convbert"] = [ - "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "ConvBertForMaskedLM", "ConvBertForMultipleChoice", "ConvBertForQuestionAnswering", @@ -62,7 +61,6 @@ pass else: _import_structure["modeling_tf_convbert"] = [ - "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFConvBertForMaskedLM", "TFConvBertForMultipleChoice", "TFConvBertForQuestionAnswering", @@ -75,7 +73,7 @@ if TYPE_CHECKING: - from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig + from .configuration_convbert import ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: @@ -93,7 +91,6 @@ pass else: from .modeling_convbert import ( - CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, 
ConvBertForMultipleChoice, ConvBertForQuestionAnswering, @@ -112,7 +109,6 @@ pass else: from .modeling_tf_convbert import ( - TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, diff --git a/src/transformers/models/convbert/configuration_convbert.py b/src/transformers/models/convbert/configuration_convbert.py index d309ca396baffc..82d555cd3a3ccf 100644 --- a/src/transformers/models/convbert/configuration_convbert.py +++ b/src/transformers/models/convbert/configuration_convbert.py @@ -25,9 +25,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ConvBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ConvBertModel`]. It is used to instantiate an diff --git a/src/transformers/models/convbert/modeling_convbert.py b/src/transformers/models/convbert/modeling_convbert.py index d88add4e1390ef..dd5d06ef92541c 100755 --- a/src/transformers/models/convbert/modeling_convbert.py +++ b/src/transformers/models/convbert/modeling_convbert.py @@ -46,9 +46,6 @@ _CONFIG_FOR_DOC = "ConvBertConfig" -from ..deprecated._archive_maps import CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def load_tf_weights_in_convbert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: diff --git a/src/transformers/models/convbert/modeling_tf_convbert.py b/src/transformers/models/convbert/modeling_tf_convbert.py index 7206b3558ace8a..a8ac11a8cdf910 100644 --- a/src/transformers/models/convbert/modeling_tf_convbert.py +++ b/src/transformers/models/convbert/modeling_tf_convbert.py @@ -61,9 +61,6 @@ _CONFIG_FOR_DOC = "ConvBertConfig" -from ..deprecated._archive_maps import TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->ConvBert class 
TFConvBertEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" diff --git a/src/transformers/models/convnext/__init__.py b/src/transformers/models/convnext/__init__.py index 099a7fc9d63da4..4e9a90bd4deb33 100644 --- a/src/transformers/models/convnext/__init__.py +++ b/src/transformers/models/convnext/__init__.py @@ -22,9 +22,7 @@ ) -_import_structure = { - "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"] -} +_import_structure = {"configuration_convnext": ["ConvNextConfig", "ConvNextOnnxConfig"]} try: if not is_vision_available(): @@ -42,7 +40,6 @@ pass else: _import_structure["modeling_convnext"] = [ - "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "ConvNextForImageClassification", "ConvNextModel", "ConvNextPreTrainedModel", @@ -62,7 +59,7 @@ ] if TYPE_CHECKING: - from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig + from .configuration_convnext import ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): @@ -80,7 +77,6 @@ pass else: from .modeling_convnext import ( - CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, diff --git a/src/transformers/models/convnext/configuration_convnext.py b/src/transformers/models/convnext/configuration_convnext.py index f84c31079ea34e..2549f06b9940dc 100644 --- a/src/transformers/models/convnext/configuration_convnext.py +++ b/src/transformers/models/convnext/configuration_convnext.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ConvNextConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ConvNextModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/convnext/modeling_convnext.py b/src/transformers/models/convnext/modeling_convnext.py index 7aee810ab9d727..19eb1b599c6fdc 100755 --- a/src/transformers/models/convnext/modeling_convnext.py +++ b/src/transformers/models/convnext/modeling_convnext.py @@ -55,9 +55,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ diff --git a/src/transformers/models/convnextv2/__init__.py b/src/transformers/models/convnextv2/__init__.py index d2a484b9b82850..5505868c14a4f4 100644 --- a/src/transformers/models/convnextv2/__init__.py +++ b/src/transformers/models/convnextv2/__init__.py @@ -26,12 +26,7 @@ ) -_import_structure = { - "configuration_convnextv2": [ - "CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP", - "ConvNextV2Config", - ] -} +_import_structure = {"configuration_convnextv2": ["ConvNextV2Config"]} try: if not is_torch_available(): @@ -40,7 +35,6 @@ pass else: _import_structure["modeling_convnextv2"] = [ - "CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST", "ConvNextV2ForImageClassification", "ConvNextV2Model", "ConvNextV2PreTrainedModel", @@ -61,7 +55,6 @@ if TYPE_CHECKING: from .configuration_convnextv2 import ( - CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextV2Config, ) @@ -72,7 +65,6 @@ pass else: from .modeling_convnextv2 import ( - CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model, diff --git a/src/transformers/models/convnextv2/configuration_convnextv2.py b/src/transformers/models/convnextv2/configuration_convnextv2.py index ccee03eef6a492..e7692250b2c186 100644 --- a/src/transformers/models/convnextv2/configuration_convnextv2.py +++ 
b/src/transformers/models/convnextv2/configuration_convnextv2.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ConvNextV2Model`]. It is used to instantiate an diff --git a/src/transformers/models/convnextv2/modeling_convnextv2.py b/src/transformers/models/convnextv2/modeling_convnextv2.py index ef878748a49168..3f965f9c2ceb6a 100644 --- a/src/transformers/models/convnextv2/modeling_convnextv2.py +++ b/src/transformers/models/convnextv2/modeling_convnextv2.py @@ -55,9 +55,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ diff --git a/src/transformers/models/cpmant/__init__.py b/src/transformers/models/cpmant/__init__.py index 8140009b60f156..61db942a4f66bd 100644 --- a/src/transformers/models/cpmant/__init__.py +++ b/src/transformers/models/cpmant/__init__.py @@ -22,7 +22,7 @@ _import_structure = { - "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"], + "configuration_cpmant": ["CpmAntConfig"], "tokenization_cpmant": ["CpmAntTokenizer"], } @@ -33,7 +33,6 @@ pass else: _import_structure["modeling_cpmant"] = [ - "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST", "CpmAntForCausalLM", "CpmAntModel", "CpmAntPreTrainedModel", @@ -41,7 +40,7 @@ if TYPE_CHECKING: - from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig + from .configuration_cpmant import CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: @@ -51,7 +50,6 @@ pass else: from .modeling_cpmant import ( - 
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, diff --git a/src/transformers/models/cpmant/configuration_cpmant.py b/src/transformers/models/cpmant/configuration_cpmant.py index 62bbce8ada50e1..4c2a8808669260 100644 --- a/src/transformers/models/cpmant/configuration_cpmant.py +++ b/src/transformers/models/cpmant/configuration_cpmant.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class CpmAntConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`CpmAntModel`]. It is used to instantiate an diff --git a/src/transformers/models/cpmant/modeling_cpmant.py b/src/transformers/models/cpmant/modeling_cpmant.py index 63bb467e64e354..9882d4ccc65831 100755 --- a/src/transformers/models/cpmant/modeling_cpmant.py +++ b/src/transformers/models/cpmant/modeling_cpmant.py @@ -37,9 +37,6 @@ _CONFIG_FOR_DOC = "CpmAntConfig" -from ..deprecated._archive_maps import CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class CpmAntLayerNorm(nn.Module): """ We use Root Mean Square (RMS) Layer Normalization, please see https://arxiv.org/abs/1910.07467 for details." 
diff --git a/src/transformers/models/ctrl/__init__.py b/src/transformers/models/ctrl/__init__.py index 7463117bfbc623..f64cced4e28bfe 100644 --- a/src/transformers/models/ctrl/__init__.py +++ b/src/transformers/models/ctrl/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"], + "configuration_ctrl": ["CTRLConfig"], "tokenization_ctrl": ["CTRLTokenizer"], } @@ -29,7 +29,6 @@ pass else: _import_structure["modeling_ctrl"] = [ - "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", @@ -43,7 +42,6 @@ pass else: _import_structure["modeling_tf_ctrl"] = [ - "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", @@ -52,7 +50,7 @@ if TYPE_CHECKING: - from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig + from .configuration_ctrl import CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: @@ -62,7 +60,6 @@ pass else: from .modeling_ctrl import ( - CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, @@ -76,7 +73,6 @@ pass else: from .modeling_tf_ctrl import ( - TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, diff --git a/src/transformers/models/ctrl/configuration_ctrl.py b/src/transformers/models/ctrl/configuration_ctrl.py index 0c5a68bf6fcbdc..8fd01c10b560e2 100644 --- a/src/transformers/models/ctrl/configuration_ctrl.py +++ b/src/transformers/models/ctrl/configuration_ctrl.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class CTRLConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`CTRLModel`] or a [`TFCTRLModel`]. 
It is used to diff --git a/src/transformers/models/ctrl/modeling_ctrl.py b/src/transformers/models/ctrl/modeling_ctrl.py index 7534a0e50c9a23..c0376f673a4f3a 100644 --- a/src/transformers/models/ctrl/modeling_ctrl.py +++ b/src/transformers/models/ctrl/modeling_ctrl.py @@ -34,9 +34,6 @@ _CONFIG_FOR_DOC = "CTRLConfig" -from ..deprecated._archive_maps import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def angle_defn(pos, i, d_model_size): angle_rates = 1 / torch.pow(10000, (2 * (i // 2)) / d_model_size) return pos * angle_rates diff --git a/src/transformers/models/ctrl/modeling_tf_ctrl.py b/src/transformers/models/ctrl/modeling_tf_ctrl.py index 6569b9e7d7b788..86808190c885e0 100644 --- a/src/transformers/models/ctrl/modeling_tf_ctrl.py +++ b/src/transformers/models/ctrl/modeling_tf_ctrl.py @@ -44,9 +44,6 @@ _CONFIG_FOR_DOC = "CTRLConfig" -from ..deprecated._archive_maps import TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def angle_defn(pos, i, d_model_size): angle_rates = 1 / np.power(10000, (2 * (i // 2)) / d_model_size) return pos * angle_rates diff --git a/src/transformers/models/cvt/__init__.py b/src/transformers/models/cvt/__init__.py index 5241bb5a5f3a7a..7018b41d58e8b2 100644 --- a/src/transformers/models/cvt/__init__.py +++ b/src/transformers/models/cvt/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available -_import_structure = {"configuration_cvt": ["CVT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CvtConfig"]} +_import_structure = {"configuration_cvt": ["CvtConfig"]} try: @@ -26,7 +26,6 @@ pass else: _import_structure["modeling_cvt"] = [ - "CVT_PRETRAINED_MODEL_ARCHIVE_LIST", "CvtForImageClassification", "CvtModel", "CvtPreTrainedModel", @@ -39,14 +38,13 @@ pass else: _import_structure["modeling_tf_cvt"] = [ - "TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCvtForImageClassification", "TFCvtModel", "TFCvtPreTrainedModel", ] if TYPE_CHECKING: - from .configuration_cvt 
import CVT_PRETRAINED_CONFIG_ARCHIVE_MAP, CvtConfig + from .configuration_cvt import CvtConfig try: if not is_torch_available(): @@ -55,7 +53,6 @@ pass else: from .modeling_cvt import ( - CVT_PRETRAINED_MODEL_ARCHIVE_LIST, CvtForImageClassification, CvtModel, CvtPreTrainedModel, @@ -68,7 +65,6 @@ pass else: from .modeling_tf_cvt import ( - TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST, TFCvtForImageClassification, TFCvtModel, TFCvtPreTrainedModel, diff --git a/src/transformers/models/cvt/configuration_cvt.py b/src/transformers/models/cvt/configuration_cvt.py index 412387af5e8a7b..e8c50fbf7746a0 100644 --- a/src/transformers/models/cvt/configuration_cvt.py +++ b/src/transformers/models/cvt/configuration_cvt.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import CVT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class CvtConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`CvtModel`]. It is used to instantiate a CvT model diff --git a/src/transformers/models/cvt/modeling_cvt.py b/src/transformers/models/cvt/modeling_cvt.py index c2d1dd56d2c6a5..23728c770c79fb 100644 --- a/src/transformers/models/cvt/modeling_cvt.py +++ b/src/transformers/models/cvt/modeling_cvt.py @@ -45,9 +45,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import CVT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class BaseModelOutputWithCLSToken(ModelOutput): """ diff --git a/src/transformers/models/cvt/modeling_tf_cvt.py b/src/transformers/models/cvt/modeling_tf_cvt.py index 5664412effb594..03df5033b06b7f 100644 --- a/src/transformers/models/cvt/modeling_tf_cvt.py +++ b/src/transformers/models/cvt/modeling_tf_cvt.py @@ -50,9 +50,6 @@ _CONFIG_FOR_DOC = "CvtConfig" -from ..deprecated._archive_maps import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class TFBaseModelOutputWithCLSToken(ModelOutput): """ diff --git 
a/src/transformers/models/data2vec/__init__.py b/src/transformers/models/data2vec/__init__.py index 45522f4ba893a1..525068db59832c 100644 --- a/src/transformers/models/data2vec/__init__.py +++ b/src/transformers/models/data2vec/__init__.py @@ -18,14 +18,12 @@ _import_structure = { - "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"], + "configuration_data2vec_audio": ["Data2VecAudioConfig"], "configuration_data2vec_text": [ - "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecTextConfig", "Data2VecTextOnnxConfig", ], "configuration_data2vec_vision": [ - "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecVisionConfig", "Data2VecVisionOnnxConfig", ], @@ -38,7 +36,6 @@ pass else: _import_structure["modeling_data2vec_audio"] = [ - "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecAudioForAudioFrameClassification", "Data2VecAudioForCTC", "Data2VecAudioForSequenceClassification", @@ -47,7 +44,6 @@ "Data2VecAudioPreTrainedModel", ] _import_structure["modeling_data2vec_text"] = [ - "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecTextForCausalLM", "Data2VecTextForMaskedLM", "Data2VecTextForMultipleChoice", @@ -58,7 +54,6 @@ "Data2VecTextPreTrainedModel", ] _import_structure["modeling_data2vec_vision"] = [ - "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecVisionForImageClassification", "Data2VecVisionForMaskedImageModeling", "Data2VecVisionForSemanticSegmentation", @@ -75,14 +70,12 @@ ] if TYPE_CHECKING: - from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig + from .configuration_data2vec_audio import Data2VecAudioConfig from .configuration_data2vec_text import ( - DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecTextConfig, Data2VecTextOnnxConfig, ) from .configuration_data2vec_vision import ( - DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecVisionConfig, Data2VecVisionOnnxConfig, ) @@ -94,7 +87,6 @@ pass else: from 
.modeling_data2vec_audio import ( - DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, Data2VecAudioForAudioFrameClassification, Data2VecAudioForCTC, Data2VecAudioForSequenceClassification, @@ -103,7 +95,6 @@ Data2VecAudioPreTrainedModel, ) from .modeling_data2vec_text import ( - DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, Data2VecTextForCausalLM, Data2VecTextForMaskedLM, Data2VecTextForMultipleChoice, @@ -114,7 +105,6 @@ Data2VecTextPreTrainedModel, ) from .modeling_data2vec_vision import ( - DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, Data2VecVisionForImageClassification, Data2VecVisionForMaskedImageModeling, Data2VecVisionForSemanticSegmentation, diff --git a/src/transformers/models/data2vec/configuration_data2vec_text.py b/src/transformers/models/data2vec/configuration_data2vec_text.py index cd52db2d326e9f..e7b15270eda9fb 100644 --- a/src/transformers/models/data2vec/configuration_data2vec_text.py +++ b/src/transformers/models/data2vec/configuration_data2vec_text.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Data2VecTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Data2VecTextModel`] and [`Data2VecTextModel`]. It diff --git a/src/transformers/models/data2vec/configuration_data2vec_vision.py b/src/transformers/models/data2vec/configuration_data2vec_vision.py index 9a9de9c4be5a0d..315f24a55729b9 100644 --- a/src/transformers/models/data2vec/configuration_data2vec_vision.py +++ b/src/transformers/models/data2vec/configuration_data2vec_vision.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Data2VecVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Data2VecVisionModel`]. 
It is used to instantiate diff --git a/src/transformers/models/data2vec/modeling_data2vec_audio.py b/src/transformers/models/data2vec/modeling_data2vec_audio.py index fe527968051902..e77bc728ab365c 100755 --- a/src/transformers/models/data2vec/modeling_data2vec_audio.py +++ b/src/transformers/models/data2vec/modeling_data2vec_audio.py @@ -69,9 +69,6 @@ _CTC_EXPECTED_LOSS = 66.95 -from ..deprecated._archive_maps import DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/data2vec/modeling_data2vec_text.py b/src/transformers/models/data2vec/modeling_data2vec_text.py index 20e1e1eca5ffab..6c27554efddf0b 100644 --- a/src/transformers/models/data2vec/modeling_data2vec_text.py +++ b/src/transformers/models/data2vec/modeling_data2vec_text.py @@ -55,9 +55,6 @@ _CONFIG_FOR_DOC = "Data2VecTextConfig" -from ..deprecated._archive_maps import DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->Data2VecText class Data2VecTextForTextEmbeddings(nn.Module): """ diff --git a/src/transformers/models/data2vec/modeling_data2vec_vision.py b/src/transformers/models/data2vec/modeling_data2vec_vision.py index c7f4f6390aad64..12d62b78a03103 100644 --- a/src/transformers/models/data2vec/modeling_data2vec_vision.py +++ b/src/transformers/models/data2vec/modeling_data2vec_vision.py @@ -58,9 +58,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "remote control, remote" -from ..deprecated._archive_maps import DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass # Copied from transformers.models.beit.modeling_beit.BeitModelOutputWithPooling with Beit->Data2VecVision class Data2VecVisionModelOutputWithPooling(BaseModelOutputWithPooling): diff --git 
a/src/transformers/models/deberta/__init__.py b/src/transformers/models/deberta/__init__.py index 87806dd60d60c5..76beee798ff075 100644 --- a/src/transformers/models/deberta/__init__.py +++ b/src/transformers/models/deberta/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"], + "configuration_deberta": ["DebertaConfig", "DebertaOnnxConfig"], "tokenization_deberta": ["DebertaTokenizer"], } @@ -43,7 +43,6 @@ pass else: _import_structure["modeling_deberta"] = [ - "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "DebertaForMaskedLM", "DebertaForQuestionAnswering", "DebertaForSequenceClassification", @@ -59,7 +58,6 @@ pass else: _import_structure["modeling_tf_deberta"] = [ - "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDebertaForMaskedLM", "TFDebertaForQuestionAnswering", "TFDebertaForSequenceClassification", @@ -70,7 +68,7 @@ if TYPE_CHECKING: - from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig + from .configuration_deberta import DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: @@ -88,7 +86,6 @@ pass else: from .modeling_deberta import ( - DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, @@ -104,7 +101,6 @@ pass else: from .modeling_tf_deberta import ( - TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, diff --git a/src/transformers/models/deberta/configuration_deberta.py b/src/transformers/models/deberta/configuration_deberta.py index 5907f0869d6821..e79e7238abcaba 100644 --- a/src/transformers/models/deberta/configuration_deberta.py +++ b/src/transformers/models/deberta/configuration_deberta.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import 
DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DebertaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DebertaModel`] or a [`TFDebertaModel`]. It is diff --git a/src/transformers/models/deberta/modeling_deberta.py b/src/transformers/models/deberta/modeling_deberta.py index 02047a5cffd448..03646b95aae0cf 100644 --- a/src/transformers/models/deberta/modeling_deberta.py +++ b/src/transformers/models/deberta/modeling_deberta.py @@ -53,9 +53,6 @@ _QA_TARGET_END_INDEX = 14 -from ..deprecated._archive_maps import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class ContextPooler(nn.Module): def __init__(self, config): super().__init__() diff --git a/src/transformers/models/deberta/modeling_tf_deberta.py b/src/transformers/models/deberta/modeling_tf_deberta.py index 3cef6a50c873f4..774d6296d01c76 100644 --- a/src/transformers/models/deberta/modeling_tf_deberta.py +++ b/src/transformers/models/deberta/modeling_tf_deberta.py @@ -54,9 +54,6 @@ _CHECKPOINT_FOR_DOC = "kamalkraj/deberta-base" -from ..deprecated._archive_maps import TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFDebertaContextPooler(keras.layers.Layer): def __init__(self, config: DebertaConfig, **kwargs): super().__init__(**kwargs) diff --git a/src/transformers/models/deberta_v2/__init__.py b/src/transformers/models/deberta_v2/__init__.py index fb1b20a331fe11..314901aee1aed3 100644 --- a/src/transformers/models/deberta_v2/__init__.py +++ b/src/transformers/models/deberta_v2/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_deberta_v2": ["DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaV2Config", "DebertaV2OnnxConfig"], + "configuration_deberta_v2": ["DebertaV2Config", "DebertaV2OnnxConfig"], "tokenization_deberta_v2": ["DebertaV2Tokenizer"], } @@ -43,7 +43,6 @@ pass else: _import_structure["modeling_tf_deberta_v2"] = [ - "TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST", 
"TFDebertaV2ForMaskedLM", "TFDebertaV2ForQuestionAnswering", "TFDebertaV2ForMultipleChoice", @@ -60,7 +59,6 @@ pass else: _import_structure["modeling_deberta_v2"] = [ - "DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST", "DebertaV2ForMaskedLM", "DebertaV2ForMultipleChoice", "DebertaV2ForQuestionAnswering", @@ -73,7 +71,6 @@ if TYPE_CHECKING: from .configuration_deberta_v2 import ( - DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaV2Config, DebertaV2OnnxConfig, ) @@ -94,7 +91,6 @@ pass else: from .modeling_tf_deberta_v2 import ( - TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaV2ForMaskedLM, TFDebertaV2ForMultipleChoice, TFDebertaV2ForQuestionAnswering, @@ -111,7 +107,6 @@ pass else: from .modeling_deberta_v2 import ( - DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaV2ForMaskedLM, DebertaV2ForMultipleChoice, DebertaV2ForQuestionAnswering, diff --git a/src/transformers/models/deberta_v2/configuration_deberta_v2.py b/src/transformers/models/deberta_v2/configuration_deberta_v2.py index 25348849e2f240..78fdd91c81d4f3 100644 --- a/src/transformers/models/deberta_v2/configuration_deberta_v2.py +++ b/src/transformers/models/deberta_v2/configuration_deberta_v2.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DebertaV2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DebertaV2Model`]. 
It is used to instantiate a diff --git a/src/transformers/models/deberta_v2/modeling_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_deberta_v2.py index f898c33af09492..572ee9196f3fdc 100644 --- a/src/transformers/models/deberta_v2/modeling_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_deberta_v2.py @@ -45,9 +45,6 @@ _QA_TARGET_END_INDEX = 9 -from ..deprecated._archive_maps import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.deberta.modeling_deberta.ContextPooler class ContextPooler(nn.Module): def __init__(self, config): diff --git a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py index 546e7f1a8d0038..9bd62581006dd6 100644 --- a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py @@ -53,9 +53,6 @@ _CHECKPOINT_FOR_DOC = "kamalkraj/deberta-v2-xlarge" -from ..deprecated._archive_maps import TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaContextPooler with Deberta->DebertaV2 class TFDebertaV2ContextPooler(keras.layers.Layer): def __init__(self, config: DebertaV2Config, **kwargs): diff --git a/src/transformers/models/decision_transformer/__init__.py b/src/transformers/models/decision_transformer/__init__.py index 44070229aaa859..ce97cf7352a782 100644 --- a/src/transformers/models/decision_transformer/__init__.py +++ b/src/transformers/models/decision_transformer/__init__.py @@ -17,10 +17,7 @@ _import_structure = { - "configuration_decision_transformer": [ - "DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "DecisionTransformerConfig", - ], + "configuration_decision_transformer": ["DecisionTransformerConfig"], } try: @@ -30,7 +27,6 @@ pass else: _import_structure["modeling_decision_transformer"] = [ - 
"DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "DecisionTransformerGPT2Model", "DecisionTransformerGPT2PreTrainedModel", "DecisionTransformerModel", @@ -40,7 +36,6 @@ if TYPE_CHECKING: from .configuration_decision_transformer import ( - DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, DecisionTransformerConfig, ) @@ -51,7 +46,6 @@ pass else: from .modeling_decision_transformer import ( - DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, DecisionTransformerGPT2Model, DecisionTransformerGPT2PreTrainedModel, DecisionTransformerModel, diff --git a/src/transformers/models/decision_transformer/configuration_decision_transformer.py b/src/transformers/models/decision_transformer/configuration_decision_transformer.py index d2c1914bee06ee..6f1fb500bab801 100644 --- a/src/transformers/models/decision_transformer/configuration_decision_transformer.py +++ b/src/transformers/models/decision_transformer/configuration_decision_transformer.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DecisionTransformerConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`DecisionTransformerModel`]. 
It is used to diff --git a/src/transformers/models/decision_transformer/modeling_decision_transformer.py b/src/transformers/models/decision_transformer/modeling_decision_transformer.py index 6f939460aab86f..be7928995c8791 100755 --- a/src/transformers/models/decision_transformer/modeling_decision_transformer.py +++ b/src/transformers/models/decision_transformer/modeling_decision_transformer.py @@ -44,9 +44,6 @@ _CONFIG_FOR_DOC = "DecisionTransformerConfig" -from ..deprecated._archive_maps import DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.gpt2.modeling_gpt2.load_tf_weights_in_gpt2 def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path): """Load tf checkpoints in a pytorch model""" diff --git a/src/transformers/models/deformable_detr/__init__.py b/src/transformers/models/deformable_detr/__init__.py index a560265f4bfcb8..ab44adf3718149 100644 --- a/src/transformers/models/deformable_detr/__init__.py +++ b/src/transformers/models/deformable_detr/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_deformable_detr": ["DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeformableDetrConfig"], + "configuration_deformable_detr": ["DeformableDetrConfig"], } try: @@ -37,7 +37,6 @@ pass else: _import_structure["modeling_deformable_detr"] = [ - "DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "DeformableDetrForObjectDetection", "DeformableDetrModel", "DeformableDetrPreTrainedModel", @@ -45,7 +44,7 @@ if TYPE_CHECKING: - from .configuration_deformable_detr import DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DeformableDetrConfig + from .configuration_deformable_detr import DeformableDetrConfig try: if not is_vision_available(): @@ -63,7 +62,6 @@ pass else: from .modeling_deformable_detr import ( - DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, DeformableDetrForObjectDetection, DeformableDetrModel, DeformableDetrPreTrainedModel, diff --git 
a/src/transformers/models/deformable_detr/configuration_deformable_detr.py b/src/transformers/models/deformable_detr/configuration_deformable_detr.py index 3f3ffff69ff2e9..b623d3504a2ee4 100644 --- a/src/transformers/models/deformable_detr/configuration_deformable_detr.py +++ b/src/transformers/models/deformable_detr/configuration_deformable_detr.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DeformableDetrConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DeformableDetrModel`]. It is used to instantiate diff --git a/src/transformers/models/deformable_detr/modeling_deformable_detr.py b/src/transformers/models/deformable_detr/modeling_deformable_detr.py index 7b2bbb9b1242c9..61f0f011617a7d 100755 --- a/src/transformers/models/deformable_detr/modeling_deformable_detr.py +++ b/src/transformers/models/deformable_detr/modeling_deformable_detr.py @@ -107,11 +107,6 @@ def load_cuda_kernels(): _CONFIG_FOR_DOC = "DeformableDetrConfig" _CHECKPOINT_FOR_DOC = "sensetime/deformable-detr" -DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "sensetime/deformable-detr", - # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr -] - class MultiScaleDeformableAttentionFunction(Function): @staticmethod diff --git a/src/transformers/models/deit/__init__.py b/src/transformers/models/deit/__init__.py index a0b44186efbc05..8248823be24c73 100644 --- a/src/transformers/models/deit/__init__.py +++ b/src/transformers/models/deit/__init__.py @@ -22,7 +22,7 @@ ) -_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]} +_import_structure = {"configuration_deit": ["DeiTConfig", "DeiTOnnxConfig"]} try: if not is_vision_available(): @@ -40,7 +40,6 @@ pass else: _import_structure["modeling_deit"] = [ - 
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "DeiTForImageClassification", "DeiTForImageClassificationWithTeacher", "DeiTForMaskedImageModeling", @@ -55,7 +54,6 @@ pass else: _import_structure["modeling_tf_deit"] = [ - "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", @@ -65,7 +63,7 @@ if TYPE_CHECKING: - from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig + from .configuration_deit import DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): @@ -83,7 +81,6 @@ pass else: from .modeling_deit import ( - DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, @@ -98,7 +95,6 @@ pass else: from .modeling_tf_deit import ( - TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, diff --git a/src/transformers/models/deit/configuration_deit.py b/src/transformers/models/deit/configuration_deit.py index 394c6ff93704cc..e1767c35fda838 100644 --- a/src/transformers/models/deit/configuration_deit.py +++ b/src/transformers/models/deit/configuration_deit.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DeiTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DeiTModel`]. 
It is used to instantiate an DeiT diff --git a/src/transformers/models/deit/modeling_deit.py b/src/transformers/models/deit/modeling_deit.py index 5efcc95d503da4..2480b99586192f 100644 --- a/src/transformers/models/deit/modeling_deit.py +++ b/src/transformers/models/deit/modeling_deit.py @@ -59,9 +59,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class DeiTEmbeddings(nn.Module): """ Construct the CLS token, distillation token, position and patch embeddings. Optionally, also the mask token. diff --git a/src/transformers/models/deit/modeling_tf_deit.py b/src/transformers/models/deit/modeling_tf_deit.py index aec5f6df95922a..43a3465ba14d3a 100644 --- a/src/transformers/models/deit/modeling_tf_deit.py +++ b/src/transformers/models/deit/modeling_tf_deit.py @@ -65,9 +65,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class TFDeiTForImageClassificationWithTeacherOutput(ModelOutput): """ diff --git a/src/transformers/models/deprecated/_archive_maps.py b/src/transformers/models/deprecated/_archive_maps.py deleted file mode 100644 index 256813e0883f45..00000000000000 --- a/src/transformers/models/deprecated/_archive_maps.py +++ /dev/null @@ -1,2774 +0,0 @@ -# coding=utf-8 -# Copyright 2024 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -from collections import OrderedDict - -from ...utils import logging - - -logger = logging.get_logger(__name__) - - -class DeprecatedDict(dict): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def __getitem__(self, item): - logger.warning( - "Archive maps are deprecated and will be removed in version v4.40.0 as they are no longer relevant. " - "If looking to get all checkpoints for a given architecture, we recommend using `huggingface_hub` " - "with the list_models method." - ) - return self[item] - - -class DeprecatedList(list): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def __getitem__(self, item): - logger.warning_once( - "Archive maps are deprecated and will be removed in version v4.40.0 as they are no longer relevant. " - "If looking to get all checkpoints for a given architecture, we recommend using `huggingface_hub` " - "with the `list_models` method." 
- ) - return super().__getitem__(item) - - -ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "albert/albert-base-v1": "https://huggingface.co/albert/albert-base-v1/resolve/main/config.json", - "albert/albert-large-v1": "https://huggingface.co/albert/albert-large-v1/resolve/main/config.json", - "albert/albert-xlarge-v1": "https://huggingface.co/albert/albert-xlarge-v1/resolve/main/config.json", - "albert/albert-xxlarge-v1": "https://huggingface.co/albert/albert-xxlarge-v1/resolve/main/config.json", - "albert/albert-base-v2": "https://huggingface.co/albert/albert-base-v2/resolve/main/config.json", - "albert/albert-large-v2": "https://huggingface.co/albert/albert-large-v2/resolve/main/config.json", - "albert/albert-xlarge-v2": "https://huggingface.co/albert/albert-xlarge-v2/resolve/main/config.json", - "albert/albert-xxlarge-v2": "https://huggingface.co/albert/albert-xxlarge-v2/resolve/main/config.json", - } -) - -ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "albert/albert-base-v1", - "albert/albert-large-v1", - "albert/albert-xlarge-v1", - "albert/albert-xxlarge-v1", - "albert/albert-base-v2", - "albert/albert-large-v2", - "albert/albert-xlarge-v2", - "albert/albert-xxlarge-v2", - ] -) - -TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "albert/albert-base-v1", - "albert/albert-large-v1", - "albert/albert-xlarge-v1", - "albert/albert-xxlarge-v1", - "albert/albert-base-v2", - "albert/albert-large-v2", - "albert/albert-xlarge-v2", - "albert/albert-xxlarge-v2", - ] -) - -ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json"} -) - -ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["kakaobrain/align-base"]) - -ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json"} -) - -ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["BAAI/AltCLIP"]) - 
-AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "MIT/ast-finetuned-audioset-10-10-0.4593": "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json" - } -) - -AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["MIT/ast-finetuned-audioset-10-10-0.4593"] -) - -AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json" - } -) - -AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["huggingface/autoformer-tourism-monthly"]) - -BARK_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["suno/bark-small", "suno/bark"]) - -BART_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/bart-large"]) - -BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/beit-base-patch16-224-pt22k": "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json" - } -) - -BEIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/beit-base-patch16-224"]) - -BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google-bert/bert-base-uncased": "https://huggingface.co/google-bert/bert-base-uncased/resolve/main/config.json", - "google-bert/bert-large-uncased": "https://huggingface.co/google-bert/bert-large-uncased/resolve/main/config.json", - "google-bert/bert-base-cased": "https://huggingface.co/google-bert/bert-base-cased/resolve/main/config.json", - "google-bert/bert-large-cased": "https://huggingface.co/google-bert/bert-large-cased/resolve/main/config.json", - "google-bert/bert-base-multilingual-uncased": "https://huggingface.co/google-bert/bert-base-multilingual-uncased/resolve/main/config.json", - "google-bert/bert-base-multilingual-cased": "https://huggingface.co/google-bert/bert-base-multilingual-cased/resolve/main/config.json", - "google-bert/bert-base-chinese": 
"https://huggingface.co/google-bert/bert-base-chinese/resolve/main/config.json", - "google-bert/bert-base-german-cased": "https://huggingface.co/google-bert/bert-base-german-cased/resolve/main/config.json", - "google-bert/bert-large-uncased-whole-word-masking": "https://huggingface.co/google-bert/bert-large-uncased-whole-word-masking/resolve/main/config.json", - "google-bert/bert-large-cased-whole-word-masking": "https://huggingface.co/google-bert/bert-large-cased-whole-word-masking/resolve/main/config.json", - "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad": "https://huggingface.co/google-bert/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json", - "google-bert/bert-large-cased-whole-word-masking-finetuned-squad": "https://huggingface.co/google-bert/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json", - "google-bert/bert-base-cased-finetuned-mrpc": "https://huggingface.co/google-bert/bert-base-cased-finetuned-mrpc/resolve/main/config.json", - "google-bert/bert-base-german-dbmdz-cased": "https://huggingface.co/google-bert/bert-base-german-dbmdz-cased/resolve/main/config.json", - "google-bert/bert-base-german-dbmdz-uncased": "https://huggingface.co/google-bert/bert-base-german-dbmdz-uncased/resolve/main/config.json", - "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json", - "cl-tohoku/bert-base-japanese-whole-word-masking": "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json", - "cl-tohoku/bert-base-japanese-char": "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json", - "cl-tohoku/bert-base-japanese-char-whole-word-masking": "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json", - "TurkuNLP/bert-base-finnish-cased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json", - 
"TurkuNLP/bert-base-finnish-uncased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json", - "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json", - } -) - -BERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "google-bert/bert-base-uncased", - "google-bert/bert-large-uncased", - "google-bert/bert-base-cased", - "google-bert/bert-large-cased", - "google-bert/bert-base-multilingual-uncased", - "google-bert/bert-base-multilingual-cased", - "google-bert/bert-base-chinese", - "google-bert/bert-base-german-cased", - "google-bert/bert-large-uncased-whole-word-masking", - "google-bert/bert-large-cased-whole-word-masking", - "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad", - "google-bert/bert-large-cased-whole-word-masking-finetuned-squad", - "google-bert/bert-base-cased-finetuned-mrpc", - "google-bert/bert-base-german-dbmdz-cased", - "google-bert/bert-base-german-dbmdz-uncased", - "cl-tohoku/bert-base-japanese", - "cl-tohoku/bert-base-japanese-whole-word-masking", - "cl-tohoku/bert-base-japanese-char", - "cl-tohoku/bert-base-japanese-char-whole-word-masking", - "TurkuNLP/bert-base-finnish-cased-v1", - "TurkuNLP/bert-base-finnish-uncased-v1", - "wietsedv/bert-base-dutch-cased", - ] -) - -TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "google-bert/bert-base-uncased", - "google-bert/bert-large-uncased", - "google-bert/bert-base-cased", - "google-bert/bert-large-cased", - "google-bert/bert-base-multilingual-uncased", - "google-bert/bert-base-multilingual-cased", - "google-bert/bert-base-chinese", - "google-bert/bert-base-german-cased", - "google-bert/bert-large-uncased-whole-word-masking", - "google-bert/bert-large-cased-whole-word-masking", - "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad", - "google-bert/bert-large-cased-whole-word-masking-finetuned-squad", - "google-bert/bert-base-cased-finetuned-mrpc", - 
"cl-tohoku/bert-base-japanese", - "cl-tohoku/bert-base-japanese-whole-word-masking", - "cl-tohoku/bert-base-japanese-char", - "cl-tohoku/bert-base-japanese-char-whole-word-masking", - "TurkuNLP/bert-base-finnish-cased-v1", - "TurkuNLP/bert-base-finnish-uncased-v1", - "wietsedv/bert-base-dutch-cased", - ] -) - -BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json", - "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json", - "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json", - } -) - -BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["google/bigbird-roberta-base", "google/bigbird-roberta-large", "google/bigbird-base-trivia-itc"] -) - -BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/bigbird-pegasus-large-arxiv": "https://huggingface.co/google/bigbird-pegasus-large-arxiv/resolve/main/config.json", - "google/bigbird-pegasus-large-pubmed": "https://huggingface.co/google/bigbird-pegasus-large-pubmed/resolve/main/config.json", - "google/bigbird-pegasus-large-bigpatent": "https://huggingface.co/google/bigbird-pegasus-large-bigpatent/resolve/main/config.json", - } -) - -BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "google/bigbird-pegasus-large-arxiv", - "google/bigbird-pegasus-large-pubmed", - "google/bigbird-pegasus-large-bigpatent", - ] -) - -BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json"} -) - -BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/biogpt", "microsoft/BioGPT-Large"]) - -BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json"} -) - -BIT_PRETRAINED_MODEL_ARCHIVE_LIST = 
DeprecatedList(["google/bit-50"]) - -BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/config.json"} -) - -BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/blenderbot-3B"]) - -BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", - # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small -} - -BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/blenderbot_small-90M"]) - -BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json", - "Salesforce/blip-vqa-capfit-large": "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json", - "Salesforce/blip-image-captioning-base": "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json", - "Salesforce/blip-image-captioning-large": "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json", - "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json", - "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json", - "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json", - "Salesforce/blip-itm-large-flikr": "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json", - } -) - -BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "Salesforce/blip-vqa-base", - "Salesforce/blip-vqa-capfilt-large", - "Salesforce/blip-image-captioning-base", - "Salesforce/blip-image-captioning-large", - "Salesforce/blip-itm-base-coco", - "Salesforce/blip-itm-large-coco", - 
"Salesforce/blip-itm-base-flickr", - "Salesforce/blip-itm-large-flickr", - ] -) - -TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "Salesforce/blip-vqa-base", - "Salesforce/blip-vqa-capfilt-large", - "Salesforce/blip-image-captioning-base", - "Salesforce/blip-image-captioning-large", - "Salesforce/blip-itm-base-coco", - "Salesforce/blip-itm-large-coco", - "Salesforce/blip-itm-base-flickr", - "Salesforce/blip-itm-large-flickr", - ] -) - -BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json"} -) - -BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Salesforce/blip2-opt-2.7b"]) - -BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", - "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", - "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", - "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", - "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", - "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", - } -) - -BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "bigscience/bigscience-small-testing", - "bigscience/bloom-560m", - "bigscience/bloom-1b1", - "bigscience/bloom-1b7", - "bigscience/bloom-3b", - "bigscience/bloom-7b1", - "bigscience/bloom", - ] -) - -BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json", - "BridgeTower/bridgetower-base-itm-mlm": "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json", - } -) - -BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - 
["BridgeTower/bridgetower-base", "BridgeTower/bridgetower-base-itm-mlm"] -) - -BROS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "jinho8345/bros-base-uncased": "https://huggingface.co/jinho8345/bros-base-uncased/blob/main/config.json", - "jinho8345/bros-large-uncased": "https://huggingface.co/jinho8345/bros-large-uncased/blob/main/config.json", - } -) - -BROS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["jinho8345/bros-base-uncased", "jinho8345/bros-large-uncased"]) - -CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "almanach/camembert-base": "https://huggingface.co/almanach/camembert-base/resolve/main/config.json", - "umberto-commoncrawl-cased-v1": "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json", - "umberto-wikipedia-uncased-v1": "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json", - } -) - -CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["almanach/camembert-base", "Musixmatch/umberto-commoncrawl-cased-v1", "Musixmatch/umberto-wikipedia-uncased-v1"] -) - -TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList([]) - -CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json"} -) - -CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/canine-s", "google/canine-r"]) - -CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "OFA-Sys/chinese-clip-vit-base-patch16": "https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/resolve/main/config.json" - } -) - -CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["OFA-Sys/chinese-clip-vit-base-patch16"]) - -CLAP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["laion/clap-htsat-fused", "laion/clap-htsat-unfused"]) - -CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/config.json"} -) 
- -CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai/clip-vit-base-patch32"]) - -TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai/clip-vit-base-patch32"]) - -CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"CIDAS/clipseg-rd64": "https://huggingface.co/CIDAS/clipseg-rd64/resolve/main/config.json"} -) - -CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["CIDAS/clipseg-rd64-refined"]) - -CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"susnato/clvp_dev": "https://huggingface.co/susnato/clvp_dev/resolve/main/config.json"} -) - -CLVP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["susnato/clvp_dev"]) - -CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json", - "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json", - "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json", - "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json", - "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json", - "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json", - "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json", - "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json", - "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json", - "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json", - "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json", - "Salesforce/codegen-16B-mono": 
"https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json", - } -) - -CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "Salesforce/codegen-350M-nl", - "Salesforce/codegen-350M-multi", - "Salesforce/codegen-350M-mono", - "Salesforce/codegen-2B-nl", - "Salesforce/codegen-2B-multi", - "Salesforce/codegen-2B-mono", - "Salesforce/codegen-6B-nl", - "Salesforce/codegen-6B-multi", - "Salesforce/codegen-6B-mono", - "Salesforce/codegen-16B-nl", - "Salesforce/codegen-16B-multi", - "Salesforce/codegen-16B-mono", - ] -) - -CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/conditional-detr-resnet-50": "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json" - } -) - -CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/conditional-detr-resnet-50"]) - -CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json", - "YituTech/conv-bert-medium-small": "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json", - "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json", - } -) - -CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["YituTech/conv-bert-base", "YituTech/conv-bert-medium-small", "YituTech/conv-bert-small"] -) - -TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["YituTech/conv-bert-base", "YituTech/conv-bert-medium-small", "YituTech/conv-bert-small"] -) - -CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/convnext-tiny-224": "https://huggingface.co/facebook/convnext-tiny-224/resolve/main/config.json"} -) - -CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/convnext-tiny-224"]) - -CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/convnextv2-tiny-1k-224": 
"https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json" - } -) - -CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/convnextv2-tiny-1k-224"]) - -CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/config.json"} -) - -CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openbmb/cpm-ant-10b"]) - -CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"Salesforce/ctrl": "https://huggingface.co/Salesforce/ctrl/resolve/main/config.json"} -) - -CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Salesforce/ctrl"]) - -TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Salesforce/ctrl"]) - -CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json"} -) - -CVT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "microsoft/cvt-13", - "microsoft/cvt-13-384", - "microsoft/cvt-13-384-22k", - "microsoft/cvt-21", - "microsoft/cvt-21-384", - "microsoft/cvt-21-384-22k", - ] -) - -TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "microsoft/cvt-13", - "microsoft/cvt-13-384", - "microsoft/cvt-13-384-22k", - "microsoft/cvt-21", - "microsoft/cvt-21-384", - "microsoft/cvt-21-384-22k", - ] -) - -DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json"} -) - -DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/data2vec-vision-base-ft": "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json" - } -) - -DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "facebook/data2vec-audio-base", - "facebook/data2vec-audio-base-10m", - "facebook/data2vec-audio-base-100h", - "facebook/data2vec-audio-base-960h", - ] -) - -DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = 
DeprecatedList(["facebook/data2vec-text-base"]) - -DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/data2vec-vision-base-ft1k"]) - -DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/deberta-base": "https://huggingface.co/microsoft/deberta-base/resolve/main/config.json", - "microsoft/deberta-large": "https://huggingface.co/microsoft/deberta-large/resolve/main/config.json", - "microsoft/deberta-xlarge": "https://huggingface.co/microsoft/deberta-xlarge/resolve/main/config.json", - "microsoft/deberta-base-mnli": "https://huggingface.co/microsoft/deberta-base-mnli/resolve/main/config.json", - "microsoft/deberta-large-mnli": "https://huggingface.co/microsoft/deberta-large-mnli/resolve/main/config.json", - "microsoft/deberta-xlarge-mnli": "https://huggingface.co/microsoft/deberta-xlarge-mnli/resolve/main/config.json", - } -) - -DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "microsoft/deberta-base", - "microsoft/deberta-large", - "microsoft/deberta-xlarge", - "microsoft/deberta-base-mnli", - "microsoft/deberta-large-mnli", - "microsoft/deberta-xlarge-mnli", - ] -) - -TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["kamalkraj/deberta-base"]) - -DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json", - "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json", - "microsoft/deberta-v2-xlarge-mnli": "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json", - "microsoft/deberta-v2-xxlarge-mnli": "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json", - } -) - -DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "microsoft/deberta-v2-xlarge", - "microsoft/deberta-v2-xxlarge", - "microsoft/deberta-v2-xlarge-mnli", - "microsoft/deberta-v2-xxlarge-mnli", - ] -) - 
-TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["kamalkraj/deberta-v2-xlarge"]) - -DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "edbeeching/decision-transformer-gym-hopper-medium": "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json" - } -) - -DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["edbeeching/decision-transformer-gym-hopper-medium"] -) - -DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json"} -) - -DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["sensetime/deformable-detr"]) - -DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/deit-base-distilled-patch16-224": "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json" - } -) - -DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/deit-base-distilled-patch16-224"]) - -TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/deit-base-distilled-patch16-224"]) - -MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json"} -) - -MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["speechbrain/m-ctc-t-large"]) - -OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json"} -) - -RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "yjernite/retribert-base-uncased": "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json" - } -) - -RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["yjernite/retribert-base-uncased"]) - -TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": 
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json" - } -) - -TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["CarlCochet/trajectory-transformer-halfcheetah-medium-v2"] -) - -TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"transfo-xl/transfo-xl-wt103": "https://huggingface.co/transfo-xl/transfo-xl-wt103/resolve/main/config.json"} -) - -TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["transfo-xl/transfo-xl-wt103"]) - -TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["transfo-xl/transfo-xl-wt103"]) - -VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "Visual-Attention-Network/van-base": "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json" - } -) - -VAN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Visual-Attention-Network/van-base"]) - -DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "LiheYoung/depth-anything-small-hf": "https://huggingface.co/LiheYoung/depth-anything-small-hf/resolve/main/config.json" - } -) - -DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["LiheYoung/depth-anything-small-hf"]) - -DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json"} -) - -DETA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["jozhang97/deta-swin-large-o365"]) - -DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json"} -) - -DETR_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/detr-resnet-50"]) - -DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json"} -) - -DINAT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["shi-labs/dinat-mini-in1k-224"]) - -DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - 
{"facebook/dinov2-base": "https://huggingface.co/facebook/dinov2-base/resolve/main/config.json"} -) - -DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/dinov2-base"]) - -DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json", - "distilbert-base-uncased-distilled-squad": "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json", - "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json", - "distilbert-base-cased-distilled-squad": "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json", - "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json", - "distilbert-base-multilingual-cased": "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json", - "distilbert-base-uncased-finetuned-sst-2-english": "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json", - } -) - -DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "distilbert-base-uncased", - "distilbert-base-uncased-distilled-squad", - "distilbert-base-cased", - "distilbert-base-cased-distilled-squad", - "distilbert-base-german-cased", - "distilbert-base-multilingual-cased", - "distilbert-base-uncased-finetuned-sst-2-english", - ] -) - -TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "distilbert-base-uncased", - "distilbert-base-uncased-distilled-squad", - "distilbert-base-cased", - "distilbert-base-cased-distilled-squad", - "distilbert-base-multilingual-cased", - "distilbert-base-uncased-finetuned-sst-2-english", - ] -) - -DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json"} -) - -DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = 
DeprecatedList(["naver-clova-ix/donut-base"]) - -DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/dpr-ctx_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json", - "facebook/dpr-question_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json", - "facebook/dpr-reader-single-nq-base": "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json", - "facebook/dpr-ctx_encoder-multiset-base": "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json", - "facebook/dpr-question_encoder-multiset-base": "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json", - "facebook/dpr-reader-multiset-base": "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json", - } -) - -DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["facebook/dpr-ctx_encoder-single-nq-base", "facebook/dpr-ctx_encoder-multiset-base"] -) - -DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["facebook/dpr-question_encoder-single-nq-base", "facebook/dpr-question_encoder-multiset-base"] -) - -DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["facebook/dpr-reader-single-nq-base", "facebook/dpr-reader-multiset-base"] -) - -TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["facebook/dpr-ctx_encoder-single-nq-base", "facebook/dpr-ctx_encoder-multiset-base"] -) - -TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["facebook/dpr-question_encoder-single-nq-base", "facebook/dpr-question_encoder-multiset-base"] -) - -TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["facebook/dpr-reader-single-nq-base", "facebook/dpr-reader-multiset-base"] -) - -DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"Intel/dpt-large": 
"https://huggingface.co/Intel/dpt-large/resolve/main/config.json"} -) - -DPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Intel/dpt-large", "Intel/dpt-hybrid-midas"]) - -EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "snap-research/efficientformer-l1-300": "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json" - } -) - -EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["snap-research/efficientformer-l1-300"]) - -TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["snap-research/efficientformer-l1-300"]) - -EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json"} -) - -EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/efficientnet-b7"]) - -ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/electra-small-generator": "https://huggingface.co/google/electra-small-generator/resolve/main/config.json", - "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/config.json", - "google/electra-large-generator": "https://huggingface.co/google/electra-large-generator/resolve/main/config.json", - "google/electra-small-discriminator": "https://huggingface.co/google/electra-small-discriminator/resolve/main/config.json", - "google/electra-base-discriminator": "https://huggingface.co/google/electra-base-discriminator/resolve/main/config.json", - "google/electra-large-discriminator": "https://huggingface.co/google/electra-large-discriminator/resolve/main/config.json", - } -) - -ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "google/electra-small-generator", - "google/electra-base-generator", - "google/electra-large-generator", - "google/electra-small-discriminator", - "google/electra-base-discriminator", - "google/electra-large-discriminator", - ] -) - -TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = 
DeprecatedList( - [ - "google/electra-small-generator", - "google/electra-base-generator", - "google/electra-large-generator", - "google/electra-small-discriminator", - "google/electra-base-discriminator", - "google/electra-large-discriminator", - ] -) - -ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json", - "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json", - } -) - -ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/encodec_24khz", "facebook/encodec_48khz"]) - -ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "nghuyong/ernie-1.0-base-zh": "https://huggingface.co/nghuyong/ernie-1.0-base-zh/resolve/main/config.json", - "nghuyong/ernie-2.0-base-en": "https://huggingface.co/nghuyong/ernie-2.0-base-en/resolve/main/config.json", - "nghuyong/ernie-2.0-large-en": "https://huggingface.co/nghuyong/ernie-2.0-large-en/resolve/main/config.json", - "nghuyong/ernie-3.0-base-zh": "https://huggingface.co/nghuyong/ernie-3.0-base-zh/resolve/main/config.json", - "nghuyong/ernie-3.0-medium-zh": "https://huggingface.co/nghuyong/ernie-3.0-medium-zh/resolve/main/config.json", - "nghuyong/ernie-3.0-mini-zh": "https://huggingface.co/nghuyong/ernie-3.0-mini-zh/resolve/main/config.json", - "nghuyong/ernie-3.0-micro-zh": "https://huggingface.co/nghuyong/ernie-3.0-micro-zh/resolve/main/config.json", - "nghuyong/ernie-3.0-nano-zh": "https://huggingface.co/nghuyong/ernie-3.0-nano-zh/resolve/main/config.json", - "nghuyong/ernie-gram-zh": "https://huggingface.co/nghuyong/ernie-gram-zh/resolve/main/config.json", - "nghuyong/ernie-health-zh": "https://huggingface.co/nghuyong/ernie-health-zh/resolve/main/config.json", - } -) - -ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "nghuyong/ernie-1.0-base-zh", - "nghuyong/ernie-2.0-base-en", - "nghuyong/ernie-2.0-large-en", - "nghuyong/ernie-3.0-base-zh", - 
"nghuyong/ernie-3.0-medium-zh", - "nghuyong/ernie-3.0-mini-zh", - "nghuyong/ernie-3.0-micro-zh", - "nghuyong/ernie-3.0-nano-zh", - "nghuyong/ernie-gram-zh", - "nghuyong/ernie-health-zh", - ] -) - -ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json", - "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json", - } -) - -ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["susnato/ernie-m-base_pytorch", "susnato/ernie-m-large_pytorch"] -) - -ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json"} -) - -ESM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/esm2_t6_8M_UR50D", "facebook/esm2_t12_35M_UR50D"]) - -FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json", - "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json", - } -) - -FALCON_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "tiiuae/falcon-40b", - "tiiuae/falcon-40b-instruct", - "tiiuae/falcon-7b", - "tiiuae/falcon-7b-instruct", - "tiiuae/falcon-rw-7b", - "tiiuae/falcon-rw-1b", - ] -) - -FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "espnet/fastspeech2_conformer_hifigan": "https://huggingface.co/espnet/fastspeech2_conformer_hifigan/raw/main/config.json" - } -) - -FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"espnet/fastspeech2_conformer": "https://huggingface.co/espnet/fastspeech2_conformer/raw/main/config.json"} -) - -FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "espnet/fastspeech2_conformer_with_hifigan": "https://huggingface.co/espnet/fastspeech2_conformer_with_hifigan/raw/main/config.json" - } -) - 
-FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["espnet/fastspeech2_conformer"]) - -FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "flaubert/flaubert_small_cased": "https://huggingface.co/flaubert/flaubert_small_cased/resolve/main/config.json", - "flaubert/flaubert_base_uncased": "https://huggingface.co/flaubert/flaubert_base_uncased/resolve/main/config.json", - "flaubert/flaubert_base_cased": "https://huggingface.co/flaubert/flaubert_base_cased/resolve/main/config.json", - "flaubert/flaubert_large_cased": "https://huggingface.co/flaubert/flaubert_large_cased/resolve/main/config.json", - } -) - -FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "flaubert/flaubert_small_cased", - "flaubert/flaubert_base_uncased", - "flaubert/flaubert_base_cased", - "flaubert/flaubert_large_cased", - ] -) - -TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList([]) - -FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/flava-full": "https://huggingface.co/facebook/flava-full/resolve/main/config.json"} -) - -FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/flava-full"]) - -FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json", - "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json", - } -) - -FNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/fnet-base", "google/fnet-large"]) - -FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json"} -) - -FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/focalnet-tiny"]) - -FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict({}) - -FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json", - 
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json", - "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json", - "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json", - "funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json", - "funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json", - "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json", - "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json", - "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json", - "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json", - } -) - -FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "funnel-transformer/small", - "funnel-transformer/small-base", - "funnel-transformer/medium", - "funnel-transformer/medium-base", - "funnel-transformer/intermediate", - "funnel-transformer/intermediate-base", - "funnel-transformer/large", - "funnel-transformer/large-base", - "funnel-transformer/xlarge-base", - "funnel-transformer/xlarge", - ] -) - -TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "funnel-transformer/small", - "funnel-transformer/small-base", - "funnel-transformer/medium", - "funnel-transformer/medium-base", - "funnel-transformer/intermediate", - "funnel-transformer/intermediate-base", - "funnel-transformer/large", - "funnel-transformer/large-base", - "funnel-transformer/xlarge-base", - "funnel-transformer/xlarge", - ] -) - -FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"adept/fuyu-8b": 
"https://huggingface.co/adept/fuyu-8b/resolve/main/config.json"} -) - -GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict({}) - -GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json"} -) - -GIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/git-base"]) - -GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json"} -) - -GLPN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["vinvino02/glpn-kitti"]) - -GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "openai-community/gpt2": "https://huggingface.co/openai-community/gpt2/resolve/main/config.json", - "openai-community/gpt2-medium": "https://huggingface.co/openai-community/gpt2-medium/resolve/main/config.json", - "openai-community/gpt2-large": "https://huggingface.co/openai-community/gpt2-large/resolve/main/config.json", - "openai-community/gpt2-xl": "https://huggingface.co/openai-community/gpt2-xl/resolve/main/config.json", - "distilbert/distilgpt2": "https://huggingface.co/distilbert/distilgpt2/resolve/main/config.json", - } -) - -GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "openai-community/gpt2", - "openai-community/gpt2-medium", - "openai-community/gpt2-large", - "openai-community/gpt2-xl", - "distilbert/distilgpt2", - ] -) - -TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "openai-community/gpt2", - "openai-community/gpt2-medium", - "openai-community/gpt2-large", - "openai-community/gpt2-xl", - "distilbert/distilgpt2", - ] -) - -GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json" - } -) - -GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["bigcode/gpt_bigcode-santacoder"]) - -GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - 
{"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json"} -) - -GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["EleutherAI/gpt-neo-1.3B"]) - -GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json"} -) - -GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["EleutherAI/gpt-neox-20b"]) - -GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json"} -) - -GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json"] -) - -GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json"} -) - -GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["EleutherAI/gpt-j-6B"]) - -GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "tanreinama/GPTSAN-2.8B-spout_is_uniform": "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json" - } -) - -GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Tanrei/GPTSAN-japanese"]) - -GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"graphormer-base": "https://huggingface.co/clefourrier/graphormer-base-pcqm4mv2/resolve/main/config.json"} -) - -GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["clefourrier/graphormer-base-pcqm4mv1", "clefourrier/graphormer-base-pcqm4mv2"] -) - -GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"nvidia/groupvit-gcc-yfcc": "https://huggingface.co/nvidia/groupvit-gcc-yfcc/resolve/main/config.json"} -) - -GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["nvidia/groupvit-gcc-yfcc"]) - -TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = 
DeprecatedList(["nvidia/groupvit-gcc-yfcc"]) - -HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/hubert-base-ls960": "https://huggingface.co/facebook/hubert-base-ls960/resolve/main/config.json"} -) - -HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/hubert-base-ls960"]) - -TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/hubert-base-ls960"]) - -IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", - "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", - "kssteven/ibert-roberta-large-mnli": "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json", - } -) - -IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["kssteven/ibert-roberta-base", "kssteven/ibert-roberta-large", "kssteven/ibert-roberta-large-mnli"] -) - -IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "HuggingFaceM4/idefics-9b": "https://huggingface.co/HuggingFaceM4/idefics-9b/blob/main/config.json", - "HuggingFaceM4/idefics-80b": "https://huggingface.co/HuggingFaceM4/idefics-80b/blob/main/config.json", - } -) - -IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["HuggingFaceM4/idefics-9b", "HuggingFaceM4/idefics-80b"]) - -IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"openai/imagegpt-small": "", "openai/imagegpt-medium": "", "openai/imagegpt-large": ""} -) - -IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["openai/imagegpt-small", "openai/imagegpt-medium", "openai/imagegpt-large"] -) - -INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "huggingface/informer-tourism-monthly": "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json" - } -) - -INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["huggingface/informer-tourism-monthly"]) - 
-INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json" - } -) - -INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Salesforce/instructblip-flan-t5-xl"]) - -JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "openai/jukebox-5b-lyrics": "https://huggingface.co/openai/jukebox-5b-lyrics/blob/main/config.json", - "openai/jukebox-1b-lyrics": "https://huggingface.co/openai/jukebox-1b-lyrics/blob/main/config.json", - } -) - -JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai/jukebox-1b-lyrics", "openai/jukebox-5b-lyrics"]) - -KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/kosmos-2-patch14-224": "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/config.json" - } -) - -KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/kosmos-2-patch14-224"]) - -LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/layoutlm-base-uncased": "https://huggingface.co/microsoft/layoutlm-base-uncased/resolve/main/config.json", - "microsoft/layoutlm-large-uncased": "https://huggingface.co/microsoft/layoutlm-large-uncased/resolve/main/config.json", - } -) - -LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["layoutlm-base-uncased", "layoutlm-large-uncased"]) - -TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["microsoft/layoutlm-base-uncased", "microsoft/layoutlm-large-uncased"] -) - -LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/config.json", - "layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/config.json", - } -) - -LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["microsoft/layoutlmv2-base-uncased", "microsoft/layoutlmv2-large-uncased"] -) - 
-LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json"} -) - -LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/layoutlmv3-base", "microsoft/layoutlmv3-large"]) - -TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["microsoft/layoutlmv3-base", "microsoft/layoutlmv3-large"] -) - -LED_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/config.json"} -) - -LED_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["allenai/led-base-16384"]) - -LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json"} -) - -LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/levit-128S"]) - -LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "SCUT-DLVCLab/lilt-roberta-en-base": "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json" - } -) - -LILT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["SCUT-DLVCLab/lilt-roberta-en-base"]) - -LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict({}) - -LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"llava-hf/llava-v1.5-7b": "https://huggingface.co/llava-hf/llava-v1.5-7b/resolve/main/config.json"} -) - -LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["llava-hf/llava-1.5-7b-hf", "llava-hf/llava-1.5-13b-hf", "llava-hf/bakLlava-v1-hf"] -) - -LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json", - "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json", - "allenai/longformer-large-4096-finetuned-triviaqa": 
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json", - "allenai/longformer-base-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json", - "allenai/longformer-large-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json", - } -) - -LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "allenai/longformer-base-4096", - "allenai/longformer-large-4096", - "allenai/longformer-large-4096-finetuned-triviaqa", - "allenai/longformer-base-4096-extra.pos.embd.only", - "allenai/longformer-large-4096-extra.pos.embd.only", - ] -) - -TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "allenai/longformer-base-4096", - "allenai/longformer-large-4096", - "allenai/longformer-large-4096-finetuned-triviaqa", - "allenai/longformer-base-4096-extra.pos.embd.only", - "allenai/longformer-large-4096-extra.pos.embd.only", - ] -) - -LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/long-t5-local-base": "https://huggingface.co/google/long-t5-local-base/blob/main/config.json", - "google/long-t5-local-large": "https://huggingface.co/google/long-t5-local-large/blob/main/config.json", - "google/long-t5-tglobal-base": "https://huggingface.co/google/long-t5-tglobal-base/blob/main/config.json", - "google/long-t5-tglobal-large": "https://huggingface.co/google/long-t5-tglobal-large/blob/main/config.json", - } -) - -LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "google/long-t5-local-base", - "google/long-t5-local-large", - "google/long-t5-tglobal-base", - "google/long-t5-tglobal-large", - ] -) - -LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json", - "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json", - } 
-) - -LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["studio-ousia/luke-base", "studio-ousia/luke-large"]) - -LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json"} -) - -TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["unc-nlp/lxmert-base-uncased"]) - -M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/config.json"} -) - -M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/m2m100_418M"]) - -MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"state-spaces/mamba-2.8b": "https://huggingface.co/state-spaces/mamba-2.8b/resolve/main/config.json"} -) - -MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList([]) - -MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", - "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", - } -) - -MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/markuplm-base", "microsoft/markuplm-large"]) - -MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/mask2former-swin-small-coco-instance": "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json" - } -) - -MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/mask2former-swin-small-coco-instance"]) - -MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/maskformer-swin-base-ade": "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json" - } -) - -MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/maskformer-swin-base-ade"]) - -MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"mnaylor/mega-base-wikitext": 
"https://huggingface.co/mnaylor/mega-base-wikitext/resolve/main/config.json"} -) - -MEGA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["mnaylor/mega-base-wikitext"]) - -MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict({}) - -MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["nvidia/megatron-bert-cased-345m"]) - -MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json"} -) - -MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["alibaba-damo/mgp-str-base"]) - -MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "mistralai/Mistral-7B-v0.1": "https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/config.json", - "mistralai/Mistral-7B-Instruct-v0.1": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/config.json", - } -) - -MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"mistral-ai/Mixtral-8x7B": "https://huggingface.co/mistral-ai/Mixtral-8x7B/resolve/main/config.json"} -) - -MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"google/mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/config.json"} -) - -MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/mobilebert-uncased"]) - -TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/mobilebert-uncased"]) - -MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json", - "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json", - } -) - -MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["google/mobilenet_v1_1.0_224", "google/mobilenet_v1_0.75_192"] -) - -MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/mobilenet_v2_1.4_224": 
"https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json", - "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json", - "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json", - "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json", - } -) - -MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "google/mobilenet_v2_1.4_224", - "google/mobilenet_v2_1.0_224", - "google/mobilenet_v2_0.37_160", - "google/mobilenet_v2_0.35_96", - ] -) - -MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "apple/mobilevit-small": "https://huggingface.co/apple/mobilevit-small/resolve/main/config.json", - "apple/mobilevit-x-small": "https://huggingface.co/apple/mobilevit-x-small/resolve/main/config.json", - "apple/mobilevit-xx-small": "https://huggingface.co/apple/mobilevit-xx-small/resolve/main/config.json", - "apple/deeplabv3-mobilevit-small": "https://huggingface.co/apple/deeplabv3-mobilevit-small/resolve/main/config.json", - "apple/deeplabv3-mobilevit-x-small": "https://huggingface.co/apple/deeplabv3-mobilevit-x-small/resolve/main/config.json", - "apple/deeplabv3-mobilevit-xx-small": "https://huggingface.co/apple/deeplabv3-mobilevit-xx-small/resolve/main/config.json", - } -) - -MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "apple/mobilevit-small", - "apple/mobilevit-x-small", - "apple/mobilevit-xx-small", - "apple/deeplabv3-mobilevit-small", - "apple/deeplabv3-mobilevit-x-small", - "apple/deeplabv3-mobilevit-xx-small", - ] -) - -TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "apple/mobilevit-small", - "apple/mobilevit-x-small", - "apple/mobilevit-xx-small", - "apple/deeplabv3-mobilevit-small", - "apple/deeplabv3-mobilevit-x-small", - "apple/deeplabv3-mobilevit-xx-small", - ] -) - -MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - 
{"apple/mobilevitv2-1.0": "https://huggingface.co/apple/mobilevitv2-1.0/resolve/main/config.json"} -) - -MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["apple/mobilevitv2-1.0-imagenet1k-256"]) - -MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"microsoft/mpnet-base": "https://huggingface.co/microsoft/mpnet-base/resolve/main/config.json"} -) - -MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/mpnet-base"]) - -TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/mpnet-base"]) - -MPT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"mosaicml/mpt-7b": "https://huggingface.co/mosaicml/mpt-7b/resolve/main/config.json"} -) - -MPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "mosaicml/mpt-7b", - "mosaicml/mpt-7b-storywriter", - "mosaicml/mpt-7b-instruct", - "mosaicml/mpt-7b-8k", - "mosaicml/mpt-7b-8k-instruct", - "mosaicml/mpt-7b-8k-chat", - "mosaicml/mpt-30b", - "mosaicml/mpt-30b-instruct", - "mosaicml/mpt-30b-chat", - ] -) - -MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json"} -) - -MRA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["uw-madison/mra-base-512-4"]) - -MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/musicgen-small": "https://huggingface.co/facebook/musicgen-small/resolve/main/config.json"} -) - -MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/musicgen-small"]) - -MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/musicgen-melody": "https://huggingface.co/facebook/musicgen-melody/resolve/main/config.json"} -) - -MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/musicgen-melody"]) - -MVP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "RUCAIBox/mvp", - "RUCAIBox/mvp-data-to-text", - "RUCAIBox/mvp-open-dialog", - "RUCAIBox/mvp-question-answering", - "RUCAIBox/mvp-question-generation", - 
"RUCAIBox/mvp-story", - "RUCAIBox/mvp-summarization", - "RUCAIBox/mvp-task-dialog", - "RUCAIBox/mtl-data-to-text", - "RUCAIBox/mtl-multi-task", - "RUCAIBox/mtl-open-dialog", - "RUCAIBox/mtl-question-answering", - "RUCAIBox/mtl-question-generation", - "RUCAIBox/mtl-story", - "RUCAIBox/mtl-summarization", - ] -) - -NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json"} -) - -NAT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["shi-labs/nat-mini-in1k-224"]) - -NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json"} -) - -NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["sijunhe/nezha-cn-base", "sijunhe/nezha-cn-large", "sijunhe/nezha-base-wwm", "sijunhe/nezha-large-wwm"] -) - -NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json"} -) - -NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/nllb-moe-54b"]) - -NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"uw-madison/nystromformer-512": "https://huggingface.co/uw-madison/nystromformer-512/resolve/main/config.json"} -) - -NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["uw-madison/nystromformer-512"]) - -OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "allenai/OLMo-1B-hf": "https://huggingface.co/allenai/OLMo-1B-hf/resolve/main/config.json", - "allenai/OLMo-7B-hf": "https://huggingface.co/allenai/OLMo-7B-hf/resolve/main/config.json", - "allenai/OLMo-7B-Twin-2T-hf": "https://huggingface.co/allenai/OLMo-7B-Twin-2T-hf/resolve/main/config.json", - } -) - -ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "shi-labs/oneformer_ade20k_swin_tiny": "https://huggingface.co/shi-labs/oneformer_ade20k_swin_tiny/blob/main/config.json" - } -) - 
-ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["shi-labs/oneformer_ade20k_swin_tiny"]) - -OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"openai-community/openai-gpt": "https://huggingface.co/openai-community/openai-gpt/resolve/main/config.json"} -) - -OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai-community/openai-gpt"]) - -TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai-community/openai-gpt"]) - -OPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "facebook/opt-125m", - "facebook/opt-350m", - "facebook/opt-1.3b", - "facebook/opt-2.7b", - "facebook/opt-6.7b", - "facebook/opt-13b", - "facebook/opt-30b", - ] -) - -OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"google/owlv2-base-patch16": "https://huggingface.co/google/owlv2-base-patch16/resolve/main/config.json"} -) - -OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/owlv2-base-patch16-ensemble"]) - -OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json", - "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json", - "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json", - } -) - -OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["google/owlvit-base-patch32", "google/owlvit-base-patch16", "google/owlvit-large-patch14"] -) - -PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "ibm/patchtsmixer-etth1-pretrain": "https://huggingface.co/ibm/patchtsmixer-etth1-pretrain/resolve/main/config.json" - } -) - -PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["ibm/patchtsmixer-etth1-pretrain"]) - -PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"ibm/patchtst-base": "https://huggingface.co/ibm/patchtst-base/resolve/main/config.json"} -) - 
-PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["ibm/patchtst-etth1-pretrain"]) - -PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json"} -) - -PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/pegasus-x-base": "https://huggingface.co/google/pegasus-x-base/resolve/main/config.json", - "google/pegasus-x-large": "https://huggingface.co/google/pegasus-x-large/resolve/main/config.json", - } -) - -PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/pegasus-x-base", "google/pegasus-x-large"]) - -PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json"} -) - -PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["deepmind/language-perceiver"]) - -PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"adept/persimmon-8b-base": "https://huggingface.co/adept/persimmon-8b-base/resolve/main/config.json"} -) - -PHI_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/phi-1": "https://huggingface.co/microsoft/phi-1/resolve/main/config.json", - "microsoft/phi-1_5": "https://huggingface.co/microsoft/phi-1_5/resolve/main/config.json", - "microsoft/phi-2": "https://huggingface.co/microsoft/phi-2/resolve/main/config.json", - } -) - -PHI_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/phi-1", "microsoft/phi-1_5", "microsoft/phi-2"]) - -PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/pix2struct-textcaps-base": "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json" - } -) - -PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "google/pix2struct-textcaps-base", - "google/pix2struct-textcaps-large", - "google/pix2struct-base", - "google/pix2struct-large", - "google/pix2struct-ai2d-base", - "google/pix2struct-ai2d-large", - 
"google/pix2struct-widget-captioning-base", - "google/pix2struct-widget-captioning-large", - "google/pix2struct-screen2words-base", - "google/pix2struct-screen2words-large", - "google/pix2struct-docvqa-base", - "google/pix2struct-docvqa-large", - "google/pix2struct-ocrvqa-base", - "google/pix2struct-ocrvqa-large", - "google/pix2struct-chartqa-base", - "google/pix2struct-inforgraphics-vqa-base", - "google/pix2struct-inforgraphics-vqa-large", - ] -) - -PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"uclanlp/plbart-base": "https://huggingface.co/uclanlp/plbart-base/resolve/main/config.json"} -) - -PLBART_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["uclanlp/plbart-base", "uclanlp/plbart-cs-java", "uclanlp/plbart-multi_task-all"] -) - -POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json"} -) - -POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["sail/poolformer_s12"]) - -POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"sweetcocoa/pop2piano": "https://huggingface.co/sweetcocoa/pop2piano/blob/main/config.json"} -) - -POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["sweetcocoa/pop2piano"]) - -PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/prophetnet-large-uncased": "https://huggingface.co/microsoft/prophetnet-large-uncased/resolve/main/config.json" - } -) - -PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/prophetnet-large-uncased"]) - -PVT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict({"pvt-tiny-224": "https://huggingface.co/Zetatech/pvt-tiny-224"}) - -PVT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Zetatech/pvt-tiny-224"]) - -QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"google-bert/bert-base-uncased": "https://huggingface.co/google-bert/bert-base-uncased/resolve/main/config.json"} -) - -QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST = 
DeprecatedList(["google-bert/bert-base-uncased"]) - -QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"Qwen/Qwen2-7B-beta": "https://huggingface.co/Qwen/Qwen2-7B-beta/resolve/main/config.json"} -) - -REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/realm-cc-news-pretrained-embedder": "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json", - "google/realm-cc-news-pretrained-encoder": "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json", - "google/realm-cc-news-pretrained-scorer": "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json", - "google/realm-cc-news-pretrained-openqa": "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json", - "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json", - "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json", - "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json", - "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json", - } -) - -REALM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "google/realm-cc-news-pretrained-embedder", - "google/realm-cc-news-pretrained-encoder", - "google/realm-cc-news-pretrained-scorer", - "google/realm-cc-news-pretrained-openqa", - "google/realm-orqa-nq-openqa", - "google/realm-orqa-nq-reader", - "google/realm-orqa-wq-openqa", - "google/realm-orqa-wq-reader", - ] -) - -REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/reformer-crime-and-punishment": "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/config.json", - "google/reformer-enwik8": "https://huggingface.co/google/reformer-enwik8/resolve/main/config.json", - } -) - -REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = 
DeprecatedList( - ["google/reformer-crime-and-punishment", "google/reformer-enwik8"] -) - -REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/regnet-y-040": "https://huggingface.co/facebook/regnet-y-040/blob/main/config.json"} -) - -REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/regnet-y-040"]) - -TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/regnet-y-040"]) - -REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"google/rembert": "https://huggingface.co/google/rembert/resolve/main/config.json"} -) - -REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/rembert"]) - -TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/rembert"]) - -RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json"} -) - -RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/resnet-50"]) - -TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/resnet-50"]) - -ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "FacebookAI/roberta-base": "https://huggingface.co/FacebookAI/roberta-base/resolve/main/config.json", - "FacebookAI/roberta-large": "https://huggingface.co/FacebookAI/roberta-large/resolve/main/config.json", - "FacebookAI/roberta-large-mnli": "https://huggingface.co/FacebookAI/roberta-large-mnli/resolve/main/config.json", - "distilbert/distilroberta-base": "https://huggingface.co/distilbert/distilroberta-base/resolve/main/config.json", - "openai-community/roberta-base-openai-detector": "https://huggingface.co/openai-community/roberta-base-openai-detector/resolve/main/config.json", - "openai-community/roberta-large-openai-detector": "https://huggingface.co/openai-community/roberta-large-openai-detector/resolve/main/config.json", - } -) - -ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "FacebookAI/roberta-base", - "FacebookAI/roberta-large", - 
"FacebookAI/roberta-large-mnli", - "distilbert/distilroberta-base", - "openai-community/roberta-base-openai-detector", - "openai-community/roberta-large-openai-detector", - ] -) - -TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "FacebookAI/roberta-base", - "FacebookAI/roberta-large", - "FacebookAI/roberta-large-mnli", - "distilbert/distilroberta-base", - ] -) - -ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "andreasmadsen/efficient_mlm_m0.40": "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json" - } -) - -ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "andreasmadsen/efficient_mlm_m0.15", - "andreasmadsen/efficient_mlm_m0.20", - "andreasmadsen/efficient_mlm_m0.30", - "andreasmadsen/efficient_mlm_m0.40", - "andreasmadsen/efficient_mlm_m0.50", - "andreasmadsen/efficient_mlm_m0.60", - "andreasmadsen/efficient_mlm_m0.70", - "andreasmadsen/efficient_mlm_m0.80", - ] -) - -TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "andreasmadsen/efficient_mlm_m0.15", - "andreasmadsen/efficient_mlm_m0.20", - "andreasmadsen/efficient_mlm_m0.30", - "andreasmadsen/efficient_mlm_m0.40", - "andreasmadsen/efficient_mlm_m0.50", - "andreasmadsen/efficient_mlm_m0.60", - "andreasmadsen/efficient_mlm_m0.70", - "andreasmadsen/efficient_mlm_m0.80", - ] -) - -ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json"} -) - -ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["weiweishi/roc-bert-base-zh"]) - -ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json", - "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json", - "junnyu/roformer_chinese_char_small": 
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json", - "junnyu/roformer_chinese_char_base": "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json", - "junnyu/roformer_small_discriminator": "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json", - "junnyu/roformer_small_generator": "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json", - } -) - -ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "junnyu/roformer_chinese_small", - "junnyu/roformer_chinese_base", - "junnyu/roformer_chinese_char_small", - "junnyu/roformer_chinese_char_base", - "junnyu/roformer_small_discriminator", - "junnyu/roformer_small_generator", - ] -) - -TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "junnyu/roformer_chinese_small", - "junnyu/roformer_chinese_base", - "junnyu/roformer_chinese_char_small", - "junnyu/roformer_chinese_char_base", - "junnyu/roformer_small_discriminator", - "junnyu/roformer_small_generator", - ] -) - -RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json", - "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json", - "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json", - "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json", - "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json", - "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json", - "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json", - "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json", - "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json", - 
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json", - } -) - -RWKV_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "RWKV/rwkv-4-169m-pile", - "RWKV/rwkv-4-430m-pile", - "RWKV/rwkv-4-1b5-pile", - "RWKV/rwkv-4-3b-pile", - "RWKV/rwkv-4-7b-pile", - "RWKV/rwkv-4-14b-pile", - "RWKV/rwkv-raven-1b5", - "RWKV/rwkv-raven-3b", - "RWKV/rwkv-raven-7b", - "RWKV/rwkv-raven-14b", - ] -) - -SAM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/sam-vit-huge": "https://huggingface.co/facebook/sam-vit-huge/resolve/main/config.json", - "facebook/sam-vit-large": "https://huggingface.co/facebook/sam-vit-large/resolve/main/config.json", - "facebook/sam-vit-base": "https://huggingface.co/facebook/sam-vit-base/resolve/main/config.json", - } -) - -SAM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["facebook/sam-vit-huge", "facebook/sam-vit-large", "facebook/sam-vit-base"] -) - -TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["facebook/sam-vit-huge", "facebook/sam-vit-large", "facebook/sam-vit-base"] -) - -SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/hf-seamless-m4t-medium": "https://huggingface.co/facebook/hf-seamless-m4t-medium/resolve/main/config.json" - } -) - -SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/hf-seamless-m4t-medium"]) - -SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"": "https://huggingface.co//resolve/main/config.json"} -) - -SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/seamless-m4t-v2-large"]) - -SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "nvidia/segformer-b0-finetuned-ade-512-512": "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json" - } -) - -SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["nvidia/segformer-b0-finetuned-ade-512-512"]) - -TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = 
DeprecatedList(["nvidia/segformer-b0-finetuned-ade-512-512"]) - -SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"BAAI/seggpt-vit-large": "https://huggingface.co/BAAI/seggpt-vit-large/resolve/main/config.json"} -) - -SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["BAAI/seggpt-vit-large"]) - -SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json"} -) - -SEW_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["asapp/sew-tiny-100k", "asapp/sew-small-100k", "asapp/sew-mid-100k"] -) - -SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json"} -) - -SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "asapp/sew-d-tiny-100k", - "asapp/sew-d-small-100k", - "asapp/sew-d-mid-100k", - "asapp/sew-d-mid-k127-100k", - "asapp/sew-d-base-100k", - "asapp/sew-d-base-plus-100k", - "asapp/sew-d-mid-400k", - "asapp/sew-d-mid-k127-400k", - "asapp/sew-d-base-plus-400k", - ] -) - -SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/siglip-base-patch16-224": "https://huggingface.co/google/siglip-base-patch16-224/resolve/main/config.json" - } -) - -SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/siglip-base-patch16-224"]) - -SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/s2t-small-librispeech-asr": "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json" - } -) - -SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/s2t-small-librispeech-asr"]) - -TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/s2t-small-librispeech-asr"]) - -SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/s2t-wav2vec2-large-en-de": "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json" - } -) - 
-SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/config.json", - "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/config.json", - "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/config.json", - } -) - -SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"microsoft/speecht5_hifigan": "https://huggingface.co/microsoft/speecht5_hifigan/resolve/main/config.json"} -) - -SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["microsoft/speecht5_asr", "microsoft/speecht5_tts", "microsoft/speecht5_vc"] -) - -SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "tau/splinter-base": "https://huggingface.co/tau/splinter-base/resolve/main/config.json", - "tau/splinter-base-qass": "https://huggingface.co/tau/splinter-base-qass/resolve/main/config.json", - "tau/splinter-large": "https://huggingface.co/tau/splinter-large/resolve/main/config.json", - "tau/splinter-large-qass": "https://huggingface.co/tau/splinter-large-qass/resolve/main/config.json", - } -) - -SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["tau/splinter-base", "tau/splinter-base-qass", "tau/splinter-large", "tau/splinter-large-qass"] -) - -SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "squeezebert/squeezebert-uncased": "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/config.json", - "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/config.json", - "squeezebert/squeezebert-mnli-headless": "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/config.json", - } -) - -SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["squeezebert/squeezebert-uncased", "squeezebert/squeezebert-mnli", "squeezebert/squeezebert-mnli-headless"] -) - -STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( 
- {"stabilityai/stablelm-3b-4e1t": "https://huggingface.co/stabilityai/stablelm-3b-4e1t/resolve/main/config.json"} -) - -STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict({}) - -SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"MBZUAI/swiftformer-xs": "https://huggingface.co/MBZUAI/swiftformer-xs/resolve/main/config.json"} -) - -SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["MBZUAI/swiftformer-xs"]) - -SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/swin-tiny-patch4-window7-224": "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json" - } -) - -SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/swin-tiny-patch4-window7-224"]) - -TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/swin-tiny-patch4-window7-224"]) - -SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "caidas/swin2sr-classicalsr-x2-64": "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json" - } -) - -SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["caidas/swin2SR-classical-sr-x2-64"]) - -SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/swinv2-tiny-patch4-window8-256": "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json" - } -) - -SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/swinv2-tiny-patch4-window8-256"]) - -SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json"} -) - -SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "google/switch-base-8", - "google/switch-base-16", - "google/switch-base-32", - "google/switch-base-64", - "google/switch-base-128", - "google/switch-base-256", - "google/switch-large-128", - "google/switch-xxl-128", - "google/switch-c-2048", - ] -) - -T5_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - 
"google-t5/t5-small": "https://huggingface.co/google-t5/t5-small/resolve/main/config.json", - "google-t5/t5-base": "https://huggingface.co/google-t5/t5-base/resolve/main/config.json", - "google-t5/t5-large": "https://huggingface.co/google-t5/t5-large/resolve/main/config.json", - "google-t5/t5-3b": "https://huggingface.co/google-t5/t5-3b/resolve/main/config.json", - "google-t5/t5-11b": "https://huggingface.co/google-t5/t5-11b/resolve/main/config.json", - } -) - -T5_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["google-t5/t5-small", "google-t5/t5-base", "google-t5/t5-large", "google-t5/t5-3b", "google-t5/t5-11b"] -) - -TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["google-t5/t5-small", "google-t5/t5-base", "google-t5/t5-large", "google-t5/t5-3b", "google-t5/t5-11b"] -) - -TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/table-transformer-detection": "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json" - } -) - -TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/table-transformer-detection"]) - -TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/tapas-base-finetuned-sqa": "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json", - "google/tapas-base-finetuned-wtq": "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json", - "google/tapas-base-finetuned-wikisql-supervised": "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json", - "google/tapas-base-finetuned-tabfact": "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json", - } -) - -TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "google/tapas-large", - "google/tapas-large-finetuned-sqa", - "google/tapas-large-finetuned-wtq", - "google/tapas-large-finetuned-wikisql-supervised", - "google/tapas-large-finetuned-tabfact", - "google/tapas-base", - 
"google/tapas-base-finetuned-sqa", - "google/tapas-base-finetuned-wtq", - "google/tapas-base-finetuned-wikisql-supervised", - "google/tapas-base-finetuned-tabfact", - "google/tapas-small", - "google/tapas-small-finetuned-sqa", - "google/tapas-small-finetuned-wtq", - "google/tapas-small-finetuned-wikisql-supervised", - "google/tapas-small-finetuned-tabfact", - "google/tapas-mini", - "google/tapas-mini-finetuned-sqa", - "google/tapas-mini-finetuned-wtq", - "google/tapas-mini-finetuned-wikisql-supervised", - "google/tapas-mini-finetuned-tabfact", - "google/tapas-tiny", - "google/tapas-tiny-finetuned-sqa", - "google/tapas-tiny-finetuned-wtq", - "google/tapas-tiny-finetuned-wikisql-supervised", - "google/tapas-tiny-finetuned-tabfact", - ] -) - -TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "google/tapas-large", - "google/tapas-large-finetuned-sqa", - "google/tapas-large-finetuned-wtq", - "google/tapas-large-finetuned-wikisql-supervised", - "google/tapas-large-finetuned-tabfact", - "google/tapas-base", - "google/tapas-base-finetuned-sqa", - "google/tapas-base-finetuned-wtq", - "google/tapas-base-finetuned-wikisql-supervised", - "google/tapas-base-finetuned-tabfact", - "google/tapas-small", - "google/tapas-small-finetuned-sqa", - "google/tapas-small-finetuned-wtq", - "google/tapas-small-finetuned-wikisql-supervised", - "google/tapas-small-finetuned-tabfact", - "google/tapas-mini", - "google/tapas-mini-finetuned-sqa", - "google/tapas-mini-finetuned-wtq", - "google/tapas-mini-finetuned-wikisql-supervised", - "google/tapas-mini-finetuned-tabfact", - "google/tapas-tiny", - "google/tapas-tiny-finetuned-sqa", - "google/tapas-tiny-finetuned-wtq", - "google/tapas-tiny-finetuned-wikisql-supervised", - "google/tapas-tiny-finetuned-tabfact", - ] -) - -TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "huggingface/time-series-transformer-tourism-monthly": 
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json" - } -) - -TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["huggingface/time-series-transformer-tourism-monthly"] -) - -TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json"} -) - -TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/timesformer-base-finetuned-k400"]) - -TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/trocr-base-handwritten": "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json" - } -) - -TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/trocr-base-handwritten"]) - -TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"ZinengTang/tvlt-base": "https://huggingface.co/ZinengTang/tvlt-base/blob/main/config.json"} -) - -TVLT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["ZinengTang/tvlt-base"]) - -TVP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"Intel/tvp-base": "https://huggingface.co/Intel/tvp-base/resolve/main/config.json"} -) - -TVP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Intel/tvp-base", "Intel/tvp-base-ANet"]) - -UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"microsoft/udop-large": "https://huggingface.co/microsoft/udop-large/resolve/main/config.json"} -) - -UDOP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/udop-large"]) - -UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/unispeech-large-1500h-cv": "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json" - } -) - -UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["microsoft/unispeech-large-1500h-cv", "microsoft/unispeech-large-multi-lingual-1500h-cv"] -) - -UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/unispeech-sat-base-100h-libri-ft": 
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json" - } -) - -UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList([]) - -UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"dg845/univnet-dev": "https://huggingface.co/dg845/univnet-dev/resolve/main/config.json"} -) - -UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["dg845/univnet-dev"]) - -VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"MCG-NJU/videomae-base": "https://huggingface.co/MCG-NJU/videomae-base/resolve/main/config.json"} -) - -VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["MCG-NJU/videomae-base"]) - -VILT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"dandelin/vilt-b32-mlm": "https://huggingface.co/dandelin/vilt-b32-mlm/blob/main/config.json"} -) - -VILT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["dandelin/vilt-b32-mlm"]) - -VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"ybelkada/vip-llava-7b-hf": "https://huggingface.co/llava-hf/vip-llava-7b-hf/resolve/main/config.json"} -) - -VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["llava-hf/vip-llava-7b-hf"]) - -VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json", - "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json", - "uclanlp/visualbert-vqa-coco-pre": "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json", - "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json", - "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json", - "uclanlp/visualbert-vcr-coco-pre": "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json", - "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json", - 
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json", - "uclanlp/visualbert-nlvr2-coco-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json", - } -) - -VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "uclanlp/visualbert-vqa", - "uclanlp/visualbert-vqa-pre", - "uclanlp/visualbert-vqa-coco-pre", - "uclanlp/visualbert-vcr", - "uclanlp/visualbert-vcr-pre", - "uclanlp/visualbert-vcr-coco-pre", - "uclanlp/visualbert-nlvr2", - "uclanlp/visualbert-nlvr2-pre", - "uclanlp/visualbert-nlvr2-coco-pre", - ] -) - -VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json"} -) - -VIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/vit-base-patch16-224"]) - -VIT_HYBRID_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"google/vit-hybrid-base-bit-384": "https://huggingface.co/vit-hybrid-base-bit-384/resolve/main/config.json"} -) - -VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/vit-hybrid-base-bit-384"]) - -VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json"} -) - -VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/vit-mae-base"]) - -VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json"} -) - -VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/vit-msn-small"]) - -VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/vit-det-base": "https://huggingface.co/facebook/vit-det-base/resolve/main/config.json"} -) - -VITDET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/vit-det-base"]) - -VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "hustvl/vitmatte-small-composition-1k": 
"https://huggingface.co/hustvl/vitmatte-small-composition-1k/resolve/main/config.json" - } -) - -VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["hustvl/vitmatte-small-composition-1k"]) - -VITS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/mms-tts-eng": "https://huggingface.co/facebook/mms-tts-eng/resolve/main/config.json"} -) - -VITS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/mms-tts-eng"]) - -VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "google/vivit-b-16x2-kinetics400": "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json" - } -) - -VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/vivit-b-16x2-kinetics400"]) - -WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json"} -) - -WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "facebook/wav2vec2-base-960h", - "facebook/wav2vec2-large-960h", - "facebook/wav2vec2-large-960h-lv60", - "facebook/wav2vec2-large-960h-lv60-self", - ] -) - -TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "facebook/wav2vec2-base-960h", - "facebook/wav2vec2-large-960h", - "facebook/wav2vec2-large-960h-lv60", - "facebook/wav2vec2-large-960h-lv60-self", - ] -) - -WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/w2v-bert-2.0": "https://huggingface.co/facebook/w2v-bert-2.0/resolve/main/config.json"} -) - -WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/w2v-bert-2.0"]) - -WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/wav2vec2-conformer-rel-pos-large": "https://huggingface.co/facebook/wav2vec2-conformer-rel-pos-large/resolve/main/config.json" - } -) - -WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/wav2vec2-conformer-rel-pos-large"]) - -WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - 
{"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json"} -) - -WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["microsoft/wavlm-base", "microsoft/wavlm-base-plus", "microsoft/wavlm-large"] -) - -WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json"} -) - -WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai/whisper-base"]) - -TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai/whisper-base"]) - -XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"microsoft/xclip-base-patch32": "https://huggingface.co/microsoft/xclip-base-patch32/resolve/main/config.json"} -) - -XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/xclip-base-patch32"]) - -XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json"} -) - -XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/xglm-564M"]) - -TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/xglm-564M"]) - -XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "FacebookAI/xlm-mlm-en-2048": "https://huggingface.co/FacebookAI/xlm-mlm-en-2048/resolve/main/config.json", - "FacebookAI/xlm-mlm-ende-1024": "https://huggingface.co/FacebookAI/xlm-mlm-ende-1024/resolve/main/config.json", - "FacebookAI/xlm-mlm-enfr-1024": "https://huggingface.co/FacebookAI/xlm-mlm-enfr-1024/resolve/main/config.json", - "FacebookAI/xlm-mlm-enro-1024": "https://huggingface.co/FacebookAI/xlm-mlm-enro-1024/resolve/main/config.json", - "FacebookAI/xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/FacebookAI/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json", - "FacebookAI/xlm-mlm-xnli15-1024": "https://huggingface.co/FacebookAI/xlm-mlm-xnli15-1024/resolve/main/config.json", - "FacebookAI/xlm-clm-enfr-1024": 
"https://huggingface.co/FacebookAI/xlm-clm-enfr-1024/resolve/main/config.json", - "FacebookAI/xlm-clm-ende-1024": "https://huggingface.co/FacebookAI/xlm-clm-ende-1024/resolve/main/config.json", - "FacebookAI/xlm-mlm-17-1280": "https://huggingface.co/FacebookAI/xlm-mlm-17-1280/resolve/main/config.json", - "FacebookAI/xlm-mlm-100-1280": "https://huggingface.co/FacebookAI/xlm-mlm-100-1280/resolve/main/config.json", - } -) - -XLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "FacebookAI/xlm-mlm-en-2048", - "FacebookAI/xlm-mlm-ende-1024", - "FacebookAI/xlm-mlm-enfr-1024", - "FacebookAI/xlm-mlm-enro-1024", - "FacebookAI/xlm-mlm-tlm-xnli15-1024", - "FacebookAI/xlm-mlm-xnli15-1024", - "FacebookAI/xlm-clm-enfr-1024", - "FacebookAI/xlm-clm-ende-1024", - "FacebookAI/xlm-mlm-17-1280", - "FacebookAI/xlm-mlm-100-1280", - ] -) - -TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "FacebookAI/xlm-mlm-en-2048", - "FacebookAI/xlm-mlm-ende-1024", - "FacebookAI/xlm-mlm-enfr-1024", - "FacebookAI/xlm-mlm-enro-1024", - "FacebookAI/xlm-mlm-tlm-xnli15-1024", - "FacebookAI/xlm-mlm-xnli15-1024", - "FacebookAI/xlm-clm-enfr-1024", - "FacebookAI/xlm-clm-ende-1024", - "FacebookAI/xlm-mlm-17-1280", - "FacebookAI/xlm-mlm-100-1280", - ] -) - -XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "microsoft/xprophetnet-large-wiki100-cased": "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json" - } -) - -XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/xprophetnet-large-wiki100-cased"]) - -XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "FacebookAI/xlm-roberta-base": "https://huggingface.co/FacebookAI/xlm-roberta-base/resolve/main/config.json", - "FacebookAI/xlm-roberta-large": "https://huggingface.co/FacebookAI/xlm-roberta-large/resolve/main/config.json", - "FacebookAI/xlm-roberta-large-finetuned-conll02-dutch": 
"https://huggingface.co/FacebookAI/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json", - "FacebookAI/xlm-roberta-large-finetuned-conll02-spanish": "https://huggingface.co/FacebookAI/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json", - "FacebookAI/xlm-roberta-large-finetuned-conll03-english": "https://huggingface.co/FacebookAI/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json", - "FacebookAI/xlm-roberta-large-finetuned-conll03-german": "https://huggingface.co/FacebookAI/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json", - } -) - -XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "FacebookAI/xlm-roberta-base", - "FacebookAI/xlm-roberta-large", - "FacebookAI/xlm-roberta-large-finetuned-conll02-dutch", - "FacebookAI/xlm-roberta-large-finetuned-conll02-spanish", - "FacebookAI/xlm-roberta-large-finetuned-conll03-english", - "FacebookAI/xlm-roberta-large-finetuned-conll03-german", - ] -) - -TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "FacebookAI/xlm-roberta-base", - "FacebookAI/xlm-roberta-large", - "joeddav/xlm-roberta-large-xnli", - "cardiffnlp/twitter-xlm-roberta-base-sentiment", - ] -) - -FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - ["FacebookAI/xlm-roberta-base", "FacebookAI/xlm-roberta-large"] -) - -XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json", - "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json", - } -) - -XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/xlm-roberta-xl", "facebook/xlm-roberta-xxl"]) - -XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "xlnet/xlnet-base-cased": "https://huggingface.co/xlnet/xlnet-base-cased/resolve/main/config.json", - "xlnet/xlnet-large-cased": 
"https://huggingface.co/xlnet/xlnet-large-cased/resolve/main/config.json", - } -) - -XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["xlnet/xlnet-base-cased", "xlnet/xlnet-large-cased"]) - -TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["xlnet/xlnet-base-cased", "xlnet/xlnet-large-cased"]) - -XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - { - "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", - "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", - "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", - "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", - "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", - "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", - "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", - "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", - "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", - } -) - -XMOD_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList( - [ - "facebook/xmod-base", - "facebook/xmod-large-prenorm", - "facebook/xmod-base-13-125k", - "facebook/xmod-base-30-125k", - "facebook/xmod-base-30-195k", - "facebook/xmod-base-60-125k", - "facebook/xmod-base-60-265k", - "facebook/xmod-base-75-125k", - "facebook/xmod-base-75-269k", - ] -) - -YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - {"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json"} -) - -YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["hustvl/yolos-small"]) - -YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict( - 
{"uw-madison/yoso-4096": "https://huggingface.co/uw-madison/yoso-4096/resolve/main/config.json"} -) - -YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["uw-madison/yoso-4096"]) - - -CONFIG_ARCHIVE_MAP_MAPPING_NAMES = OrderedDict( - [ - # Add archive maps here) - ("albert", "ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("align", "ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("altclip", "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("audio-spectrogram-transformer", "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("autoformer", "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("bark", "BARK_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("bart", "BART_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("beit", "BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("bert", "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("big_bird", "BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("bigbird_pegasus", "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("biogpt", "BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("bit", "BIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("blenderbot", "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("blenderbot-small", "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("blip", "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("blip-2", "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("bloom", "BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("bridgetower", "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("bros", "BROS_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("camembert", "CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("canine", "CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("chinese_clip", "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("clap", "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST"), - ("clip", "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("clipseg", "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("clvp", "CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("codegen", "CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("conditional_detr", "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("convbert", 
"CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("convnext", "CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("convnextv2", "CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("cpmant", "CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("ctrl", "CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("cvt", "CVT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("data2vec-audio", "DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("data2vec-text", "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("data2vec-vision", "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("deberta", "DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("deberta-v2", "DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("deformable_detr", "DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("deit", "DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("depth_anything", "DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("deta", "DETA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("detr", "DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("dinat", "DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("dinov2", "DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("distilbert", "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("donut-swin", "DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("dpr", "DPR_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("dpt", "DPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("efficientformer", "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("efficientnet", "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("electra", "ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("encodec", "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("ernie", "ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("ernie_m", "ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("esm", "ESM_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("falcon", "FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("fastspeech2_conformer", "FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("flaubert", "FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("flava", "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("fnet", "FNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("focalnet", 
"FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("fsmt", "FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("funnel", "FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("fuyu", "FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("gemma", "GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("git", "GIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("glpn", "GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("gpt2", "GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("gpt_bigcode", "GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("gpt_neo", "GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("gpt_neox", "GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("gpt_neox_japanese", "GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("gptj", "GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("gptsan-japanese", "GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("graphormer", "GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("groupvit", "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("hubert", "HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("ibert", "IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("idefics", "IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("imagegpt", "IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("informer", "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("instructblip", "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("jukebox", "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("kosmos-2", "KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("layoutlm", "LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("layoutlmv2", "LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("layoutlmv3", "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("led", "LED_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("levit", "LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("lilt", "LILT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("llama", "LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("llava", "LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("longformer", "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("longt5", "LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("luke", "LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("lxmert", 
"LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("m2m_100", "M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mamba", "MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("markuplm", "MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mask2former", "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("maskformer", "MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mbart", "MBART_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mctct", "MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mega", "MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("megatron-bert", "MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mgp-str", "MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mistral", "MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mixtral", "MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mobilenet_v1", "MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mobilenet_v2", "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mobilevit", "MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mobilevitv2", "MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mpnet", "MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mpt", "MPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mra", "MRA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("musicgen", "MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("mvp", "MVP_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("nat", "NAT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("nezha", "NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("nllb-moe", "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("nystromformer", "NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("oneformer", "ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("olmo", "OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("open-llama", "OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("openai-gpt", "OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("opt", "OPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("owlv2", "OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("owlvit", "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("patchtsmixer", "PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("patchtst", "PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("pegasus", 
"PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("pegasus_x", "PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("perceiver", "PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("persimmon", "PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("phi", "PHI_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("pix2struct", "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("plbart", "PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("poolformer", "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("pop2piano", "POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("prophetnet", "PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("pvt", "PVT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("qdqbert", "QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("qwen2", "QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("realm", "REALM_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("regnet", "REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("rembert", "REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("resnet", "RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("retribert", "RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("roberta", "ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("roberta-prelayernorm", "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("roc_bert", "ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("roformer", "ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("rwkv", "RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("sam", "SAM_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("seamless_m4t", "SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("seamless_m4t_v2", "SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("segformer", "SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("seggpt", "SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("sew", "SEW_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("sew-d", "SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("siglip", "SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("speech_to_text", "SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("speech_to_text_2", "SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("speecht5", "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("splinter", 
"SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("squeezebert", "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("stablelm", "STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("starcoder2", "STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("swiftformer", "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("swin", "SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("swin2sr", "SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("swinv2", "SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("switch_transformers", "SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("t5", "T5_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("table-transformer", "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("tapas", "TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("time_series_transformer", "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("timesformer", "TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("transfo-xl", "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("tvlt", "TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("tvp", "TVP_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("udop", "UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("unispeech", "UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("unispeech-sat", "UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("univnet", "UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("van", "VAN_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("videomae", "VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("vilt", "VILT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("vipllava", "VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("visual_bert", "VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("vit", "VIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("vit_hybrid", "VIT_HYBRID_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("vit_mae", "VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("vit_msn", "VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("vitdet", "VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("vitmatte", "VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("vits", "VITS_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("vivit", "VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("wav2vec2", 
"WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("wav2vec2-bert", "WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("wav2vec2-conformer", "WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("whisper", "WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("xclip", "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("xglm", "XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("xlm", "XLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("xlm-prophetnet", "XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("xlm-roberta", "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("xlnet", "XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("xmod", "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("yolos", "YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ("yoso", "YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP"), - ] -) diff --git a/src/transformers/models/deprecated/mctct/__init__.py b/src/transformers/models/deprecated/mctct/__init__.py index 567be97b7cd863..4e0a06b1779d2f 100644 --- a/src/transformers/models/deprecated/mctct/__init__.py +++ b/src/transformers/models/deprecated/mctct/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"], + "configuration_mctct": ["MCTCTConfig"], "feature_extraction_mctct": ["MCTCTFeatureExtractor"], "processing_mctct": ["MCTCTProcessor"], } @@ -30,7 +30,6 @@ pass else: _import_structure["modeling_mctct"] = [ - "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST", "MCTCTForCTC", "MCTCTModel", "MCTCTPreTrainedModel", @@ -38,7 +37,7 @@ if TYPE_CHECKING: - from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig + from .configuration_mctct import MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor @@ -48,7 +47,7 @@ except OptionalDependencyNotAvailable: pass else: - from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel + from .modeling_mctct import MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys diff 
--git a/src/transformers/models/deprecated/mctct/configuration_mctct.py b/src/transformers/models/deprecated/mctct/configuration_mctct.py index 6546b18eab0522..c5de7347807733 100644 --- a/src/transformers/models/deprecated/mctct/configuration_mctct.py +++ b/src/transformers/models/deprecated/mctct/configuration_mctct.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from .._archive_maps import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MCTCTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MCTCTModel`]. It is used to instantiate an diff --git a/src/transformers/models/deprecated/mctct/modeling_mctct.py b/src/transformers/models/deprecated/mctct/modeling_mctct.py index 2d9ef6cf724c28..95c860fa9a49c5 100755 --- a/src/transformers/models/deprecated/mctct/modeling_mctct.py +++ b/src/transformers/models/deprecated/mctct/modeling_mctct.py @@ -52,9 +52,6 @@ _CTC_EXPECTED_LOSS = 1885.65 -from .._archive_maps import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class MCTCTConv1dSubsampler(nn.Module): """ Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation diff --git a/src/transformers/models/deprecated/open_llama/__init__.py b/src/transformers/models/deprecated/open_llama/__init__.py index 446c9f076d3134..085c91fdb69538 100644 --- a/src/transformers/models/deprecated/open_llama/__init__.py +++ b/src/transformers/models/deprecated/open_llama/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_open_llama": ["OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "OpenLlamaConfig"], + "configuration_open_llama": ["OpenLlamaConfig"], } try: @@ -57,7 +57,7 @@ if TYPE_CHECKING: - from .configuration_open_llama import OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenLlamaConfig + from .configuration_open_llama import OpenLlamaConfig try: if not is_sentencepiece_available(): diff --git 
a/src/transformers/models/deprecated/open_llama/configuration_open_llama.py b/src/transformers/models/deprecated/open_llama/configuration_open_llama.py index 0111e031251a2c..ae2add5a5f29ae 100644 --- a/src/transformers/models/deprecated/open_llama/configuration_open_llama.py +++ b/src/transformers/models/deprecated/open_llama/configuration_open_llama.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from .._archive_maps import OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class OpenLlamaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`OpenLlamaModel`]. It is used to instantiate an diff --git a/src/transformers/models/deprecated/retribert/__init__.py b/src/transformers/models/deprecated/retribert/__init__.py index dba5e14594e16c..ff792f40a2a88c 100644 --- a/src/transformers/models/deprecated/retribert/__init__.py +++ b/src/transformers/models/deprecated/retribert/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_retribert": ["RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RetriBertConfig"], + "configuration_retribert": ["RetriBertConfig"], "tokenization_retribert": ["RetriBertTokenizer"], } @@ -37,14 +37,13 @@ pass else: _import_structure["modeling_retribert"] = [ - "RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RetriBertModel", "RetriBertPreTrainedModel", ] if TYPE_CHECKING: - from .configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig + from .configuration_retribert import RetriBertConfig from .tokenization_retribert import RetriBertTokenizer try: @@ -62,7 +61,6 @@ pass else: from .modeling_retribert import ( - RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RetriBertModel, RetriBertPreTrainedModel, ) diff --git a/src/transformers/models/deprecated/retribert/configuration_retribert.py b/src/transformers/models/deprecated/retribert/configuration_retribert.py index c188c7347a8fb8..dfa7d3b65b6f23 100644 --- 
a/src/transformers/models/deprecated/retribert/configuration_retribert.py +++ b/src/transformers/models/deprecated/retribert/configuration_retribert.py @@ -20,8 +20,6 @@ logger = logging.get_logger(__name__) -from .._archive_maps import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - class RetriBertConfig(PretrainedConfig): r""" diff --git a/src/transformers/models/deprecated/retribert/modeling_retribert.py b/src/transformers/models/deprecated/retribert/modeling_retribert.py index 7dba8a276eeb56..d8af23fb49ef00 100644 --- a/src/transformers/models/deprecated/retribert/modeling_retribert.py +++ b/src/transformers/models/deprecated/retribert/modeling_retribert.py @@ -33,9 +33,6 @@ logger = logging.get_logger(__name__) -from .._archive_maps import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL # class RetriBertPreTrainedModel(PreTrainedModel): """ diff --git a/src/transformers/models/deprecated/trajectory_transformer/__init__.py b/src/transformers/models/deprecated/trajectory_transformer/__init__.py index b7af1bb48cb7d6..1ec0385898409b 100644 --- a/src/transformers/models/deprecated/trajectory_transformer/__init__.py +++ b/src/transformers/models/deprecated/trajectory_transformer/__init__.py @@ -17,10 +17,7 @@ _import_structure = { - "configuration_trajectory_transformer": [ - "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "TrajectoryTransformerConfig", - ], + "configuration_trajectory_transformer": ["TrajectoryTransformerConfig"], } try: @@ -30,7 +27,6 @@ pass else: _import_structure["modeling_trajectory_transformer"] = [ - "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TrajectoryTransformerModel", "TrajectoryTransformerPreTrainedModel", "load_tf_weights_in_trajectory_transformer", @@ -39,7 +35,6 @@ if TYPE_CHECKING: from .configuration_trajectory_transformer import ( - TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) @@ -50,7 +45,6 @@ 
pass else: from .modeling_trajectory_transformer import ( - TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, diff --git a/src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py b/src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py index eccb71fcc429e7..06ec12161fc31b 100644 --- a/src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py +++ b/src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from .._archive_maps import TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class TrajectoryTransformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TrajectoryTransformerModel`]. It is used to diff --git a/src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py b/src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py index 5c98aa45dc2739..24e1815218b256 100644 --- a/src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py +++ b/src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py @@ -42,9 +42,6 @@ _CONFIG_FOR_DOC = "TrajectoryTransformerConfig" -from .._archive_maps import TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def load_tf_weights_in_trajectory_transformer(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: diff --git a/src/transformers/models/deprecated/transfo_xl/__init__.py b/src/transformers/models/deprecated/transfo_xl/__init__.py index f3674e19665ca7..27829fd9ed169a 100644 --- 
a/src/transformers/models/deprecated/transfo_xl/__init__.py +++ b/src/transformers/models/deprecated/transfo_xl/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"], + "configuration_transfo_xl": ["TransfoXLConfig"], "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"], } @@ -29,7 +29,6 @@ pass else: _import_structure["modeling_transfo_xl"] = [ - "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "AdaptiveEmbedding", "TransfoXLForSequenceClassification", "TransfoXLLMHeadModel", @@ -45,7 +44,6 @@ pass else: _import_structure["modeling_tf_transfo_xl"] = [ - "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAdaptiveEmbedding", "TFTransfoXLForSequenceClassification", "TFTransfoXLLMHeadModel", @@ -56,7 +54,7 @@ if TYPE_CHECKING: - from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig + from .configuration_transfo_xl import TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: @@ -66,7 +64,6 @@ pass else: from .modeling_transfo_xl import ( - TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, @@ -82,7 +79,6 @@ pass else: from .modeling_tf_transfo_xl import ( - TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, diff --git a/src/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py b/src/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py index 50bf94ae7ea398..c9dde2e8fd2709 100644 --- a/src/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py +++ b/src/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from .._archive_maps import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class 
TransfoXLConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`TransfoXLModel`] or a [`TFTransfoXLModel`]. It is diff --git a/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py b/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py index 27200a5d63f18b..e490eb8dba3b80 100644 --- a/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py +++ b/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py @@ -52,9 +52,6 @@ _CONFIG_FOR_DOC = "TransfoXLConfig" -from .._archive_maps import TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFPositionalEmbedding(keras.layers.Layer): def __init__(self, demb, **kwargs): super().__init__(**kwargs) diff --git a/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py b/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py index 897a3899c74cbd..52989cfd13eae6 100644 --- a/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py +++ b/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py @@ -43,9 +43,6 @@ _CONFIG_FOR_DOC = "TransfoXLConfig" -from .._archive_maps import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def build_tf_to_pytorch_map(model, config): """ A map of modules from TF to PyTorch. 
This time I use a map to keep the PyTorch model as identical to the original diff --git a/src/transformers/models/deprecated/van/__init__.py b/src/transformers/models/deprecated/van/__init__.py index 2db730984ffa03..59522e4ed46786 100644 --- a/src/transformers/models/deprecated/van/__init__.py +++ b/src/transformers/models/deprecated/van/__init__.py @@ -16,7 +16,7 @@ from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available -_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]} +_import_structure = {"configuration_van": ["VanConfig"]} try: @@ -26,14 +26,13 @@ pass else: _import_structure["modeling_van"] = [ - "VAN_PRETRAINED_MODEL_ARCHIVE_LIST", "VanForImageClassification", "VanModel", "VanPreTrainedModel", ] if TYPE_CHECKING: - from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig + from .configuration_van import VanConfig try: if not is_torch_available(): @@ -42,7 +41,6 @@ pass else: from .modeling_van import ( - VAN_PRETRAINED_MODEL_ARCHIVE_LIST, VanForImageClassification, VanModel, VanPreTrainedModel, diff --git a/src/transformers/models/deprecated/van/configuration_van.py b/src/transformers/models/deprecated/van/configuration_van.py index f58d0215694a93..68a139ffdfcebc 100644 --- a/src/transformers/models/deprecated/van/configuration_van.py +++ b/src/transformers/models/deprecated/van/configuration_van.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from .._archive_maps import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class VanConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VanModel`]. 
It is used to instantiate a VAN model diff --git a/src/transformers/models/deprecated/van/modeling_van.py b/src/transformers/models/deprecated/van/modeling_van.py index 6fa2b73482e358..0f5940707c1152 100644 --- a/src/transformers/models/deprecated/van/modeling_van.py +++ b/src/transformers/models/deprecated/van/modeling_van.py @@ -48,9 +48,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from .._archive_maps import VAN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.convnext.modeling_convnext.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ diff --git a/src/transformers/models/depth_anything/__init__.py b/src/transformers/models/depth_anything/__init__.py index 0d0ea5a514a836..0640e211259f77 100644 --- a/src/transformers/models/depth_anything/__init__.py +++ b/src/transformers/models/depth_anything/__init__.py @@ -17,9 +17,7 @@ from ...utils import OptionalDependencyNotAvailable -_import_structure = { - "configuration_depth_anything": ["DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP", "DepthAnythingConfig"] -} +_import_structure = {"configuration_depth_anything": ["DepthAnythingConfig"]} try: if not is_torch_available(): @@ -28,14 +26,13 @@ pass else: _import_structure["modeling_depth_anything"] = [ - "DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST", "DepthAnythingForDepthEstimation", "DepthAnythingPreTrainedModel", ] if TYPE_CHECKING: - from .configuration_depth_anything import DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP, DepthAnythingConfig + from .configuration_depth_anything import DepthAnythingConfig try: if not is_torch_available(): @@ -44,7 +41,6 @@ pass else: from .modeling_depth_anything import ( - DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST, DepthAnythingForDepthEstimation, DepthAnythingPreTrainedModel, ) diff --git a/src/transformers/models/depth_anything/configuration_depth_anything.py 
b/src/transformers/models/depth_anything/configuration_depth_anything.py index 3d58a3874eedf3..b6d6f388b0910d 100644 --- a/src/transformers/models/depth_anything/configuration_depth_anything.py +++ b/src/transformers/models/depth_anything/configuration_depth_anything.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DepthAnythingConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DepthAnythingModel`]. It is used to instantiate an DepthAnything diff --git a/src/transformers/models/depth_anything/modeling_depth_anything.py b/src/transformers/models/depth_anything/modeling_depth_anything.py index 043bd0fac807b2..9780278a6fd404 100644 --- a/src/transformers/models/depth_anything/modeling_depth_anything.py +++ b/src/transformers/models/depth_anything/modeling_depth_anything.py @@ -39,9 +39,6 @@ _CONFIG_FOR_DOC = "DepthAnythingConfig" -from ..deprecated._archive_maps import DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - DEPTH_ANYTHING_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and diff --git a/src/transformers/models/deta/__init__.py b/src/transformers/models/deta/__init__.py index 2d25a6a71602b3..843a4dc4d803d9 100644 --- a/src/transformers/models/deta/__init__.py +++ b/src/transformers/models/deta/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_deta": ["DETA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetaConfig"], + "configuration_deta": ["DetaConfig"], } try: @@ -36,7 +36,6 @@ pass else: _import_structure["modeling_deta"] = [ - "DETA_PRETRAINED_MODEL_ARCHIVE_LIST", "DetaForObjectDetection", "DetaModel", "DetaPreTrainedModel", @@ -44,7 +43,7 @@ if TYPE_CHECKING: - from .configuration_deta import DETA_PRETRAINED_CONFIG_ARCHIVE_MAP, DetaConfig + from .configuration_deta import DetaConfig try: if not is_vision_available(): @@ -61,7 +60,6 @@ pass else: from .modeling_deta import ( - DETA_PRETRAINED_MODEL_ARCHIVE_LIST, DetaForObjectDetection, DetaModel, DetaPreTrainedModel, diff --git a/src/transformers/models/deta/configuration_deta.py b/src/transformers/models/deta/configuration_deta.py index 1604bc56e6396d..b876e843c0f14c 100644 --- a/src/transformers/models/deta/configuration_deta.py +++ b/src/transformers/models/deta/configuration_deta.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DETA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DetaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DetaModel`]. 
It is used to instantiate a DETA diff --git a/src/transformers/models/deta/modeling_deta.py b/src/transformers/models/deta/modeling_deta.py index b90a62dfa5342c..86ced383a0cc34 100644 --- a/src/transformers/models/deta/modeling_deta.py +++ b/src/transformers/models/deta/modeling_deta.py @@ -152,9 +152,6 @@ def backward(context, grad_output): _CHECKPOINT_FOR_DOC = "jozhang97/deta-swin-large-o365" -from ..deprecated._archive_maps import DETA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrDecoderOutput with DeformableDetr->Deta class DetaDecoderOutput(ModelOutput): diff --git a/src/transformers/models/detr/__init__.py b/src/transformers/models/detr/__init__.py index 9cbaca9a54581f..422fe98230be45 100644 --- a/src/transformers/models/detr/__init__.py +++ b/src/transformers/models/detr/__init__.py @@ -17,7 +17,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available -_import_structure = {"configuration_detr": ["DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetrConfig", "DetrOnnxConfig"]} +_import_structure = {"configuration_detr": ["DetrConfig", "DetrOnnxConfig"]} try: if not is_vision_available(): @@ -35,7 +35,6 @@ pass else: _import_structure["modeling_detr"] = [ - "DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "DetrForObjectDetection", "DetrForSegmentation", "DetrModel", @@ -44,7 +43,7 @@ if TYPE_CHECKING: - from .configuration_detr import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DetrConfig, DetrOnnxConfig + from .configuration_detr import DetrConfig, DetrOnnxConfig try: if not is_vision_available(): @@ -62,7 +61,6 @@ pass else: from .modeling_detr import ( - DETR_PRETRAINED_MODEL_ARCHIVE_LIST, DetrForObjectDetection, DetrForSegmentation, DetrModel, diff --git a/src/transformers/models/detr/configuration_detr.py b/src/transformers/models/detr/configuration_detr.py index db180ef1d41fed..094859bda5d204 100644 --- 
a/src/transformers/models/detr/configuration_detr.py +++ b/src/transformers/models/detr/configuration_detr.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DetrConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DetrModel`]. It is used to instantiate a DETR diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index 0da702db8b67e2..ea8735da9f3d45 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -63,9 +63,6 @@ _CHECKPOINT_FOR_DOC = "facebook/detr-resnet-50" -from ..deprecated._archive_maps import DETR_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class DetrDecoderOutput(BaseModelOutputWithCrossAttentions): """ diff --git a/src/transformers/models/dinat/__init__.py b/src/transformers/models/dinat/__init__.py index 88470f1ca9f9bd..207ebfdaa8693f 100644 --- a/src/transformers/models/dinat/__init__.py +++ b/src/transformers/models/dinat/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available -_import_structure = {"configuration_dinat": ["DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DinatConfig"]} +_import_structure = {"configuration_dinat": ["DinatConfig"]} try: @@ -26,7 +26,6 @@ pass else: _import_structure["modeling_dinat"] = [ - "DINAT_PRETRAINED_MODEL_ARCHIVE_LIST", "DinatForImageClassification", "DinatModel", "DinatPreTrainedModel", @@ -34,7 +33,7 @@ ] if TYPE_CHECKING: - from .configuration_dinat import DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP, DinatConfig + from .configuration_dinat import DinatConfig try: if not is_torch_available(): @@ -43,7 +42,6 @@ pass else: from .modeling_dinat import ( - DINAT_PRETRAINED_MODEL_ARCHIVE_LIST, DinatBackbone, DinatForImageClassification, DinatModel, diff --git 
a/src/transformers/models/dinat/configuration_dinat.py b/src/transformers/models/dinat/configuration_dinat.py index 4bd38c73857a97..6138e8072b671a 100644 --- a/src/transformers/models/dinat/configuration_dinat.py +++ b/src/transformers/models/dinat/configuration_dinat.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DinatConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DinatModel`]. It is used to instantiate a Dinat diff --git a/src/transformers/models/dinat/modeling_dinat.py b/src/transformers/models/dinat/modeling_dinat.py index 72bf6d1170094c..03c0fe33a2838b 100644 --- a/src/transformers/models/dinat/modeling_dinat.py +++ b/src/transformers/models/dinat/modeling_dinat.py @@ -68,9 +68,6 @@ def natten2dav(*args, **kwargs): _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import DINAT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # drop_path and DinatDropPath are from the timm library. 
diff --git a/src/transformers/models/dinov2/__init__.py b/src/transformers/models/dinov2/__init__.py index 01d02a9e65fda0..25cf73b315bf2d 100644 --- a/src/transformers/models/dinov2/__init__.py +++ b/src/transformers/models/dinov2/__init__.py @@ -20,9 +20,7 @@ ) -_import_structure = { - "configuration_dinov2": ["DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Dinov2Config", "Dinov2OnnxConfig"] -} +_import_structure = {"configuration_dinov2": ["Dinov2Config", "Dinov2OnnxConfig"]} try: if not is_torch_available(): @@ -31,7 +29,6 @@ pass else: _import_structure["modeling_dinov2"] = [ - "DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Dinov2ForImageClassification", "Dinov2Model", "Dinov2PreTrainedModel", @@ -39,7 +36,7 @@ ] if TYPE_CHECKING: - from .configuration_dinov2 import DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Dinov2Config, Dinov2OnnxConfig + from .configuration_dinov2 import Dinov2Config, Dinov2OnnxConfig try: if not is_torch_available(): @@ -48,7 +45,6 @@ pass else: from .modeling_dinov2 import ( - DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST, Dinov2Backbone, Dinov2ForImageClassification, Dinov2Model, diff --git a/src/transformers/models/dinov2/configuration_dinov2.py b/src/transformers/models/dinov2/configuration_dinov2.py index b5fe872a706fc7..48feba23b118e3 100644 --- a/src/transformers/models/dinov2/configuration_dinov2.py +++ b/src/transformers/models/dinov2/configuration_dinov2.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Dinov2Config(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Dinov2Model`]. 
It is used to instantiate an diff --git a/src/transformers/models/dinov2/modeling_dinov2.py b/src/transformers/models/dinov2/modeling_dinov2.py index c90221f145d4ba..505c1b5c06d4af 100644 --- a/src/transformers/models/dinov2/modeling_dinov2.py +++ b/src/transformers/models/dinov2/modeling_dinov2.py @@ -58,9 +58,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class Dinov2Embeddings(nn.Module): """ Construct the CLS token, mask token, position and patch embeddings. diff --git a/src/transformers/models/distilbert/__init__.py b/src/transformers/models/distilbert/__init__.py index 6a2756eb9d1c26..7d6586bfa50809 100644 --- a/src/transformers/models/distilbert/__init__.py +++ b/src/transformers/models/distilbert/__init__.py @@ -26,7 +26,6 @@ _import_structure = { "configuration_distilbert": [ - "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertOnnxConfig", ], @@ -48,7 +47,6 @@ pass else: _import_structure["modeling_distilbert"] = [ - "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "DistilBertForMaskedLM", "DistilBertForMultipleChoice", "DistilBertForQuestionAnswering", @@ -65,7 +63,6 @@ pass else: _import_structure["modeling_tf_distilbert"] = [ - "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDistilBertForMaskedLM", "TFDistilBertForMultipleChoice", "TFDistilBertForQuestionAnswering", @@ -95,7 +92,6 @@ if TYPE_CHECKING: from .configuration_distilbert import ( - DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) @@ -116,7 +112,6 @@ pass else: from .modeling_distilbert import ( - DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, @@ -133,7 +128,6 @@ pass else: from .modeling_tf_distilbert import ( - TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, diff 
--git a/src/transformers/models/distilbert/configuration_distilbert.py b/src/transformers/models/distilbert/configuration_distilbert.py index 5f6b004dc0bbb9..45e2d3b3f060d2 100644 --- a/src/transformers/models/distilbert/configuration_distilbert.py +++ b/src/transformers/models/distilbert/configuration_distilbert.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DistilBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DistilBertModel`] or a [`TFDistilBertModel`]. It diff --git a/src/transformers/models/distilbert/modeling_distilbert.py b/src/transformers/models/distilbert/modeling_distilbert.py index 3a65e0296116dc..e8357682ffe759 100755 --- a/src/transformers/models/distilbert/modeling_distilbert.py +++ b/src/transformers/models/distilbert/modeling_distilbert.py @@ -63,9 +63,6 @@ _CONFIG_FOR_DOC = "DistilBertConfig" -from ..deprecated._archive_maps import DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE # diff --git a/src/transformers/models/distilbert/modeling_tf_distilbert.py b/src/transformers/models/distilbert/modeling_tf_distilbert.py index c41deac3f2e57e..1e663c92293c78 100644 --- a/src/transformers/models/distilbert/modeling_tf_distilbert.py +++ b/src/transformers/models/distilbert/modeling_tf_distilbert.py @@ -63,9 +63,6 @@ _CONFIG_FOR_DOC = "DistilBertConfig" -from ..deprecated._archive_maps import TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" diff --git a/src/transformers/models/donut/__init__.py b/src/transformers/models/donut/__init__.py index c548a181a3bf30..f6f38609e6ff54 100644 --- a/src/transformers/models/donut/__init__.py +++ b/src/transformers/models/donut/__init__.py @@ -17,7 
+17,7 @@ _import_structure = { - "configuration_donut_swin": ["DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "DonutSwinConfig"], + "configuration_donut_swin": ["DonutSwinConfig"], "processing_donut": ["DonutProcessor"], } @@ -28,7 +28,6 @@ pass else: _import_structure["modeling_donut_swin"] = [ - "DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "DonutSwinModel", "DonutSwinPreTrainedModel", ] @@ -44,7 +43,7 @@ if TYPE_CHECKING: - from .configuration_donut_swin import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, DonutSwinConfig + from .configuration_donut_swin import DonutSwinConfig from .processing_donut import DonutProcessor try: @@ -54,7 +53,6 @@ pass else: from .modeling_donut_swin import ( - DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, DonutSwinModel, DonutSwinPreTrainedModel, ) diff --git a/src/transformers/models/donut/configuration_donut_swin.py b/src/transformers/models/donut/configuration_donut_swin.py index e57ddb255a7118..80418e71442f6c 100644 --- a/src/transformers/models/donut/configuration_donut_swin.py +++ b/src/transformers/models/donut/configuration_donut_swin.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DonutSwinConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DonutSwinModel`]. 
It is used to instantiate a diff --git a/src/transformers/models/donut/modeling_donut_swin.py b/src/transformers/models/donut/modeling_donut_swin.py index bf293ae1679361..e48e1ddfe14cb2 100644 --- a/src/transformers/models/donut/modeling_donut_swin.py +++ b/src/transformers/models/donut/modeling_donut_swin.py @@ -49,9 +49,6 @@ _EXPECTED_OUTPUT_SHAPE = [1, 49, 768] -from ..deprecated._archive_maps import DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass # Copied from transformers.models.swin.modeling_swin.SwinEncoderOutput with Swin->DonutSwin class DonutSwinEncoderOutput(ModelOutput): diff --git a/src/transformers/models/dpr/__init__.py b/src/transformers/models/dpr/__init__.py index 6ea8b78e503739..ef4bccee54d296 100644 --- a/src/transformers/models/dpr/__init__.py +++ b/src/transformers/models/dpr/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_dpr": ["DPR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPRConfig"], + "configuration_dpr": ["DPRConfig"], "tokenization_dpr": [ "DPRContextEncoderTokenizer", "DPRQuestionEncoderTokenizer", @@ -53,9 +53,6 @@ pass else: _import_structure["modeling_dpr"] = [ - "DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", - "DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", - "DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST", "DPRContextEncoder", "DPRPretrainedContextEncoder", "DPRPreTrainedModel", @@ -72,9 +69,6 @@ pass else: _import_structure["modeling_tf_dpr"] = [ - "TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", - "TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", - "TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDPRContextEncoder", "TFDPRPretrainedContextEncoder", "TFDPRPretrainedQuestionEncoder", @@ -85,7 +79,7 @@ if TYPE_CHECKING: - from .configuration_dpr import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig + from .configuration_dpr import DPRConfig from .tokenization_dpr import ( DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, @@ -112,9 +106,6 @@ pass else: from 
.modeling_dpr import ( - DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, - DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, - DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, DPRContextEncoder, DPRPretrainedContextEncoder, DPRPreTrainedModel, @@ -131,9 +122,6 @@ pass else: from .modeling_tf_dpr import ( - TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, - TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, - TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, TFDPRContextEncoder, TFDPRPretrainedContextEncoder, TFDPRPretrainedQuestionEncoder, diff --git a/src/transformers/models/dpr/configuration_dpr.py b/src/transformers/models/dpr/configuration_dpr.py index 74ac90a4beb508..3d7abec3ebed25 100644 --- a/src/transformers/models/dpr/configuration_dpr.py +++ b/src/transformers/models/dpr/configuration_dpr.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DPRConfig(PretrainedConfig): r""" [`DPRConfig`] is the configuration class to store the configuration of a *DPRModel*. 
diff --git a/src/transformers/models/dpr/modeling_dpr.py b/src/transformers/models/dpr/modeling_dpr.py index 928f2b93118ac3..2094440a7fde1f 100644 --- a/src/transformers/models/dpr/modeling_dpr.py +++ b/src/transformers/models/dpr/modeling_dpr.py @@ -40,13 +40,6 @@ _CHECKPOINT_FOR_DOC = "facebook/dpr-ctx_encoder-single-nq-base" -from ..deprecated._archive_maps import ( # noqa: F401, E402 - DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402 - DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402 - DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402 -) - - ########## # Outputs ########## diff --git a/src/transformers/models/dpr/modeling_tf_dpr.py b/src/transformers/models/dpr/modeling_tf_dpr.py index e8cb1464f70da8..a2e539f73fc26c 100644 --- a/src/transformers/models/dpr/modeling_tf_dpr.py +++ b/src/transformers/models/dpr/modeling_tf_dpr.py @@ -40,13 +40,6 @@ _CONFIG_FOR_DOC = "DPRConfig" -from ..deprecated._archive_maps import ( # noqa: F401, E402 - TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402 - TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402 - TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402 -) - - ########## # Outputs ########## diff --git a/src/transformers/models/dpt/__init__.py b/src/transformers/models/dpt/__init__.py index da53011b87b318..ef8999d5efba78 100644 --- a/src/transformers/models/dpt/__init__.py +++ b/src/transformers/models/dpt/__init__.py @@ -17,7 +17,7 @@ from ...utils import OptionalDependencyNotAvailable -_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]} +_import_structure = {"configuration_dpt": ["DPTConfig"]} try: if not is_vision_available(): @@ -35,7 +35,6 @@ pass else: _import_structure["modeling_dpt"] = [ - "DPT_PRETRAINED_MODEL_ARCHIVE_LIST", "DPTForDepthEstimation", "DPTForSemanticSegmentation", "DPTModel", @@ -44,7 +43,7 @@ if TYPE_CHECKING: - from .configuration_dpt import 
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig + from .configuration_dpt import DPTConfig try: if not is_vision_available(): @@ -62,7 +61,6 @@ pass else: from .modeling_dpt import ( - DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, diff --git a/src/transformers/models/dpt/configuration_dpt.py b/src/transformers/models/dpt/configuration_dpt.py index 9bdc8d1ef0affb..b21864e9b0bd22 100644 --- a/src/transformers/models/dpt/configuration_dpt.py +++ b/src/transformers/models/dpt/configuration_dpt.py @@ -25,9 +25,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DPTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DPTModel`]. It is used to instantiate an DPT diff --git a/src/transformers/models/dpt/modeling_dpt.py b/src/transformers/models/dpt/modeling_dpt.py index ef6c8bb853abda..5240b651ccfc3b 100755 --- a/src/transformers/models/dpt/modeling_dpt.py +++ b/src/transformers/models/dpt/modeling_dpt.py @@ -55,9 +55,6 @@ _EXPECTED_OUTPUT_SHAPE = [1, 577, 1024] -from ..deprecated._archive_maps import DPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class BaseModelOutputWithIntermediateActivations(ModelOutput): """ diff --git a/src/transformers/models/efficientformer/__init__.py b/src/transformers/models/efficientformer/__init__.py index 25d60d1ee765ef..9b36518587cf44 100644 --- a/src/transformers/models/efficientformer/__init__.py +++ b/src/transformers/models/efficientformer/__init__.py @@ -22,12 +22,7 @@ ) -_import_structure = { - "configuration_efficientformer": [ - "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "EfficientFormerConfig", - ] -} +_import_structure = {"configuration_efficientformer": ["EfficientFormerConfig"]} try: if not is_vision_available(): @@ -44,7 +39,6 @@ pass else: _import_structure["modeling_efficientformer"] = [ - 
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientFormerForImageClassification", "EfficientFormerForImageClassificationWithTeacher", "EfficientFormerModel", @@ -58,7 +52,6 @@ pass else: _import_structure["modeling_tf_efficientformer"] = [ - "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher", "TFEfficientFormerModel", @@ -66,7 +59,7 @@ ] if TYPE_CHECKING: - from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig + from .configuration_efficientformer import EfficientFormerConfig try: if not is_vision_available(): @@ -83,7 +76,6 @@ pass else: from .modeling_efficientformer import ( - EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, @@ -96,7 +88,6 @@ pass else: from .modeling_tf_efficientformer import ( - TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, diff --git a/src/transformers/models/efficientformer/configuration_efficientformer.py b/src/transformers/models/efficientformer/configuration_efficientformer.py index 1641c90711f5d4..0cc7635ea0cde6 100644 --- a/src/transformers/models/efficientformer/configuration_efficientformer.py +++ b/src/transformers/models/efficientformer/configuration_efficientformer.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class EfficientFormerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`EfficientFormerModel`]. 
It is used to diff --git a/src/transformers/models/efficientformer/modeling_efficientformer.py b/src/transformers/models/efficientformer/modeling_efficientformer.py index cc62e9cbd21e40..6d79fe5baed4a7 100644 --- a/src/transformers/models/efficientformer/modeling_efficientformer.py +++ b/src/transformers/models/efficientformer/modeling_efficientformer.py @@ -50,9 +50,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "Egyptian cat" -from ..deprecated._archive_maps import EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class EfficientFormerPatchEmbeddings(nn.Module): """ This class performs downsampling between two stages. For the input tensor with the shape [batch_size, num_channels, diff --git a/src/transformers/models/efficientformer/modeling_tf_efficientformer.py b/src/transformers/models/efficientformer/modeling_tf_efficientformer.py index 77b62999e772ec..605487e6feccdf 100644 --- a/src/transformers/models/efficientformer/modeling_tf_efficientformer.py +++ b/src/transformers/models/efficientformer/modeling_tf_efficientformer.py @@ -59,9 +59,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "LABEL_281" -from ..deprecated._archive_maps import TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFEfficientFormerPatchEmbeddings(keras.layers.Layer): """ This class performs downsampling between two stages. 
For the input tensor with the shape [batch_size, num_channels, diff --git a/src/transformers/models/efficientnet/__init__.py b/src/transformers/models/efficientnet/__init__.py index 6df523721aefc5..28cb70490d9675 100644 --- a/src/transformers/models/efficientnet/__init__.py +++ b/src/transformers/models/efficientnet/__init__.py @@ -23,7 +23,6 @@ _import_structure = { "configuration_efficientnet": [ - "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "EfficientNetConfig", "EfficientNetOnnxConfig", ] @@ -44,7 +43,6 @@ pass else: _import_structure["modeling_efficientnet"] = [ - "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientNetForImageClassification", "EfficientNetModel", "EfficientNetPreTrainedModel", @@ -52,7 +50,6 @@ if TYPE_CHECKING: from .configuration_efficientnet import ( - EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) @@ -72,7 +69,6 @@ pass else: from .modeling_efficientnet import ( - EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, diff --git a/src/transformers/models/efficientnet/configuration_efficientnet.py b/src/transformers/models/efficientnet/configuration_efficientnet.py index 77106c70d7d553..63480a9a6dd7e7 100644 --- a/src/transformers/models/efficientnet/configuration_efficientnet.py +++ b/src/transformers/models/efficientnet/configuration_efficientnet.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class EfficientNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`EfficientNetModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/efficientnet/modeling_efficientnet.py b/src/transformers/models/efficientnet/modeling_efficientnet.py index e415d7f1b46a1e..b047e4f89fabb5 100644 --- a/src/transformers/models/efficientnet/modeling_efficientnet.py +++ b/src/transformers/models/efficientnet/modeling_efficientnet.py @@ -53,9 +53,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - EFFICIENTNET_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and diff --git a/src/transformers/models/electra/__init__.py b/src/transformers/models/electra/__init__.py index 09ce039d25fd05..b79f2410bf354e 100644 --- a/src/transformers/models/electra/__init__.py +++ b/src/transformers/models/electra/__init__.py @@ -25,7 +25,7 @@ _import_structure = { - "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"], + "configuration_electra": ["ElectraConfig", "ElectraOnnxConfig"], "tokenization_electra": ["ElectraTokenizer"], } @@ -44,7 +44,6 @@ pass else: _import_structure["modeling_electra"] = [ - "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST", "ElectraForCausalLM", "ElectraForMaskedLM", "ElectraForMultipleChoice", @@ -64,7 +63,6 @@ pass else: _import_structure["modeling_tf_electra"] = [ - "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFElectraForMaskedLM", "TFElectraForMultipleChoice", "TFElectraForPreTraining", @@ -95,7 +93,7 @@ if TYPE_CHECKING: - from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig + from .configuration_electra import ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: @@ -113,7 +111,6 @@ pass else: from 
.modeling_electra import ( - ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, @@ -133,7 +130,6 @@ pass else: from .modeling_tf_electra import ( - TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, diff --git a/src/transformers/models/electra/configuration_electra.py b/src/transformers/models/electra/configuration_electra.py index b6d1368a9d22d2..f4ca3049837996 100644 --- a/src/transformers/models/electra/configuration_electra.py +++ b/src/transformers/models/electra/configuration_electra.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ElectraConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ElectraModel`] or a [`TFElectraModel`]. It is diff --git a/src/transformers/models/electra/modeling_electra.py b/src/transformers/models/electra/modeling_electra.py index 6fbdda2579c1a4..dd017170bef9a3 100644 --- a/src/transformers/models/electra/modeling_electra.py +++ b/src/transformers/models/electra/modeling_electra.py @@ -54,9 +54,6 @@ _CONFIG_FOR_DOC = "ElectraConfig" -from ..deprecated._archive_maps import ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_or_generator="discriminator"): """Load tf checkpoints in a pytorch model.""" try: diff --git a/src/transformers/models/electra/modeling_tf_electra.py b/src/transformers/models/electra/modeling_tf_electra.py index ba60cd8f5d5754..b903e7f082c6a1 100644 --- a/src/transformers/models/electra/modeling_tf_electra.py +++ b/src/transformers/models/electra/modeling_tf_electra.py @@ -66,9 +66,6 @@ _CONFIG_FOR_DOC = "ElectraConfig" -from ..deprecated._archive_maps import TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from 
transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Electra class TFElectraSelfAttention(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): diff --git a/src/transformers/models/encodec/__init__.py b/src/transformers/models/encodec/__init__.py index d3d9488968bf2c..d67075e5560c75 100644 --- a/src/transformers/models/encodec/__init__.py +++ b/src/transformers/models/encodec/__init__.py @@ -21,10 +21,7 @@ _import_structure = { - "configuration_encodec": [ - "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", - "EncodecConfig", - ], + "configuration_encodec": ["EncodecConfig"], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } @@ -35,14 +32,12 @@ pass else: _import_structure["modeling_encodec"] = [ - "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( - ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor @@ -54,7 +49,6 @@ pass else: from .modeling_encodec import ( - ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) diff --git a/src/transformers/models/encodec/configuration_encodec.py b/src/transformers/models/encodec/configuration_encodec.py index 4e18bb178adf23..4d8611a1788c0d 100644 --- a/src/transformers/models/encodec/configuration_encodec.py +++ b/src/transformers/models/encodec/configuration_encodec.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class EncodecConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`EncodecModel`]. 
It is used to instantiate a diff --git a/src/transformers/models/encodec/modeling_encodec.py b/src/transformers/models/encodec/modeling_encodec.py index 48498b741d18ca..0df2a2a6ca5ba9 100644 --- a/src/transformers/models/encodec/modeling_encodec.py +++ b/src/transformers/models/encodec/modeling_encodec.py @@ -40,9 +40,6 @@ _CONFIG_FOR_DOC = "EncodecConfig" -from ..deprecated._archive_maps import ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class EncodecOutput(ModelOutput): """ diff --git a/src/transformers/models/ernie/__init__.py b/src/transformers/models/ernie/__init__.py index ea7f077f928d39..ddd3b30365d80a 100644 --- a/src/transformers/models/ernie/__init__.py +++ b/src/transformers/models/ernie/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"], + "configuration_ernie": ["ErnieConfig", "ErnieOnnxConfig"], } try: @@ -28,7 +28,6 @@ pass else: _import_structure["modeling_ernie"] = [ - "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST", "ErnieForCausalLM", "ErnieForMaskedLM", "ErnieForMultipleChoice", @@ -42,7 +41,7 @@ ] if TYPE_CHECKING: - from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig + from .configuration_ernie import ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): @@ -51,7 +50,6 @@ pass else: from .modeling_ernie import ( - ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, diff --git a/src/transformers/models/ernie/configuration_ernie.py b/src/transformers/models/ernie/configuration_ernie.py index 81ed03596303ee..16f14489903fde 100644 --- a/src/transformers/models/ernie/configuration_ernie.py +++ b/src/transformers/models/ernie/configuration_ernie.py @@ -25,9 +25,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class 
ErnieConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ErnieModel`] or a [`TFErnieModel`]. It is used to diff --git a/src/transformers/models/ernie/modeling_ernie.py b/src/transformers/models/ernie/modeling_ernie.py index 95e27121bc2046..4c2dc767f85fa0 100644 --- a/src/transformers/models/ernie/modeling_ernie.py +++ b/src/transformers/models/ernie/modeling_ernie.py @@ -56,9 +56,6 @@ _CONFIG_FOR_DOC = "ErnieConfig" -from ..deprecated._archive_maps import ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class ErnieEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" diff --git a/src/transformers/models/ernie_m/__init__.py b/src/transformers/models/ernie_m/__init__.py index b7cd3bdd0681c1..fc7076e4394552 100644 --- a/src/transformers/models/ernie_m/__init__.py +++ b/src/transformers/models/ernie_m/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_ernie_m": ["ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieMConfig"], + "configuration_ernie_m": ["ErnieMConfig"], } try: @@ -36,7 +36,6 @@ pass else: _import_structure["modeling_ernie_m"] = [ - "ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST", "ErnieMForMultipleChoice", "ErnieMForQuestionAnswering", "ErnieMForSequenceClassification", @@ -48,7 +47,7 @@ if TYPE_CHECKING: - from .configuration_ernie_m import ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieMConfig + from .configuration_ernie_m import ErnieMConfig try: if not is_sentencepiece_available(): @@ -65,7 +64,6 @@ pass else: from .modeling_ernie_m import ( - ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieMForInformationExtraction, ErnieMForMultipleChoice, ErnieMForQuestionAnswering, diff --git a/src/transformers/models/ernie_m/configuration_ernie_m.py b/src/transformers/models/ernie_m/configuration_ernie_m.py index 96451c9d9c999c..9c67a8642131f9 100644 --- a/src/transformers/models/ernie_m/configuration_ernie_m.py +++ 
b/src/transformers/models/ernie_m/configuration_ernie_m.py @@ -20,7 +20,6 @@ from typing import Dict from ...configuration_utils import PretrainedConfig -from ..deprecated._archive_maps import ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class ErnieMConfig(PretrainedConfig): diff --git a/src/transformers/models/ernie_m/modeling_ernie_m.py b/src/transformers/models/ernie_m/modeling_ernie_m.py index ac56e120a0c3d4..b8f7e87c850248 100755 --- a/src/transformers/models/ernie_m/modeling_ernie_m.py +++ b/src/transformers/models/ernie_m/modeling_ernie_m.py @@ -45,9 +45,6 @@ _TOKENIZER_FOR_DOC = "ErnieMTokenizer" -from ..deprecated._archive_maps import ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Adapted from paddlenlp.transformers.ernie_m.modeling.ErnieEmbeddings class ErnieMEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" diff --git a/src/transformers/models/esm/__init__.py b/src/transformers/models/esm/__init__.py index 1b07db5a5eea64..a764bedc3fadfd 100644 --- a/src/transformers/models/esm/__init__.py +++ b/src/transformers/models/esm/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_esm": ["ESM_PRETRAINED_CONFIG_ARCHIVE_MAP", "EsmConfig"], + "configuration_esm": ["EsmConfig"], "tokenization_esm": ["EsmTokenizer"], } @@ -28,7 +28,6 @@ pass else: _import_structure["modeling_esm"] = [ - "ESM_PRETRAINED_MODEL_ARCHIVE_LIST", "EsmForMaskedLM", "EsmForSequenceClassification", "EsmForTokenClassification", @@ -44,7 +43,6 @@ pass else: _import_structure["modeling_tf_esm"] = [ - "TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFEsmForMaskedLM", "TFEsmForSequenceClassification", "TFEsmForTokenClassification", @@ -53,7 +51,7 @@ ] if TYPE_CHECKING: - from .configuration_esm import ESM_PRETRAINED_CONFIG_ARCHIVE_MAP, EsmConfig + from .configuration_esm import EsmConfig from .tokenization_esm import EsmTokenizer try: @@ -63,7 +61,6 @@ pass else: from .modeling_esm import ( - 
ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, @@ -79,7 +76,6 @@ pass else: from .modeling_tf_esm import ( - TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, diff --git a/src/transformers/models/esm/configuration_esm.py b/src/transformers/models/esm/configuration_esm.py index 31d309cb04a017..feb06031e5afcf 100644 --- a/src/transformers/models/esm/configuration_esm.py +++ b/src/transformers/models/esm/configuration_esm.py @@ -25,8 +25,6 @@ # TODO Update this -from ..deprecated._archive_maps import ESM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - class EsmConfig(PretrainedConfig): r""" diff --git a/src/transformers/models/esm/modeling_esm.py b/src/transformers/models/esm/modeling_esm.py index a97ea58d7b81d9..e5683a3c99c264 100755 --- a/src/transformers/models/esm/modeling_esm.py +++ b/src/transformers/models/esm/modeling_esm.py @@ -41,9 +41,6 @@ _CONFIG_FOR_DOC = "EsmConfig" -from ..deprecated._archive_maps import ESM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def rotate_half(x): x1, x2 = x.chunk(2, dim=-1) return torch.cat((-x2, x1), dim=-1) diff --git a/src/transformers/models/falcon/__init__.py b/src/transformers/models/falcon/__init__.py index 070e0cc033fbf6..62c1c9262b70fc 100644 --- a/src/transformers/models/falcon/__init__.py +++ b/src/transformers/models/falcon/__init__.py @@ -22,7 +22,7 @@ _import_structure = { - "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"], + "configuration_falcon": ["FalconConfig"], } try: @@ -32,7 +32,6 @@ pass else: _import_structure["modeling_falcon"] = [ - "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST", "FalconForCausalLM", "FalconModel", "FalconPreTrainedModel", @@ -43,7 +42,7 @@ if TYPE_CHECKING: - from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig + from .configuration_falcon import FalconConfig try: if not is_torch_available(): 
@@ -52,7 +51,6 @@ pass else: from .modeling_falcon import ( - FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, diff --git a/src/transformers/models/falcon/configuration_falcon.py b/src/transformers/models/falcon/configuration_falcon.py index 61d202b0960829..ce10faeb20cf91 100644 --- a/src/transformers/models/falcon/configuration_falcon.py +++ b/src/transformers/models/falcon/configuration_falcon.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class FalconConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FalconModel`]. It is used to instantiate a Falcon diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py index 1f4fd41afa2e89..76ca4110e81848 100644 --- a/src/transformers/models/falcon/modeling_falcon.py +++ b/src/transformers/models/falcon/modeling_falcon.py @@ -59,8 +59,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import FALCON_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - _CHECKPOINT_FOR_DOC = "Rocketknight1/falcon-rw-1b" _CONFIG_FOR_DOC = "FalconConfig" diff --git a/src/transformers/models/fastspeech2_conformer/__init__.py b/src/transformers/models/fastspeech2_conformer/__init__.py index 1fd5cbf1dc272e..2014f74be1f772 100644 --- a/src/transformers/models/fastspeech2_conformer/__init__.py +++ b/src/transformers/models/fastspeech2_conformer/__init__.py @@ -22,9 +22,6 @@ _import_structure = { "configuration_fastspeech2_conformer": [ - "FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP", - "FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "FastSpeech2ConformerConfig", "FastSpeech2ConformerHifiGanConfig", "FastSpeech2ConformerWithHifiGanConfig", @@ -39,7 +36,6 @@ 
pass else: _import_structure["modeling_fastspeech2_conformer"] = [ - "FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FastSpeech2ConformerWithHifiGan", "FastSpeech2ConformerHifiGan", "FastSpeech2ConformerModel", @@ -48,9 +44,6 @@ if TYPE_CHECKING: from .configuration_fastspeech2_conformer import ( - FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP, - FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, - FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP, FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig, @@ -64,7 +57,6 @@ pass else: from .modeling_fastspeech2_conformer import ( - FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FastSpeech2ConformerHifiGan, FastSpeech2ConformerModel, FastSpeech2ConformerPreTrainedModel, diff --git a/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py b/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py index adb038ad1b2a0b..b8e1ae851232ce 100644 --- a/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py +++ b/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py @@ -23,13 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import ( # noqa: F401, E402 - FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP, # noqa: F401, E402 - FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, # noqa: F401, E402 - FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP, # noqa: F401, E402 -) - - class FastSpeech2ConformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FastSpeech2ConformerModel`]. 
It is used to diff --git a/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py b/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py index c46ef2a8365f0c..01c2cece550adf 100644 --- a/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py +++ b/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py @@ -34,9 +34,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class FastSpeech2ConformerModelOutput(ModelOutput): """ diff --git a/src/transformers/models/flaubert/__init__.py b/src/transformers/models/flaubert/__init__.py index 210d80b00f9ea2..94cf7b66139643 100644 --- a/src/transformers/models/flaubert/__init__.py +++ b/src/transformers/models/flaubert/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertOnnxConfig"], + "configuration_flaubert": ["FlaubertConfig", "FlaubertOnnxConfig"], "tokenization_flaubert": ["FlaubertTokenizer"], } @@ -29,7 +29,6 @@ pass else: _import_structure["modeling_flaubert"] = [ - "FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaubertForMultipleChoice", "FlaubertForQuestionAnswering", "FlaubertForQuestionAnsweringSimple", @@ -47,7 +46,6 @@ pass else: _import_structure["modeling_tf_flaubert"] = [ - "TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFlaubertForMultipleChoice", "TFFlaubertForQuestionAnsweringSimple", "TFFlaubertForSequenceClassification", @@ -59,7 +57,7 @@ if TYPE_CHECKING: - from .configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertOnnxConfig + from .configuration_flaubert import FlaubertConfig, FlaubertOnnxConfig from .tokenization_flaubert import FlaubertTokenizer try: @@ -69,7 +67,6 @@ pass else: from .modeling_flaubert import ( - FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, 
FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, @@ -87,7 +84,6 @@ pass else: from .modeling_tf_flaubert import ( - TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, diff --git a/src/transformers/models/flaubert/configuration_flaubert.py b/src/transformers/models/flaubert/configuration_flaubert.py index fb4ef2992cbb88..98939e7091f5a4 100644 --- a/src/transformers/models/flaubert/configuration_flaubert.py +++ b/src/transformers/models/flaubert/configuration_flaubert.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class FlaubertConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`FlaubertModel`] or a [`TFFlaubertModel`]. It is diff --git a/src/transformers/models/flaubert/modeling_flaubert.py b/src/transformers/models/flaubert/modeling_flaubert.py index 49c2008cd10ac6..86db691557f3e8 100644 --- a/src/transformers/models/flaubert/modeling_flaubert.py +++ b/src/transformers/models/flaubert/modeling_flaubert.py @@ -52,9 +52,6 @@ _CONFIG_FOR_DOC = "FlaubertConfig" -from ..deprecated._archive_maps import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.xlm.modeling_xlm.create_sinusoidal_embeddings def create_sinusoidal_embeddings(n_pos, dim, out): position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) diff --git a/src/transformers/models/flaubert/modeling_tf_flaubert.py b/src/transformers/models/flaubert/modeling_tf_flaubert.py index 08e573daa99458..0538fa061c4023 100644 --- a/src/transformers/models/flaubert/modeling_tf_flaubert.py +++ b/src/transformers/models/flaubert/modeling_tf_flaubert.py @@ -68,9 +68,6 @@ _CONFIG_FOR_DOC = "FlaubertConfig" -from 
..deprecated._archive_maps import TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - FLAUBERT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the diff --git a/src/transformers/models/flava/__init__.py b/src/transformers/models/flava/__init__.py index 8d026a9443271c..9fbe54524a6dea 100644 --- a/src/transformers/models/flava/__init__.py +++ b/src/transformers/models/flava/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_flava": [ - "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlavaConfig", "FlavaImageCodebookConfig", "FlavaImageConfig", @@ -44,7 +43,6 @@ pass else: _import_structure["modeling_flava"] = [ - "FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST", "FlavaForPreTraining", "FlavaImageCodebook", "FlavaImageModel", @@ -56,7 +54,6 @@ if TYPE_CHECKING: from .configuration_flava import ( - FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, @@ -81,7 +78,6 @@ pass else: from .modeling_flava import ( - FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST, FlavaForPreTraining, FlavaImageCodebook, FlavaImageModel, diff --git a/src/transformers/models/flava/configuration_flava.py b/src/transformers/models/flava/configuration_flava.py index 2c8642bfd2759f..b78b5fdeac79ef 100644 --- a/src/transformers/models/flava/configuration_flava.py +++ b/src/transformers/models/flava/configuration_flava.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class FlavaImageConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FlavaImageModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/flava/modeling_flava.py b/src/transformers/models/flava/modeling_flava.py index d967335d8e0068..661bfb2dce1a5d 100644 --- a/src/transformers/models/flava/modeling_flava.py +++ b/src/transformers/models/flava/modeling_flava.py @@ -55,10 +55,7 @@ _CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC = "FlavaMultimodalConfig" _EXPECTED_IMAGE_OUTPUT_SHAPE = [1, 197, 768] -from ..deprecated._archive_maps import FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - -FLAVA_CODEBOOK_PRETRAINED_MODEL_ARCHIVE_LIST = ["facebook/flava-image-codebook"] LOGIT_SCALE_CLAMP_MIN = 0 LOGIT_SCALE_CLAMP_MAX = 4.6052 diff --git a/src/transformers/models/fnet/__init__.py b/src/transformers/models/fnet/__init__.py index 485160d1ccaa69..08b6ddf864e15f 100644 --- a/src/transformers/models/fnet/__init__.py +++ b/src/transformers/models/fnet/__init__.py @@ -22,7 +22,7 @@ ) -_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} +_import_structure = {"configuration_fnet": ["FNetConfig"]} try: if not is_sentencepiece_available(): @@ -47,7 +47,6 @@ pass else: _import_structure["modeling_fnet"] = [ - "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FNetForMaskedLM", "FNetForMultipleChoice", "FNetForNextSentencePrediction", @@ -62,7 +61,7 @@ if TYPE_CHECKING: - from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig + from .configuration_fnet import FNetConfig try: if not is_sentencepiece_available(): @@ -87,7 +86,6 @@ pass else: from .modeling_fnet import ( - FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, diff --git a/src/transformers/models/fnet/configuration_fnet.py b/src/transformers/models/fnet/configuration_fnet.py index 4678cae92e2a29..18ad6187998003 100644 --- a/src/transformers/models/fnet/configuration_fnet.py +++ b/src/transformers/models/fnet/configuration_fnet.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) 
-from ..deprecated._archive_maps import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class FNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FNetModel`]. It is used to instantiate an FNet diff --git a/src/transformers/models/fnet/modeling_fnet.py b/src/transformers/models/fnet/modeling_fnet.py index a11b1c87a0254c..a29a6f801623c8 100755 --- a/src/transformers/models/fnet/modeling_fnet.py +++ b/src/transformers/models/fnet/modeling_fnet.py @@ -60,9 +60,6 @@ _CONFIG_FOR_DOC = "FNetConfig" -from ..deprecated._archive_maps import FNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Adapted from https://github.com/google-research/google-research/blob/master/f_net/fourier.py def _two_dim_matmul(x, matrix_dim_one, matrix_dim_two): """Applies 2D matrix multiplication to 3D input arrays.""" diff --git a/src/transformers/models/focalnet/__init__.py b/src/transformers/models/focalnet/__init__.py index b043a006f93766..ceacb8a52a170b 100644 --- a/src/transformers/models/focalnet/__init__.py +++ b/src/transformers/models/focalnet/__init__.py @@ -17,7 +17,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available -_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} +_import_structure = {"configuration_focalnet": ["FocalNetConfig"]} try: @@ -27,7 +27,6 @@ pass else: _import_structure["modeling_focalnet"] = [ - "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", @@ -36,7 +35,7 @@ ] if TYPE_CHECKING: - from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig + from .configuration_focalnet import FocalNetConfig try: if not is_torch_available(): @@ -45,7 +44,6 @@ pass else: from .modeling_focalnet import ( - FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, 
FocalNetForMaskedImageModeling, diff --git a/src/transformers/models/focalnet/configuration_focalnet.py b/src/transformers/models/focalnet/configuration_focalnet.py index 7f590b9c2c00a4..1b2cca8b48367b 100644 --- a/src/transformers/models/focalnet/configuration_focalnet.py +++ b/src/transformers/models/focalnet/configuration_focalnet.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class FocalNetConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FocalNetModel`]. It is used to instantiate a diff --git a/src/transformers/models/focalnet/modeling_focalnet.py b/src/transformers/models/focalnet/modeling_focalnet.py index ef3e2de52fbe96..5958eb6a0ebf78 100644 --- a/src/transformers/models/focalnet/modeling_focalnet.py +++ b/src/transformers/models/focalnet/modeling_focalnet.py @@ -54,9 +54,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class FocalNetEncoderOutput(ModelOutput): """ diff --git a/src/transformers/models/fsmt/__init__.py b/src/transformers/models/fsmt/__init__.py index 65aba047469da1..db960e4a5ce9c3 100644 --- a/src/transformers/models/fsmt/__init__.py +++ b/src/transformers/models/fsmt/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_fsmt": ["FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FSMTConfig"], + "configuration_fsmt": ["FSMTConfig"], "tokenization_fsmt": ["FSMTTokenizer"], } @@ -32,7 +32,7 @@ if TYPE_CHECKING: - from .configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig + from .configuration_fsmt import FSMTConfig from .tokenization_fsmt import FSMTTokenizer try: diff --git a/src/transformers/models/fsmt/configuration_fsmt.py b/src/transformers/models/fsmt/configuration_fsmt.py index 68abe47c019aba..7ed34a679273cd 100644 --- 
a/src/transformers/models/fsmt/configuration_fsmt.py +++ b/src/transformers/models/fsmt/configuration_fsmt.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class DecoderConfig(PretrainedConfig): r""" Configuration class for FSMT's decoder specific things. note: this is a private helper class diff --git a/src/transformers/models/fsmt/tokenization_fsmt.py b/src/transformers/models/fsmt/tokenization_fsmt.py index 8b0be1f8be2498..0df585ed46cb89 100644 --- a/src/transformers/models/fsmt/tokenization_fsmt.py +++ b/src/transformers/models/fsmt/tokenization_fsmt.py @@ -201,7 +201,7 @@ def __init__( raise ValueError( f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. " "Usually that means that tokenizer can't find a mapping for the given model path " - "in PRETRAINED_VOCAB_FILES_MAP, and other maps of this tokenizer." + "in the maps of this tokenizer." ) with open(src_vocab_file, encoding="utf-8") as src_vocab_handle: diff --git a/src/transformers/models/funnel/__init__.py b/src/transformers/models/funnel/__init__.py index 28b9a34290c826..aa620540dc3fd6 100644 --- a/src/transformers/models/funnel/__init__.py +++ b/src/transformers/models/funnel/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"], + "configuration_funnel": ["FunnelConfig"], "convert_funnel_original_tf_checkpoint_to_pytorch": [], "tokenization_funnel": ["FunnelTokenizer"], } @@ -44,7 +44,6 @@ pass else: _import_structure["modeling_funnel"] = [ - "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "FunnelBaseModel", "FunnelForMaskedLM", "FunnelForMultipleChoice", @@ -64,7 +63,6 @@ pass else: _import_structure["modeling_tf_funnel"] = [ - "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFunnelBaseModel", "TFFunnelForMaskedLM", "TFFunnelForMultipleChoice", @@ -78,7 +76,7 @@ if TYPE_CHECKING: - from
.configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig + from .configuration_funnel import FunnelConfig from .tokenization_funnel import FunnelTokenizer try: @@ -96,7 +94,6 @@ pass else: from .modeling_funnel import ( - FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, @@ -116,7 +113,6 @@ pass else: from .modeling_tf_funnel import ( - TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, diff --git a/src/transformers/models/funnel/configuration_funnel.py b/src/transformers/models/funnel/configuration_funnel.py index 0b49c22fb4c345..c3d6ff9ee49eb9 100644 --- a/src/transformers/models/funnel/configuration_funnel.py +++ b/src/transformers/models/funnel/configuration_funnel.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class FunnelConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FunnelModel`] or a [`TFBertModel`]. 
It is used to diff --git a/src/transformers/models/funnel/modeling_funnel.py b/src/transformers/models/funnel/modeling_funnel.py index 50e98e4c046e9e..1e7eaee1619c8b 100644 --- a/src/transformers/models/funnel/modeling_funnel.py +++ b/src/transformers/models/funnel/modeling_funnel.py @@ -50,9 +50,6 @@ _CHECKPOINT_FOR_DOC = "funnel-transformer/small" -from ..deprecated._archive_maps import FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - INF = 1e6 diff --git a/src/transformers/models/funnel/modeling_tf_funnel.py b/src/transformers/models/funnel/modeling_tf_funnel.py index b50b96df1c5408..dea3ad550c5db9 100644 --- a/src/transformers/models/funnel/modeling_tf_funnel.py +++ b/src/transformers/models/funnel/modeling_tf_funnel.py @@ -63,9 +63,6 @@ _CONFIG_FOR_DOC = "FunnelConfig" -from ..deprecated._archive_maps import TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - INF = 1e6 diff --git a/src/transformers/models/fuyu/__init__.py b/src/transformers/models/fuyu/__init__.py index 51a72a53661403..403acb1964c1ed 100644 --- a/src/transformers/models/fuyu/__init__.py +++ b/src/transformers/models/fuyu/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_fuyu": ["FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP", "FuyuConfig"], + "configuration_fuyu": ["FuyuConfig"], } @@ -44,7 +44,7 @@ if TYPE_CHECKING: - from .configuration_fuyu import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP, FuyuConfig + from .configuration_fuyu import FuyuConfig try: if not is_vision_available(): diff --git a/src/transformers/models/fuyu/configuration_fuyu.py b/src/transformers/models/fuyu/configuration_fuyu.py index 40b09492d8f161..8a5013a65134c9 100644 --- a/src/transformers/models/fuyu/configuration_fuyu.py +++ b/src/transformers/models/fuyu/configuration_fuyu.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class FuyuConfig(PretrainedConfig): r""" This is the configuration 
class to store the configuration of a [`FuyuForCausalLM`]. It is used to instantiate an diff --git a/src/transformers/models/gemma/__init__.py b/src/transformers/models/gemma/__init__.py index 64ff3445382816..1c832e9051b38c 100644 --- a/src/transformers/models/gemma/__init__.py +++ b/src/transformers/models/gemma/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_gemma": ["GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "GemmaConfig"], + "configuration_gemma": ["GemmaConfig"], } try: @@ -71,7 +71,7 @@ if TYPE_CHECKING: - from .configuration_gemma import GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP, GemmaConfig + from .configuration_gemma import GemmaConfig try: if not is_sentencepiece_available(): diff --git a/src/transformers/models/gemma/configuration_gemma.py b/src/transformers/models/gemma/configuration_gemma.py index 87e5a2c6693f0d..c8a55045166751 100644 --- a/src/transformers/models/gemma/configuration_gemma.py +++ b/src/transformers/models/gemma/configuration_gemma.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class GemmaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GemmaModel`]. 
It is used to instantiate an Gemma diff --git a/src/transformers/models/git/__init__.py b/src/transformers/models/git/__init__.py index e234a4b01db188..02f5f6d88a1194 100644 --- a/src/transformers/models/git/__init__.py +++ b/src/transformers/models/git/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"], + "configuration_git": ["GitConfig", "GitVisionConfig"], "processing_git": ["GitProcessor"], } @@ -29,7 +29,6 @@ pass else: _import_structure["modeling_git"] = [ - "GIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GitForCausalLM", "GitModel", "GitPreTrainedModel", @@ -37,7 +36,7 @@ ] if TYPE_CHECKING: - from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig + from .configuration_git import GitConfig, GitVisionConfig from .processing_git import GitProcessor try: @@ -47,7 +46,6 @@ pass else: from .modeling_git import ( - GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, diff --git a/src/transformers/models/git/configuration_git.py b/src/transformers/models/git/configuration_git.py index 0c28bbabff6b0b..21091445bc85ff 100644 --- a/src/transformers/models/git/configuration_git.py +++ b/src/transformers/models/git/configuration_git.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class GitVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GitVisionModel`]. 
It is used to instantiate a GIT diff --git a/src/transformers/models/git/modeling_git.py b/src/transformers/models/git/modeling_git.py index 12821609f037bf..87b0cb8073b506 100644 --- a/src/transformers/models/git/modeling_git.py +++ b/src/transformers/models/git/modeling_git.py @@ -46,9 +46,6 @@ _CONFIG_FOR_DOC = "GitConfig" -from ..deprecated._archive_maps import GIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass # Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Git class GitVisionModelOutput(ModelOutput): diff --git a/src/transformers/models/glpn/__init__.py b/src/transformers/models/glpn/__init__.py index 94788dcb85e76f..9896e801c93ae7 100644 --- a/src/transformers/models/glpn/__init__.py +++ b/src/transformers/models/glpn/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available -_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]} +_import_structure = {"configuration_glpn": ["GLPNConfig"]} try: if not is_vision_available(): @@ -34,7 +34,6 @@ pass else: _import_structure["modeling_glpn"] = [ - "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST", "GLPNForDepthEstimation", "GLPNLayer", "GLPNModel", @@ -43,7 +42,7 @@ if TYPE_CHECKING: - from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig + from .configuration_glpn import GLPNConfig try: if not is_vision_available(): @@ -61,7 +60,6 @@ pass else: from .modeling_glpn import ( - GLPN_PRETRAINED_MODEL_ARCHIVE_LIST, GLPNForDepthEstimation, GLPNLayer, GLPNModel, diff --git a/src/transformers/models/glpn/configuration_glpn.py b/src/transformers/models/glpn/configuration_glpn.py index c3341192169aa0..e1e4b535050ca4 100644 --- a/src/transformers/models/glpn/configuration_glpn.py +++ b/src/transformers/models/glpn/configuration_glpn.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import 
GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class GLPNConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GLPNModel`]. It is used to instantiate an GLPN diff --git a/src/transformers/models/glpn/modeling_glpn.py b/src/transformers/models/glpn/modeling_glpn.py index 0791cc0434daff..a9a8bc9b3681d8 100755 --- a/src/transformers/models/glpn/modeling_glpn.py +++ b/src/transformers/models/glpn/modeling_glpn.py @@ -47,9 +47,6 @@ _EXPECTED_OUTPUT_SHAPE = [1, 512, 15, 20] -from ..deprecated._archive_maps import GLPN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ diff --git a/src/transformers/models/gpt2/__init__.py b/src/transformers/models/gpt2/__init__.py index e99658ac1e885e..8c77c68445a830 100644 --- a/src/transformers/models/gpt2/__init__.py +++ b/src/transformers/models/gpt2/__init__.py @@ -27,7 +27,7 @@ _import_structure = { - "configuration_gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2OnnxConfig"], + "configuration_gpt2": ["GPT2Config", "GPT2OnnxConfig"], "tokenization_gpt2": ["GPT2Tokenizer"], } @@ -46,7 +46,6 @@ pass else: _import_structure["modeling_gpt2"] = [ - "GPT2_PRETRAINED_MODEL_ARCHIVE_LIST", "GPT2DoubleHeadsModel", "GPT2ForQuestionAnswering", "GPT2ForSequenceClassification", @@ -64,7 +63,6 @@ pass else: _import_structure["modeling_tf_gpt2"] = [ - "TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFGPT2DoubleHeadsModel", "TFGPT2ForSequenceClassification", "TFGPT2LMHeadModel", @@ -90,7 +88,7 @@ _import_structure["modeling_flax_gpt2"] = ["FlaxGPT2LMHeadModel", "FlaxGPT2Model", "FlaxGPT2PreTrainedModel"] if TYPE_CHECKING: - from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2OnnxConfig + from .configuration_gpt2 import GPT2Config, GPT2OnnxConfig from .tokenization_gpt2 import 
GPT2Tokenizer try: @@ -108,7 +106,6 @@ pass else: from .modeling_gpt2 import ( - GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, GPT2DoubleHeadsModel, GPT2ForQuestionAnswering, GPT2ForSequenceClassification, @@ -126,7 +123,6 @@ pass else: from .modeling_tf_gpt2 import ( - TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, TFGPT2DoubleHeadsModel, TFGPT2ForSequenceClassification, TFGPT2LMHeadModel, diff --git a/src/transformers/models/gpt2/configuration_gpt2.py b/src/transformers/models/gpt2/configuration_gpt2.py index 45495c0012fdd8..249decbbaa7659 100644 --- a/src/transformers/models/gpt2/configuration_gpt2.py +++ b/src/transformers/models/gpt2/configuration_gpt2.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class GPT2Config(PretrainedConfig): """ This is the configuration class to store the configuration of a [`GPT2Model`] or a [`TFGPT2Model`]. It is used to diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index c44d27a23c5d05..6264d31fea5801 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -63,9 +63,6 @@ _CONFIG_FOR_DOC = "GPT2Config" -from ..deprecated._archive_maps import GPT2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/gpt2/modeling_tf_gpt2.py b/src/transformers/models/gpt2/modeling_tf_gpt2.py index 26a4e7a398ae8d..c6f5883e228ea6 100644 --- a/src/transformers/models/gpt2/modeling_tf_gpt2.py +++ b/src/transformers/models/gpt2/modeling_tf_gpt2.py @@ -59,9 +59,6 @@ _CONFIG_FOR_DOC = "GPT2Config" -from ..deprecated._archive_maps import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFAttention(keras.layers.Layer): def 
__init__(self, nx, config, scale=False, is_cross_attention=False, **kwargs): super().__init__(**kwargs) diff --git a/src/transformers/models/gpt_bigcode/__init__.py b/src/transformers/models/gpt_bigcode/__init__.py index 33660eb81e4fae..60eec86ca541d7 100644 --- a/src/transformers/models/gpt_bigcode/__init__.py +++ b/src/transformers/models/gpt_bigcode/__init__.py @@ -22,7 +22,7 @@ _import_structure = { - "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], + "configuration_gpt_bigcode": ["GPTBigCodeConfig"], } try: @@ -32,7 +32,6 @@ pass else: _import_structure["modeling_gpt_bigcode"] = [ - "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTBigCodeForSequenceClassification", "GPTBigCodeForTokenClassification", "GPTBigCodeForCausalLM", @@ -41,7 +40,7 @@ ] if TYPE_CHECKING: - from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig + from .configuration_gpt_bigcode import GPTBigCodeConfig try: if not is_torch_available(): @@ -50,7 +49,6 @@ pass else: from .modeling_gpt_bigcode import ( - GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, diff --git a/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py b/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py index ef5e02ffdc43af..0c295e17a12cec 100644 --- a/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +++ b/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class GPTBigCodeConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`GPTBigCodeModel`]. 
It is used to instantiate a diff --git a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py index d61877cb1f1e7e..37ed2aba620861 100644 --- a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +++ b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py @@ -53,9 +53,6 @@ _CONFIG_FOR_DOC = "GPTBigCodeConfig" -from ..deprecated._archive_maps import GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Fused kernels # Use separate functions for each case because conditionals prevent kernel fusion. # TODO: Could have better fused kernels depending on scaling, dropout and head mask. diff --git a/src/transformers/models/gpt_neo/__init__.py b/src/transformers/models/gpt_neo/__init__.py index 02ca0a11949b73..6c314c89f713a4 100644 --- a/src/transformers/models/gpt_neo/__init__.py +++ b/src/transformers/models/gpt_neo/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], + "configuration_gpt_neo": ["GPTNeoConfig", "GPTNeoOnnxConfig"], } try: @@ -27,7 +27,6 @@ pass else: _import_structure["modeling_gpt_neo"] = [ - "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", @@ -51,7 +50,7 @@ if TYPE_CHECKING: - from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig + from .configuration_gpt_neo import GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): @@ -60,7 +59,6 @@ pass else: from .modeling_gpt_neo import ( - GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, diff --git a/src/transformers/models/gpt_neo/configuration_gpt_neo.py b/src/transformers/models/gpt_neo/configuration_gpt_neo.py index 411b392180b018..66c3b6812d1a47 100644 --- 
a/src/transformers/models/gpt_neo/configuration_gpt_neo.py +++ b/src/transformers/models/gpt_neo/configuration_gpt_neo.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class GPTNeoConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GPTNeoModel`]. It is used to instantiate a GPT diff --git a/src/transformers/models/gpt_neo/modeling_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_gpt_neo.py index 2fbf4677ca6f44..b2891526b3ddef 100755 --- a/src/transformers/models/gpt_neo/modeling_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -68,9 +68,6 @@ _CONFIG_FOR_DOC = "GPTNeoConfig" -from ..deprecated._archive_maps import GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - _CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neo-1.3B" diff --git a/src/transformers/models/gpt_neox/__init__.py b/src/transformers/models/gpt_neox/__init__.py index 46f06b1991afe7..05a6982acb0b08 100644 --- a/src/transformers/models/gpt_neox/__init__.py +++ b/src/transformers/models/gpt_neox/__init__.py @@ -17,7 +17,7 @@ from ...utils import OptionalDependencyNotAvailable -_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]} +_import_structure = {"configuration_gpt_neox": ["GPTNeoXConfig"]} try: if not is_tokenizers_available(): @@ -34,7 +34,6 @@ pass else: _import_structure["modeling_gpt_neox"] = [ - "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoXForCausalLM", "GPTNeoXForQuestionAnswering", "GPTNeoXForSequenceClassification", @@ -46,7 +45,7 @@ if TYPE_CHECKING: - from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig + from .configuration_gpt_neox import GPTNeoXConfig try: if not is_tokenizers_available(): @@ -63,7 +62,6 @@ pass else: from .modeling_gpt_neox import ( - GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, 
GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, diff --git a/src/transformers/models/gpt_neox/configuration_gpt_neox.py b/src/transformers/models/gpt_neox/configuration_gpt_neox.py index 7f583f139448f9..d559148a7221f3 100644 --- a/src/transformers/models/gpt_neox/configuration_gpt_neox.py +++ b/src/transformers/models/gpt_neox/configuration_gpt_neox.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class GPTNeoXConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GPTNeoXModel`]. It is used to instantiate an diff --git a/src/transformers/models/gpt_neox/modeling_gpt_neox.py b/src/transformers/models/gpt_neox/modeling_gpt_neox.py index e338c529abf293..e0b2309fc9658b 100755 --- a/src/transformers/models/gpt_neox/modeling_gpt_neox.py +++ b/src/transformers/models/gpt_neox/modeling_gpt_neox.py @@ -53,9 +53,6 @@ _CONFIG_FOR_DOC = "GPTNeoXConfig" -from ..deprecated._archive_maps import GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/gpt_neox_japanese/__init__.py b/src/transformers/models/gpt_neox_japanese/__init__.py index bf04db7676c8b6..c43391c04958d4 100644 --- a/src/transformers/models/gpt_neox_japanese/__init__.py +++ b/src/transformers/models/gpt_neox_japanese/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"], + "configuration_gpt_neox_japanese": ["GPTNeoXJapaneseConfig"], "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"], } @@ -29,7 +29,6 @@ pass else: _import_structure["modeling_gpt_neox_japanese"] = [ - 
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoXJapaneseForCausalLM", "GPTNeoXJapaneseLayer", "GPTNeoXJapaneseModel", @@ -38,7 +37,7 @@ if TYPE_CHECKING: - from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig + from .configuration_gpt_neox_japanese import GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: @@ -48,7 +47,6 @@ pass else: from .modeling_gpt_neox_japanese import ( - GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, diff --git a/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py index 8ee73257b64c7c..bde828790d9828 100644 --- a/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py +++ b/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class GPTNeoXJapaneseConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GPTNeoXModelJapanese`]. 
It is used to instantiate diff --git a/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py index 9fdff2c8387006..ea934581aa4f21 100755 --- a/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +++ b/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py @@ -35,9 +35,6 @@ _CONFIG_FOR_DOC = "GPTNeoXJapaneseConfig" -from ..deprecated._archive_maps import GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class GPTNeoXJapanesePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained diff --git a/src/transformers/models/gptj/__init__.py b/src/transformers/models/gptj/__init__.py index 4e59ed47062048..51520484529f85 100644 --- a/src/transformers/models/gptj/__init__.py +++ b/src/transformers/models/gptj/__init__.py @@ -22,7 +22,7 @@ ) -_import_structure = {"configuration_gptj": ["GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTJConfig", "GPTJOnnxConfig"]} +_import_structure = {"configuration_gptj": ["GPTJConfig", "GPTJOnnxConfig"]} try: if not is_torch_available(): @@ -31,7 +31,6 @@ pass else: _import_structure["modeling_gptj"] = [ - "GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTJForCausalLM", "GPTJForQuestionAnswering", "GPTJForSequenceClassification", @@ -67,7 +66,7 @@ if TYPE_CHECKING: - from .configuration_gptj import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTJConfig, GPTJOnnxConfig + from .configuration_gptj import GPTJConfig, GPTJOnnxConfig try: if not is_torch_available(): @@ -76,7 +75,6 @@ pass else: from .modeling_gptj import ( - GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST, GPTJForCausalLM, GPTJForQuestionAnswering, GPTJForSequenceClassification, diff --git a/src/transformers/models/gptj/configuration_gptj.py b/src/transformers/models/gptj/configuration_gptj.py index 56d6042764a19a..c9c6c4a1c50e91 100644 --- 
a/src/transformers/models/gptj/configuration_gptj.py +++ b/src/transformers/models/gptj/configuration_gptj.py @@ -25,9 +25,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class GPTJConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GPTJModel`]. It is used to instantiate a GPT-J diff --git a/src/transformers/models/gptj/modeling_gptj.py b/src/transformers/models/gptj/modeling_gptj.py index 3c6ddac4ecf4ca..5f1e1d6612999d 100644 --- a/src/transformers/models/gptj/modeling_gptj.py +++ b/src/transformers/models/gptj/modeling_gptj.py @@ -57,9 +57,6 @@ _CONFIG_FOR_DOC = "GPTJConfig" -from ..deprecated._archive_maps import GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/gptsan_japanese/__init__.py b/src/transformers/models/gptsan_japanese/__init__.py index b3635ace911635..9ae8af34667e34 100644 --- a/src/transformers/models/gptsan_japanese/__init__.py +++ b/src/transformers/models/gptsan_japanese/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_gptsan_japanese": ["GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTSanJapaneseConfig"], + "configuration_gptsan_japanese": ["GPTSanJapaneseConfig"], "tokenization_gptsan_japanese": ["GPTSanJapaneseTokenizer"], } @@ -35,7 +35,6 @@ pass else: _import_structure["modeling_gptsan_japanese"] = [ - "GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTSanJapaneseForConditionalGeneration", "GPTSanJapaneseModel", "GPTSanJapanesePreTrainedModel", @@ -46,7 +45,7 @@ if TYPE_CHECKING: - from .configuration_gptsan_japanese import GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTSanJapaneseConfig + from .configuration_gptsan_japanese import GPTSanJapaneseConfig from 
.tokenization_gptsan_japanese import GPTSanJapaneseTokenizer try: @@ -56,7 +55,6 @@ pass else: from .modeling_gptsan_japanese import ( - GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTSanJapaneseForConditionalGeneration, GPTSanJapaneseModel, GPTSanJapanesePreTrainedModel, diff --git a/src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py b/src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py index e0a17d1c114aef..7630d67bff2165 100644 --- a/src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py +++ b/src/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py @@ -20,9 +20,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class GPTSanJapaneseConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GPTSanJapaneseModel`]. It is used to instantiate diff --git a/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py b/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py index 2582d0468db8ee..f897a26483109c 100644 --- a/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py +++ b/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py @@ -45,8 +45,6 @@ # for the pretrained weights provided with the models #################################################### -from ..deprecated._archive_maps import GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - # Copied from transformers.models.switch_transformers.modeling_switch_transformers.router_z_loss_func def router_z_loss_func(router_logits: torch.Tensor) -> float: diff --git a/src/transformers/models/graphormer/__init__.py b/src/transformers/models/graphormer/__init__.py index 4263525682147f..f8140c81c1bb9b 100644 --- a/src/transformers/models/graphormer/__init__.py +++ b/src/transformers/models/graphormer/__init__.py @@ -17,7 +17,7 @@ 
_import_structure = { - "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"], + "configuration_graphormer": ["GraphormerConfig"], } try: @@ -27,7 +27,6 @@ pass else: _import_structure["modeling_graphormer"] = [ - "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "GraphormerForGraphClassification", "GraphormerModel", "GraphormerPreTrainedModel", @@ -35,7 +34,7 @@ if TYPE_CHECKING: - from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig + from .configuration_graphormer import GraphormerConfig try: if not is_torch_available(): @@ -44,7 +43,6 @@ pass else: from .modeling_graphormer import ( - GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, diff --git a/src/transformers/models/graphormer/configuration_graphormer.py b/src/transformers/models/graphormer/configuration_graphormer.py index 8d1f1359843174..7aaba3aa76a64f 100644 --- a/src/transformers/models/graphormer/configuration_graphormer.py +++ b/src/transformers/models/graphormer/configuration_graphormer.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class GraphormerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`~GraphormerModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/graphormer/modeling_graphormer.py b/src/transformers/models/graphormer/modeling_graphormer.py index 8b484fe1e433e5..45ffb91a916f0a 100755 --- a/src/transformers/models/graphormer/modeling_graphormer.py +++ b/src/transformers/models/graphormer/modeling_graphormer.py @@ -37,9 +37,6 @@ _CONFIG_FOR_DOC = "GraphormerConfig" -from ..deprecated._archive_maps import GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def quant_noise(module: nn.Module, p: float, block_size: int): """ From: diff --git a/src/transformers/models/grounding_dino/__init__.py b/src/transformers/models/grounding_dino/__init__.py index 3b0f792068c5f0..7cd3e115e15d57 100644 --- a/src/transformers/models/grounding_dino/__init__.py +++ b/src/transformers/models/grounding_dino/__init__.py @@ -18,10 +18,7 @@ _import_structure = { - "configuration_grounding_dino": [ - "GROUNDING_DINO_PRETRAINED_CONFIG_ARCHIVE_MAP", - "GroundingDinoConfig", - ], + "configuration_grounding_dino": ["GroundingDinoConfig"], "processing_grounding_dino": ["GroundingDinoProcessor"], } @@ -32,7 +29,6 @@ pass else: _import_structure["modeling_grounding_dino"] = [ - "GROUNDING_DINO_PRETRAINED_MODEL_ARCHIVE_LIST", "GroundingDinoForObjectDetection", "GroundingDinoModel", "GroundingDinoPreTrainedModel", @@ -49,7 +45,6 @@ if TYPE_CHECKING: from .configuration_grounding_dino import ( - GROUNDING_DINO_PRETRAINED_CONFIG_ARCHIVE_MAP, GroundingDinoConfig, ) from .processing_grounding_dino import GroundingDinoProcessor @@ -61,7 +56,6 @@ pass else: from .modeling_grounding_dino import ( - GROUNDING_DINO_PRETRAINED_MODEL_ARCHIVE_LIST, GroundingDinoForObjectDetection, GroundingDinoModel, GroundingDinoPreTrainedModel, diff --git a/src/transformers/models/grounding_dino/configuration_grounding_dino.py b/src/transformers/models/grounding_dino/configuration_grounding_dino.py index fe683035039600..177b4bab7e80b3 100644 --- 
a/src/transformers/models/grounding_dino/configuration_grounding_dino.py +++ b/src/transformers/models/grounding_dino/configuration_grounding_dino.py @@ -21,10 +21,6 @@ logger = logging.get_logger(__name__) -GROUNDING_DINO_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "IDEA-Research/grounding-dino-tiny": "https://huggingface.co/IDEA-Research/grounding-dino-tiny/resolve/main/config.json", -} - class GroundingDinoConfig(PretrainedConfig): r""" diff --git a/src/transformers/models/grounding_dino/modeling_grounding_dino.py b/src/transformers/models/grounding_dino/modeling_grounding_dino.py index da8dd29a5cb54d..dc3523f33d46bf 100644 --- a/src/transformers/models/grounding_dino/modeling_grounding_dino.py +++ b/src/transformers/models/grounding_dino/modeling_grounding_dino.py @@ -152,11 +152,6 @@ def backward(context, grad_output): _CONFIG_FOR_DOC = "GroundingDinoConfig" _CHECKPOINT_FOR_DOC = "IDEA-Research/grounding-dino-tiny" -GROUNDING_DINO_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "IDEA-Research/grounding-dino-tiny", - # See all Grounding DINO models at https://huggingface.co/models?filter=grounding-dino -] - @dataclass class GroundingDinoDecoderOutput(ModelOutput): diff --git a/src/transformers/models/groupvit/__init__.py b/src/transformers/models/groupvit/__init__.py index d0de4a00bd1500..98fc6f4eccef08 100644 --- a/src/transformers/models/groupvit/__init__.py +++ b/src/transformers/models/groupvit/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_groupvit": [ - "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GroupViTConfig", "GroupViTOnnxConfig", "GroupViTTextConfig", @@ -33,7 +32,6 @@ pass else: _import_structure["modeling_groupvit"] = [ - "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GroupViTModel", "GroupViTPreTrainedModel", "GroupViTTextModel", @@ -47,7 +45,6 @@ pass else: _import_structure["modeling_tf_groupvit"] = [ - "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFGroupViTModel", "TFGroupViTPreTrainedModel", "TFGroupViTTextModel", @@ -56,7 +53,6 @@ if 
TYPE_CHECKING: from .configuration_groupvit import ( - GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, @@ -70,7 +66,6 @@ pass else: from .modeling_groupvit import ( - GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, @@ -84,7 +79,6 @@ pass else: from .modeling_tf_groupvit import ( - TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, diff --git a/src/transformers/models/groupvit/configuration_groupvit.py b/src/transformers/models/groupvit/configuration_groupvit.py index 3c46c277f3519e..1f17a0a7e60ad9 100644 --- a/src/transformers/models/groupvit/configuration_groupvit.py +++ b/src/transformers/models/groupvit/configuration_groupvit.py @@ -31,9 +31,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class GroupViTTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`GroupViTTextModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/groupvit/modeling_groupvit.py b/src/transformers/models/groupvit/modeling_groupvit.py index 13e152fc80e34e..aa8e803357e6ef 100644 --- a/src/transformers/models/groupvit/modeling_groupvit.py +++ b/src/transformers/models/groupvit/modeling_groupvit.py @@ -44,9 +44,6 @@ _CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc" -from ..deprecated._archive_maps import GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: diff --git a/src/transformers/models/groupvit/modeling_tf_groupvit.py b/src/transformers/models/groupvit/modeling_tf_groupvit.py index 0b22a28260aa9e..78f7f793c33b2d 100644 --- a/src/transformers/models/groupvit/modeling_tf_groupvit.py +++ b/src/transformers/models/groupvit/modeling_tf_groupvit.py @@ -76,9 +76,6 @@ _CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc" -from ..deprecated._archive_maps import TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - LARGE_NEGATIVE = -1e8 diff --git a/src/transformers/models/hubert/__init__.py b/src/transformers/models/hubert/__init__.py index f0b72a1f297bf8..30331ed0d146a4 100644 --- a/src/transformers/models/hubert/__init__.py +++ b/src/transformers/models/hubert/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available -_import_structure = {"configuration_hubert": ["HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "HubertConfig"]} +_import_structure = {"configuration_hubert": ["HubertConfig"]} try: if not is_torch_available(): @@ -25,7 +25,6 @@ pass else: _import_structure["modeling_hubert"] = [ - "HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "HubertForCTC", "HubertForSequenceClassification", "HubertModel", @@ -40,14 +39,13 @@ pass else: _import_structure["modeling_tf_hubert"] = 
[ - "TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFHubertForCTC", "TFHubertModel", "TFHubertPreTrainedModel", ] if TYPE_CHECKING: - from .configuration_hubert import HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, HubertConfig + from .configuration_hubert import HubertConfig try: if not is_torch_available(): @@ -56,7 +54,6 @@ pass else: from .modeling_hubert import ( - HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, HubertForCTC, HubertForSequenceClassification, HubertModel, @@ -70,7 +67,6 @@ pass else: from .modeling_tf_hubert import ( - TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFHubertForCTC, TFHubertModel, TFHubertPreTrainedModel, diff --git a/src/transformers/models/hubert/configuration_hubert.py b/src/transformers/models/hubert/configuration_hubert.py index 00a3244a31074d..293b9f76f2cfce 100644 --- a/src/transformers/models/hubert/configuration_hubert.py +++ b/src/transformers/models/hubert/configuration_hubert.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class HubertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`HubertModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/hubert/modeling_hubert.py b/src/transformers/models/hubert/modeling_hubert.py index 8ab9465de1026f..3d1d0884c6aebc 100755 --- a/src/transformers/models/hubert/modeling_hubert.py +++ b/src/transformers/models/hubert/modeling_hubert.py @@ -66,9 +66,6 @@ _SEQ_CLASS_EXPECTED_LOSS = 8.53 -from ..deprecated._archive_maps import HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/hubert/modeling_tf_hubert.py b/src/transformers/models/hubert/modeling_tf_hubert.py index 0dc696f8a78917..142ab0961dd5cb 100644 --- a/src/transformers/models/hubert/modeling_tf_hubert.py +++ b/src/transformers/models/hubert/modeling_tf_hubert.py @@ -46,9 +46,6 @@ _CONFIG_FOR_DOC = "HubertConfig" -from ..deprecated._archive_maps import TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - LARGE_NEGATIVE = -1e8 diff --git a/src/transformers/models/ibert/__init__.py b/src/transformers/models/ibert/__init__.py index 637eb08eaf412d..3b147e414c2edf 100644 --- a/src/transformers/models/ibert/__init__.py +++ b/src/transformers/models/ibert/__init__.py @@ -17,7 +17,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available -_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]} +_import_structure = {"configuration_ibert": ["IBertConfig", "IBertOnnxConfig"]} try: if not is_torch_available(): @@ -26,7 +26,6 @@ pass else: _import_structure["modeling_ibert"] = [ - "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "IBertForMaskedLM", "IBertForMultipleChoice", "IBertForQuestionAnswering", @@ -37,7 +36,7 @@ ] if TYPE_CHECKING: - from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig + from 
.configuration_ibert import IBertConfig, IBertOnnxConfig try: if not is_torch_available(): @@ -46,7 +45,6 @@ pass else: from .modeling_ibert import ( - IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, diff --git a/src/transformers/models/ibert/configuration_ibert.py b/src/transformers/models/ibert/configuration_ibert.py index 94e040d417ef8d..afa67d6d6acab4 100644 --- a/src/transformers/models/ibert/configuration_ibert.py +++ b/src/transformers/models/ibert/configuration_ibert.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class IBertConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`IBertModel`]. It is used to instantiate a I-BERT diff --git a/src/transformers/models/ibert/modeling_ibert.py b/src/transformers/models/ibert/modeling_ibert.py index f06557c2616078..d9dcbb3de86ee9 100644 --- a/src/transformers/models/ibert/modeling_ibert.py +++ b/src/transformers/models/ibert/modeling_ibert.py @@ -48,9 +48,6 @@ _CONFIG_FOR_DOC = "IBertConfig" -from ..deprecated._archive_maps import IBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class IBertEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
diff --git a/src/transformers/models/idefics/__init__.py b/src/transformers/models/idefics/__init__.py index 68ff40fc18dc24..7a4e8056f540d5 100644 --- a/src/transformers/models/idefics/__init__.py +++ b/src/transformers/models/idefics/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available -_import_structure = {"configuration_idefics": ["IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP", "IdeficsConfig"]} +_import_structure = {"configuration_idefics": ["IdeficsConfig"]} try: if not is_vision_available(): @@ -33,7 +33,6 @@ pass else: _import_structure["modeling_idefics"] = [ - "IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST", "IdeficsForVisionText2Text", "IdeficsModel", "IdeficsPreTrainedModel", @@ -42,7 +41,7 @@ if TYPE_CHECKING: - from .configuration_idefics import IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP, IdeficsConfig + from .configuration_idefics import IdeficsConfig try: if not is_vision_available(): @@ -59,7 +58,6 @@ pass else: from .modeling_idefics import ( - IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST, IdeficsForVisionText2Text, IdeficsModel, IdeficsPreTrainedModel, diff --git a/src/transformers/models/idefics/configuration_idefics.py b/src/transformers/models/idefics/configuration_idefics.py index 07a92432aee3af..8b61238ed90fb8 100644 --- a/src/transformers/models/idefics/configuration_idefics.py +++ b/src/transformers/models/idefics/configuration_idefics.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class IdeficsVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`IdeficsModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index a01c2279c15586..622e336fe4034e 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -49,9 +49,6 @@ _CONFIG_FOR_DOC = "IdeficsConfig" -from ..deprecated._archive_maps import IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class IdeficsBaseModelOutputWithPast(ModelOutput): """ diff --git a/src/transformers/models/idefics2/__init__.py b/src/transformers/models/idefics2/__init__.py index 3b1996ef9580c7..1d8d3e4b571df2 100644 --- a/src/transformers/models/idefics2/__init__.py +++ b/src/transformers/models/idefics2/__init__.py @@ -35,7 +35,6 @@ pass else: _import_structure["modeling_idefics2"] = [ - "IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST", "Idefics2ForConditionalGeneration", "Idefics2PreTrainedModel", "Idefics2Model", @@ -60,7 +59,6 @@ pass else: from .modeling_idefics2 import ( - IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST, Idefics2ForConditionalGeneration, Idefics2Model, Idefics2PreTrainedModel, diff --git a/src/transformers/models/idefics2/modeling_idefics2.py b/src/transformers/models/idefics2/modeling_idefics2.py index d0e99158062774..15df0aa4ac5383 100644 --- a/src/transformers/models/idefics2/modeling_idefics2.py +++ b/src/transformers/models/idefics2/modeling_idefics2.py @@ -53,11 +53,6 @@ _CONFIG_FOR_DOC = "Idefics2Config" -IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "HuggingFaceM4/idefics2-8b", - # See all IDEFICS2 models at https://huggingface.co/models?filter=idefics2 -] - @dataclass class Idefics2BaseModelOutputWithPast(ModelOutput): diff --git a/src/transformers/models/imagegpt/__init__.py b/src/transformers/models/imagegpt/__init__.py index 7d3e1440da942e..a64dd9affdbe35 100644 --- a/src/transformers/models/imagegpt/__init__.py +++ b/src/transformers/models/imagegpt/__init__.py @@ -17,9 +17,7 @@ from ...utils 
import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available -_import_structure = { - "configuration_imagegpt": ["IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ImageGPTConfig", "ImageGPTOnnxConfig"] -} +_import_structure = {"configuration_imagegpt": ["ImageGPTConfig", "ImageGPTOnnxConfig"]} try: if not is_vision_available(): @@ -37,7 +35,6 @@ pass else: _import_structure["modeling_imagegpt"] = [ - "IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST", "ImageGPTForCausalImageModeling", "ImageGPTForImageClassification", "ImageGPTModel", @@ -47,7 +44,7 @@ if TYPE_CHECKING: - from .configuration_imagegpt import IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ImageGPTConfig, ImageGPTOnnxConfig + from .configuration_imagegpt import ImageGPTConfig, ImageGPTOnnxConfig try: if not is_vision_available(): @@ -65,7 +62,6 @@ pass else: from .modeling_imagegpt import ( - IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST, ImageGPTForCausalImageModeling, ImageGPTForImageClassification, ImageGPTModel, diff --git a/src/transformers/models/imagegpt/configuration_imagegpt.py b/src/transformers/models/imagegpt/configuration_imagegpt.py index 2a8d62f9b5e629..906e61eeef5c24 100644 --- a/src/transformers/models/imagegpt/configuration_imagegpt.py +++ b/src/transformers/models/imagegpt/configuration_imagegpt.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ImageGPTConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`ImageGPTModel`] or a [`TFImageGPTModel`]. 
It is diff --git a/src/transformers/models/imagegpt/modeling_imagegpt.py b/src/transformers/models/imagegpt/modeling_imagegpt.py index 81b41078633aa9..c0b0a83c24d66f 100755 --- a/src/transformers/models/imagegpt/modeling_imagegpt.py +++ b/src/transformers/models/imagegpt/modeling_imagegpt.py @@ -43,9 +43,6 @@ _CONFIG_FOR_DOC = "ImageGPTConfig" -from ..deprecated._archive_maps import IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def load_tf_weights_in_imagegpt(model, config, imagegpt_checkpoint_path): """ Load tf checkpoints in a pytorch model diff --git a/src/transformers/models/informer/__init__.py b/src/transformers/models/informer/__init__.py index 478ad56a72ba3c..fba309ee2b52b1 100644 --- a/src/transformers/models/informer/__init__.py +++ b/src/transformers/models/informer/__init__.py @@ -18,10 +18,7 @@ _import_structure = { - "configuration_informer": [ - "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "InformerConfig", - ], + "configuration_informer": ["InformerConfig"], } try: @@ -31,7 +28,6 @@ pass else: _import_structure["modeling_informer"] = [ - "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "InformerForPrediction", "InformerModel", "InformerPreTrainedModel", @@ -39,7 +35,7 @@ if TYPE_CHECKING: - from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig + from .configuration_informer import InformerConfig try: if not is_torch_available(): @@ -48,7 +44,6 @@ pass else: from .modeling_informer import ( - INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, diff --git a/src/transformers/models/informer/configuration_informer.py b/src/transformers/models/informer/configuration_informer.py index 93b3f3556c97fe..d933ac6fd530fe 100644 --- a/src/transformers/models/informer/configuration_informer.py +++ b/src/transformers/models/informer/configuration_informer.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import 
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class InformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`InformerModel`]. It is used to instantiate an diff --git a/src/transformers/models/informer/modeling_informer.py b/src/transformers/models/informer/modeling_informer.py index cf20477f375dd9..844bf474fafe48 100644 --- a/src/transformers/models/informer/modeling_informer.py +++ b/src/transformers/models/informer/modeling_informer.py @@ -40,9 +40,6 @@ _CONFIG_FOR_DOC = "InformerConfig" -from ..deprecated._archive_maps import INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesFeatureEmbedder with TimeSeries->Informer class InformerFeatureEmbedder(nn.Module): """ diff --git a/src/transformers/models/instructblip/__init__.py b/src/transformers/models/instructblip/__init__.py index 201db4d272d4b7..093b9f00f6fc4d 100644 --- a/src/transformers/models/instructblip/__init__.py +++ b/src/transformers/models/instructblip/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_instructblip": [ - "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "InstructBlipConfig", "InstructBlipQFormerConfig", "InstructBlipVisionConfig", @@ -33,7 +32,6 @@ pass else: _import_structure["modeling_instructblip"] = [ - "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "InstructBlipQFormerModel", "InstructBlipPreTrainedModel", "InstructBlipForConditionalGeneration", @@ -42,7 +40,6 @@ if TYPE_CHECKING: from .configuration_instructblip import ( - INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, @@ -56,7 +53,6 @@ pass else: from .modeling_instructblip import ( - INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, diff --git 
a/src/transformers/models/instructblip/configuration_instructblip.py b/src/transformers/models/instructblip/configuration_instructblip.py index 152389d337f19b..23b743ecb52d91 100644 --- a/src/transformers/models/instructblip/configuration_instructblip.py +++ b/src/transformers/models/instructblip/configuration_instructblip.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class InstructBlipVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`InstructBlipVisionModel`]. It is used to diff --git a/src/transformers/models/instructblip/modeling_instructblip.py b/src/transformers/models/instructblip/modeling_instructblip.py index 52f8fa610a948e..291db19721ed6d 100644 --- a/src/transformers/models/instructblip/modeling_instructblip.py +++ b/src/transformers/models/instructblip/modeling_instructblip.py @@ -48,9 +48,6 @@ _CHECKPOINT_FOR_DOC = "Salesforce/instructblip-flan-t5-xl" -from ..deprecated._archive_maps import INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass # Copied from transformers.models.blip_2.modeling_blip_2.Blip2ForConditionalGenerationModelOutput with Blip2->InstructBlip class InstructBlipForConditionalGenerationModelOutput(ModelOutput): diff --git a/src/transformers/models/jukebox/__init__.py b/src/transformers/models/jukebox/__init__.py index d96fba4d47b5e7..441b11329cf8ff 100644 --- a/src/transformers/models/jukebox/__init__.py +++ b/src/transformers/models/jukebox/__init__.py @@ -19,7 +19,6 @@ _import_structure = { "configuration_jukebox": [ - "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "JukeboxConfig", "JukeboxPriorConfig", "JukeboxVQVAEConfig", @@ -34,7 +33,6 @@ pass else: _import_structure["modeling_jukebox"] = [ - "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", "JukeboxModel", "JukeboxPreTrainedModel", "JukeboxVQVAE", @@ -43,7 +41,6 @@ if TYPE_CHECKING: from 
.configuration_jukebox import ( - JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, @@ -57,7 +54,6 @@ pass else: from .modeling_jukebox import ( - JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, diff --git a/src/transformers/models/jukebox/configuration_jukebox.py b/src/transformers/models/jukebox/configuration_jukebox.py index 4c680513102488..19203732a9e782 100644 --- a/src/transformers/models/jukebox/configuration_jukebox.py +++ b/src/transformers/models/jukebox/configuration_jukebox.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - _LARGE_ATTENTION = [ "block_attn", "transpose_block_attn", diff --git a/src/transformers/models/jukebox/modeling_jukebox.py b/src/transformers/models/jukebox/modeling_jukebox.py index 282cfdc5b4439b..9af8dbd6847170 100755 --- a/src/transformers/models/jukebox/modeling_jukebox.py +++ b/src/transformers/models/jukebox/modeling_jukebox.py @@ -34,9 +34,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def filter_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")): """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering diff --git a/src/transformers/models/kosmos2/__init__.py b/src/transformers/models/kosmos2/__init__.py index 8d26304c72e199..171a5cc7071e53 100644 --- a/src/transformers/models/kosmos2/__init__.py +++ b/src/transformers/models/kosmos2/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_kosmos2": ["KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Kosmos2Config"], + "configuration_kosmos2": ["Kosmos2Config"], "processing_kosmos2": ["Kosmos2Processor"], } @@ -34,7 +34,6 @@ pass else: _import_structure["modeling_kosmos2"] = [ - "KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST", 
"Kosmos2ForConditionalGeneration", "Kosmos2Model", "Kosmos2PreTrainedModel", @@ -42,7 +41,7 @@ if TYPE_CHECKING: - from .configuration_kosmos2 import KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP, Kosmos2Config + from .configuration_kosmos2 import Kosmos2Config from .processing_kosmos2 import Kosmos2Processor try: @@ -52,7 +51,6 @@ pass else: from .modeling_kosmos2 import ( - KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST, Kosmos2ForConditionalGeneration, Kosmos2Model, Kosmos2PreTrainedModel, diff --git a/src/transformers/models/kosmos2/configuration_kosmos2.py b/src/transformers/models/kosmos2/configuration_kosmos2.py index ae5afd637b28be..f922b6093406b4 100644 --- a/src/transformers/models/kosmos2/configuration_kosmos2.py +++ b/src/transformers/models/kosmos2/configuration_kosmos2.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Kosmos2TextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Kosmos2TextModel`]. It is used to instantiate a diff --git a/src/transformers/models/kosmos2/modeling_kosmos2.py b/src/transformers/models/kosmos2/modeling_kosmos2.py index 2e3a945c331592..161ebbf95c1fd6 100644 --- a/src/transformers/models/kosmos2/modeling_kosmos2.py +++ b/src/transformers/models/kosmos2/modeling_kosmos2.py @@ -47,9 +47,6 @@ _CONFIG_FOR_DOC = Kosmos2Config -from ..deprecated._archive_maps import KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
diff --git a/src/transformers/models/layoutlm/__init__.py b/src/transformers/models/layoutlm/__init__.py index e172dd1dc79101..070b42368ef958 100644 --- a/src/transformers/models/layoutlm/__init__.py +++ b/src/transformers/models/layoutlm/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_layoutlm": ["LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMConfig", "LayoutLMOnnxConfig"], + "configuration_layoutlm": ["LayoutLMConfig", "LayoutLMOnnxConfig"], "tokenization_layoutlm": ["LayoutLMTokenizer"], } @@ -43,7 +43,6 @@ pass else: _import_structure["modeling_layoutlm"] = [ - "LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST", "LayoutLMForMaskedLM", "LayoutLMForSequenceClassification", "LayoutLMForTokenClassification", @@ -59,7 +58,6 @@ pass else: _import_structure["modeling_tf_layoutlm"] = [ - "TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFLayoutLMForMaskedLM", "TFLayoutLMForSequenceClassification", "TFLayoutLMForTokenClassification", @@ -71,7 +69,7 @@ if TYPE_CHECKING: - from .configuration_layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig, LayoutLMOnnxConfig + from .configuration_layoutlm import LayoutLMConfig, LayoutLMOnnxConfig from .tokenization_layoutlm import LayoutLMTokenizer try: @@ -89,7 +87,6 @@ pass else: from .modeling_layoutlm import ( - LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMForMaskedLM, LayoutLMForQuestionAnswering, LayoutLMForSequenceClassification, @@ -104,7 +101,6 @@ pass else: from .modeling_tf_layoutlm import ( - TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, diff --git a/src/transformers/models/layoutlm/configuration_layoutlm.py b/src/transformers/models/layoutlm/configuration_layoutlm.py index c7c6886fedbec5..88081831bc5166 100644 --- a/src/transformers/models/layoutlm/configuration_layoutlm.py +++ b/src/transformers/models/layoutlm/configuration_layoutlm.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from 
..deprecated._archive_maps import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class LayoutLMConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LayoutLMModel`]. It is used to instantiate a diff --git a/src/transformers/models/layoutlm/modeling_layoutlm.py b/src/transformers/models/layoutlm/modeling_layoutlm.py index 98765b3f75ff29..dbaa1aff8101c6 100644 --- a/src/transformers/models/layoutlm/modeling_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_layoutlm.py @@ -44,9 +44,6 @@ _CHECKPOINT_FOR_DOC = "microsoft/layoutlm-base-uncased" -from ..deprecated._archive_maps import LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - LayoutLMLayerNorm = nn.LayerNorm diff --git a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py index 0125fc3ed60232..42bcbe00d94d08 100644 --- a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py @@ -55,9 +55,6 @@ _CONFIG_FOR_DOC = "LayoutLMConfig" -from ..deprecated._archive_maps import TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFLayoutLMEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" diff --git a/src/transformers/models/layoutlmv2/__init__.py b/src/transformers/models/layoutlmv2/__init__.py index 9eccb238780f7e..1c45a9f76abb3a 100644 --- a/src/transformers/models/layoutlmv2/__init__.py +++ b/src/transformers/models/layoutlmv2/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"], + "configuration_layoutlmv2": ["LayoutLMv2Config"], "processing_layoutlmv2": ["LayoutLMv2Processor"], "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"], } @@ -53,7 +53,6 @@ pass else: _import_structure["modeling_layoutlmv2"] = [ - 
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST", "LayoutLMv2ForQuestionAnswering", "LayoutLMv2ForSequenceClassification", "LayoutLMv2ForTokenClassification", @@ -63,7 +62,7 @@ ] if TYPE_CHECKING: - from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config + from .configuration_layoutlmv2 import LayoutLMv2Config from .processing_layoutlmv2 import LayoutLMv2Processor from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer @@ -90,7 +89,6 @@ pass else: from .modeling_layoutlmv2 import ( - LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMv2ForQuestionAnswering, LayoutLMv2ForSequenceClassification, LayoutLMv2ForTokenClassification, diff --git a/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py b/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py index 4528923a5d7598..6a2eb9ff39d762 100644 --- a/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - # soft dependency if is_detectron2_available(): import detectron2 diff --git a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py index e3c58fa47e51ad..3e3b7f2b149ccb 100755 --- a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py @@ -54,9 +54,6 @@ _CONFIG_FOR_DOC = "LayoutLMv2Config" -from ..deprecated._archive_maps import LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class LayoutLMv2Embeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" diff --git a/src/transformers/models/layoutlmv3/__init__.py b/src/transformers/models/layoutlmv3/__init__.py index ca1c31091e8b6e..a8ef90906e7a5b 100644 --- 
a/src/transformers/models/layoutlmv3/__init__.py +++ b/src/transformers/models/layoutlmv3/__init__.py @@ -26,7 +26,6 @@ _import_structure = { "configuration_layoutlmv3": [ - "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv3Config", "LayoutLMv3OnnxConfig", ], @@ -49,7 +48,6 @@ pass else: _import_structure["modeling_layoutlmv3"] = [ - "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST", "LayoutLMv3ForQuestionAnswering", "LayoutLMv3ForSequenceClassification", "LayoutLMv3ForTokenClassification", @@ -64,7 +62,6 @@ pass else: _import_structure["modeling_tf_layoutlmv3"] = [ - "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST", "TFLayoutLMv3ForQuestionAnswering", "TFLayoutLMv3ForSequenceClassification", "TFLayoutLMv3ForTokenClassification", @@ -84,7 +81,6 @@ if TYPE_CHECKING: from .configuration_layoutlmv3 import ( - LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv3Config, LayoutLMv3OnnxConfig, ) @@ -106,7 +102,6 @@ pass else: from .modeling_layoutlmv3 import ( - LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMv3ForQuestionAnswering, LayoutLMv3ForSequenceClassification, LayoutLMv3ForTokenClassification, @@ -121,7 +116,6 @@ pass else: from .modeling_tf_layoutlmv3 import ( - TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMv3ForQuestionAnswering, TFLayoutLMv3ForSequenceClassification, TFLayoutLMv3ForTokenClassification, diff --git a/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py b/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py index d6f9b6c9f10f9a..592adaa2f57ecf 100644 --- a/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py @@ -33,9 +33,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class LayoutLMv3Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LayoutLMv3Model`]. 
It is used to instantiate an diff --git a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py index 0db2bd775fe439..43ba0f522ff604 100644 --- a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py @@ -42,9 +42,6 @@ _CONFIG_FOR_DOC = "LayoutLMv3Config" -from ..deprecated._archive_maps import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - LAYOUTLMV3_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and diff --git a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py index 531eb59d876359..7479754f0f833c 100644 --- a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py @@ -58,9 +58,6 @@ ] -from ..deprecated._archive_maps import TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - LARGE_NEGATIVE = -1e8 diff --git a/src/transformers/models/led/__init__.py b/src/transformers/models/led/__init__.py index dd1c53b886eb37..2dbd59dcc34705 100644 --- a/src/transformers/models/led/__init__.py +++ b/src/transformers/models/led/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_led": ["LED_PRETRAINED_CONFIG_ARCHIVE_MAP", "LEDConfig"], + "configuration_led": ["LEDConfig"], "tokenization_led": ["LEDTokenizer"], } @@ -42,7 +42,6 @@ pass else: _import_structure["modeling_led"] = [ - "LED_PRETRAINED_MODEL_ARCHIVE_LIST", "LEDForConditionalGeneration", "LEDForQuestionAnswering", "LEDForSequenceClassification", @@ -61,7 +60,7 @@ if TYPE_CHECKING: - from .configuration_led import LED_PRETRAINED_CONFIG_ARCHIVE_MAP, LEDConfig + from .configuration_led import LEDConfig from 
.tokenization_led import LEDTokenizer try: @@ -79,7 +78,6 @@ pass else: from .modeling_led import ( - LED_PRETRAINED_MODEL_ARCHIVE_LIST, LEDForConditionalGeneration, LEDForQuestionAnswering, LEDForSequenceClassification, diff --git a/src/transformers/models/led/configuration_led.py b/src/transformers/models/led/configuration_led.py index 59a2793cc89e08..9933ef225367e2 100644 --- a/src/transformers/models/led/configuration_led.py +++ b/src/transformers/models/led/configuration_led.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import LED_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class LEDConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LEDModel`]. It is used to instantiate an LED diff --git a/src/transformers/models/led/modeling_led.py b/src/transformers/models/led/modeling_led.py index b2a5f440e0f25d..982de403c25c23 100755 --- a/src/transformers/models/led/modeling_led.py +++ b/src/transformers/models/led/modeling_led.py @@ -53,9 +53,6 @@ _CONFIG_FOR_DOC = "LEDConfig" -from ..deprecated._archive_maps import LED_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. 
diff --git a/src/transformers/models/levit/__init__.py b/src/transformers/models/levit/__init__.py index 84adf04084e61d..266889963c90f2 100644 --- a/src/transformers/models/levit/__init__.py +++ b/src/transformers/models/levit/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available -_import_structure = {"configuration_levit": ["LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LevitConfig", "LevitOnnxConfig"]} +_import_structure = {"configuration_levit": ["LevitConfig", "LevitOnnxConfig"]} try: if not is_vision_available(): @@ -34,7 +34,6 @@ pass else: _import_structure["modeling_levit"] = [ - "LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "LevitForImageClassification", "LevitForImageClassificationWithTeacher", "LevitModel", @@ -43,7 +42,7 @@ if TYPE_CHECKING: - from .configuration_levit import LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, LevitConfig, LevitOnnxConfig + from .configuration_levit import LevitConfig, LevitOnnxConfig try: if not is_vision_available(): @@ -61,7 +60,6 @@ pass else: from .modeling_levit import ( - LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, diff --git a/src/transformers/models/levit/configuration_levit.py b/src/transformers/models/levit/configuration_levit.py index fd840f519f26f9..197e5fe5ec2c0d 100644 --- a/src/transformers/models/levit/configuration_levit.py +++ b/src/transformers/models/levit/configuration_levit.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class LevitConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LevitModel`]. 
It is used to instantiate a LeViT diff --git a/src/transformers/models/levit/modeling_levit.py b/src/transformers/models/levit/modeling_levit.py index 00dccf9eff7362..0de3789c2c3213 100644 --- a/src/transformers/models/levit/modeling_levit.py +++ b/src/transformers/models/levit/modeling_levit.py @@ -48,9 +48,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class LevitForImageClassificationWithTeacherOutput(ModelOutput): """ diff --git a/src/transformers/models/lilt/__init__.py b/src/transformers/models/lilt/__init__.py index 50c493e352bc75..5b73f3aebd9c2f 100644 --- a/src/transformers/models/lilt/__init__.py +++ b/src/transformers/models/lilt/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"], + "configuration_lilt": ["LiltConfig"], } try: @@ -28,7 +28,6 @@ pass else: _import_structure["modeling_lilt"] = [ - "LILT_PRETRAINED_MODEL_ARCHIVE_LIST", "LiltForQuestionAnswering", "LiltForSequenceClassification", "LiltForTokenClassification", @@ -37,7 +36,7 @@ ] if TYPE_CHECKING: - from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig + from .configuration_lilt import LiltConfig try: if not is_torch_available(): @@ -46,7 +45,6 @@ pass else: from .modeling_lilt import ( - LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, diff --git a/src/transformers/models/lilt/configuration_lilt.py b/src/transformers/models/lilt/configuration_lilt.py index f1cfa98c6c3c13..fdfa2192a143ff 100644 --- a/src/transformers/models/lilt/configuration_lilt.py +++ b/src/transformers/models/lilt/configuration_lilt.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class LiltConfig(PretrainedConfig): r""" This is 
the configuration class to store the configuration of a [`LiltModel`]. It is used to instantiate a LiLT diff --git a/src/transformers/models/lilt/modeling_lilt.py b/src/transformers/models/lilt/modeling_lilt.py index adf8edcdc2ab71..4e4ee12c3dec1c 100644 --- a/src/transformers/models/lilt/modeling_lilt.py +++ b/src/transformers/models/lilt/modeling_lilt.py @@ -41,9 +41,6 @@ _CONFIG_FOR_DOC = "LiltConfig" -from ..deprecated._archive_maps import LILT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class LiltTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() diff --git a/src/transformers/models/llama/__init__.py b/src/transformers/models/llama/__init__.py index b5262941cb0e5c..4b8a33118ccc8e 100644 --- a/src/transformers/models/llama/__init__.py +++ b/src/transformers/models/llama/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"], + "configuration_llama": ["LlamaConfig"], } try: @@ -67,7 +67,7 @@ if TYPE_CHECKING: - from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig + from .configuration_llama import LlamaConfig try: if not is_sentencepiece_available(): diff --git a/src/transformers/models/llama/configuration_llama.py b/src/transformers/models/llama/configuration_llama.py index 66d668be882658..b406b12fc702c5 100644 --- a/src/transformers/models/llama/configuration_llama.py +++ b/src/transformers/models/llama/configuration_llama.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class LlamaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LlamaModel`]. 
It is used to instantiate an LLaMA diff --git a/src/transformers/models/llava/__init__.py b/src/transformers/models/llava/__init__.py index 79f7b3ea309559..3dabdc1f678f03 100644 --- a/src/transformers/models/llava/__init__.py +++ b/src/transformers/models/llava/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_llava": ["LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlavaConfig"], + "configuration_llava": ["LlavaConfig"], "processing_llava": ["LlavaProcessor"], } @@ -29,14 +29,13 @@ pass else: _import_structure["modeling_llava"] = [ - "LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST", "LlavaForConditionalGeneration", "LlavaPreTrainedModel", ] if TYPE_CHECKING: - from .configuration_llava import LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlavaConfig + from .configuration_llava import LlavaConfig from .processing_llava import LlavaProcessor try: @@ -46,7 +45,6 @@ pass else: from .modeling_llava import ( - LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST, LlavaForConditionalGeneration, LlavaPreTrainedModel, ) diff --git a/src/transformers/models/llava/configuration_llava.py b/src/transformers/models/llava/configuration_llava.py index 8c322f41de7de2..f14601450578f8 100644 --- a/src/transformers/models/llava/configuration_llava.py +++ b/src/transformers/models/llava/configuration_llava.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class LlavaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LlavaForConditionalGeneration`]. 
It is used to instantiate an diff --git a/src/transformers/models/llava/modeling_llava.py b/src/transformers/models/llava/modeling_llava.py index 0b26a9ad27fc9b..0426776beed1ca 100644 --- a/src/transformers/models/llava/modeling_llava.py +++ b/src/transformers/models/llava/modeling_llava.py @@ -40,9 +40,6 @@ _CONFIG_FOR_DOC = "LlavaConfig" -from ..deprecated._archive_maps import LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass # Copied from transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast with Idefics->Llava class LlavaCausalLMOutputWithPast(ModelOutput): diff --git a/src/transformers/models/llava_next/__init__.py b/src/transformers/models/llava_next/__init__.py index d6cc871565a6b2..0fb2ff2b6f28fa 100644 --- a/src/transformers/models/llava_next/__init__.py +++ b/src/transformers/models/llava_next/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_llava_next": ["LLAVA_NEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlavaNextConfig"], + "configuration_llava_next": ["LlavaNextConfig"], "processing_llava_next": ["LlavaNextProcessor"], } @@ -29,7 +29,6 @@ pass else: _import_structure["modeling_llava_next"] = [ - "LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "LlavaNextForConditionalGeneration", "LlavaNextPreTrainedModel", ] @@ -44,7 +43,7 @@ if TYPE_CHECKING: - from .configuration_llava_next import LLAVA_NEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, LlavaNextConfig + from .configuration_llava_next import LlavaNextConfig from .processing_llava_next import LlavaNextProcessor try: @@ -54,7 +53,6 @@ pass else: from .modeling_llava_next import ( - LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST, LlavaNextForConditionalGeneration, LlavaNextPreTrainedModel, ) diff --git a/src/transformers/models/llava_next/configuration_llava_next.py b/src/transformers/models/llava_next/configuration_llava_next.py index 1097a716bad95d..21f3d499e34bd7 100644 --- a/src/transformers/models/llava_next/configuration_llava_next.py +++ 
b/src/transformers/models/llava_next/configuration_llava_next.py @@ -20,10 +20,6 @@ logger = logging.get_logger(__name__) -LLAVA_NEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "llava-hf/llava-v1.6-mistral-7b-hf": "https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf/resolve/main/config.json", -} - class LlavaNextConfig(PretrainedConfig): r""" diff --git a/src/transformers/models/llava_next/modeling_llava_next.py b/src/transformers/models/llava_next/modeling_llava_next.py index 5bea1118af0ead..c94fdd3a4fb595 100644 --- a/src/transformers/models/llava_next/modeling_llava_next.py +++ b/src/transformers/models/llava_next/modeling_llava_next.py @@ -41,11 +41,6 @@ _CONFIG_FOR_DOC = "LlavaNextConfig" -LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "llava-hf/llava-v1.6-mistral-7b-hf", - # See all LLaVA-NeXT models at https://huggingface.co/models?filter=llava_next -] - def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size): """ diff --git a/src/transformers/models/longformer/__init__.py b/src/transformers/models/longformer/__init__.py index 66ef7c953cff43..ddbd8a68ecc6dc 100644 --- a/src/transformers/models/longformer/__init__.py +++ b/src/transformers/models/longformer/__init__.py @@ -25,7 +25,6 @@ _import_structure = { "configuration_longformer": [ - "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongformerConfig", "LongformerOnnxConfig", ], @@ -47,7 +46,6 @@ pass else: _import_structure["modeling_longformer"] = [ - "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "LongformerForMaskedLM", "LongformerForMultipleChoice", "LongformerForQuestionAnswering", @@ -65,7 +63,6 @@ pass else: _import_structure["modeling_tf_longformer"] = [ - "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFLongformerForMaskedLM", "TFLongformerForMultipleChoice", "TFLongformerForQuestionAnswering", @@ -79,7 +76,6 @@ if TYPE_CHECKING: from .configuration_longformer import ( - LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) @@ -100,7 +96,6 @@ pass else: from 
.modeling_longformer import ( - LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, @@ -118,7 +113,6 @@ pass else: from .modeling_tf_longformer import ( - TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, diff --git a/src/transformers/models/longformer/configuration_longformer.py b/src/transformers/models/longformer/configuration_longformer.py index 7dce8a74a631c7..0cafbf5d6936cc 100644 --- a/src/transformers/models/longformer/configuration_longformer.py +++ b/src/transformers/models/longformer/configuration_longformer.py @@ -29,9 +29,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class LongformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LongformerModel`] or a [`TFLongformerModel`]. 
It diff --git a/src/transformers/models/longformer/modeling_longformer.py b/src/transformers/models/longformer/modeling_longformer.py index f2da2a22b70d6a..b12e2927593f3d 100755 --- a/src/transformers/models/longformer/modeling_longformer.py +++ b/src/transformers/models/longformer/modeling_longformer.py @@ -43,9 +43,6 @@ _CONFIG_FOR_DOC = "LongformerConfig" -from ..deprecated._archive_maps import LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class LongformerBaseModelOutput(ModelOutput): """ diff --git a/src/transformers/models/longformer/modeling_tf_longformer.py b/src/transformers/models/longformer/modeling_tf_longformer.py index 907fbbddf1e68f..be3ccf761b2e21 100644 --- a/src/transformers/models/longformer/modeling_tf_longformer.py +++ b/src/transformers/models/longformer/modeling_tf_longformer.py @@ -57,9 +57,6 @@ LARGE_NEGATIVE = -1e8 -from ..deprecated._archive_maps import TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class TFLongformerBaseModelOutput(ModelOutput): """ diff --git a/src/transformers/models/longt5/__init__.py b/src/transformers/models/longt5/__init__.py index 93b9121c33f393..97d2bbe8ccd330 100644 --- a/src/transformers/models/longt5/__init__.py +++ b/src/transformers/models/longt5/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"], + "configuration_longt5": ["LongT5Config", "LongT5OnnxConfig"], } try: @@ -28,7 +28,6 @@ pass else: _import_structure["modeling_longt5"] = [ - "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST", "LongT5EncoderModel", "LongT5ForConditionalGeneration", "LongT5Model", @@ -49,7 +48,7 @@ if TYPE_CHECKING: - from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig + from .configuration_longt5 import LongT5Config, LongT5OnnxConfig try: if not is_torch_available(): @@ -58,7 +57,6 @@ pass else: from .modeling_longt5 import ( - 
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongT5EncoderModel, LongT5ForConditionalGeneration, LongT5Model, diff --git a/src/transformers/models/longt5/configuration_longt5.py b/src/transformers/models/longt5/configuration_longt5.py index f6e8284ed0af84..839428f27ef682 100644 --- a/src/transformers/models/longt5/configuration_longt5.py +++ b/src/transformers/models/longt5/configuration_longt5.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class LongT5Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LongT5Model`] or a [`FlaxLongT5Model`]. It is diff --git a/src/transformers/models/longt5/modeling_longt5.py b/src/transformers/models/longt5/modeling_longt5.py index e16e0951208f77..314785b82c193e 100644 --- a/src/transformers/models/longt5/modeling_longt5.py +++ b/src/transformers/models/longt5/modeling_longt5.py @@ -52,8 +52,6 @@ # TODO: Update before the merge -from ..deprecated._archive_maps import LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor: """Pad a tensor so that a sequence length will be a multiple of `block_len`""" diff --git a/src/transformers/models/luke/__init__.py b/src/transformers/models/luke/__init__.py index 91ef5f22221856..5ae6f488116ff4 100644 --- a/src/transformers/models/luke/__init__.py +++ b/src/transformers/models/luke/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"], + "configuration_luke": ["LukeConfig"], "tokenization_luke": ["LukeTokenizer"], } @@ -29,7 +29,6 @@ pass else: _import_structure["modeling_luke"] = [ - "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST", "LukeForEntityClassification", "LukeForEntityPairClassification", "LukeForEntitySpanClassification", @@ -44,7 +43,7 @@ if TYPE_CHECKING: - from 
.configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig + from .configuration_luke import LukeConfig from .tokenization_luke import LukeTokenizer try: @@ -54,7 +53,6 @@ pass else: from .modeling_luke import ( - LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, diff --git a/src/transformers/models/luke/configuration_luke.py b/src/transformers/models/luke/configuration_luke.py index 257c9a25535f33..d3f14decac9664 100644 --- a/src/transformers/models/luke/configuration_luke.py +++ b/src/transformers/models/luke/configuration_luke.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class LukeConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LukeModel`]. It is used to instantiate a LUKE diff --git a/src/transformers/models/luke/modeling_luke.py b/src/transformers/models/luke/modeling_luke.py index 3523e739f5b69f..803f4396a2b6a1 100644 --- a/src/transformers/models/luke/modeling_luke.py +++ b/src/transformers/models/luke/modeling_luke.py @@ -44,9 +44,6 @@ _CHECKPOINT_FOR_DOC = "studio-ousia/luke-base" -from ..deprecated._archive_maps import LUKE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class BaseLukeModelOutputWithPooling(BaseModelOutputWithPooling): """ diff --git a/src/transformers/models/lxmert/__init__.py b/src/transformers/models/lxmert/__init__.py index 4f7e775431dd0a..007beb4ecd2dcf 100644 --- a/src/transformers/models/lxmert/__init__.py +++ b/src/transformers/models/lxmert/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"], + "configuration_lxmert": ["LxmertConfig"], "tokenization_lxmert": ["LxmertTokenizer"], } @@ -59,7 +59,6 @@ pass else: _import_structure["modeling_tf_lxmert"] = [ - 
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFLxmertForPreTraining", "TFLxmertMainLayer", "TFLxmertModel", @@ -69,7 +68,7 @@ if TYPE_CHECKING: - from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig + from .configuration_lxmert import LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: @@ -103,7 +102,6 @@ pass else: from .modeling_tf_lxmert import ( - TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, diff --git a/src/transformers/models/lxmert/configuration_lxmert.py b/src/transformers/models/lxmert/configuration_lxmert.py index b79fb67908d27e..ec3a7c1864becc 100644 --- a/src/transformers/models/lxmert/configuration_lxmert.py +++ b/src/transformers/models/lxmert/configuration_lxmert.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class LxmertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LxmertModel`] or a [`TFLxmertModel`]. 
It is used diff --git a/src/transformers/models/lxmert/modeling_tf_lxmert.py b/src/transformers/models/lxmert/modeling_tf_lxmert.py index c4741196031a79..2b51fb7510308d 100644 --- a/src/transformers/models/lxmert/modeling_tf_lxmert.py +++ b/src/transformers/models/lxmert/modeling_tf_lxmert.py @@ -54,9 +54,6 @@ _CONFIG_FOR_DOC = "LxmertConfig" -from ..deprecated._archive_maps import TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class TFLxmertModelOutput(ModelOutput): """ diff --git a/src/transformers/models/m2m_100/__init__.py b/src/transformers/models/m2m_100/__init__.py index db2f0223bf04d6..45232f1390a53b 100644 --- a/src/transformers/models/m2m_100/__init__.py +++ b/src/transformers/models/m2m_100/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"], + "configuration_m2m_100": ["M2M100Config", "M2M100OnnxConfig"], "tokenization_m2m_100": ["M2M100Tokenizer"], } @@ -29,7 +29,6 @@ pass else: _import_structure["modeling_m2m_100"] = [ - "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST", "M2M100ForConditionalGeneration", "M2M100Model", "M2M100PreTrainedModel", @@ -37,7 +36,7 @@ if TYPE_CHECKING: - from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig + from .configuration_m2m_100 import M2M100Config, M2M100OnnxConfig from .tokenization_m2m_100 import M2M100Tokenizer try: @@ -47,7 +46,6 @@ pass else: from .modeling_m2m_100 import ( - M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, M2M100ForConditionalGeneration, M2M100Model, M2M100PreTrainedModel, diff --git a/src/transformers/models/m2m_100/configuration_m2m_100.py b/src/transformers/models/m2m_100/configuration_m2m_100.py index b211527e8088b4..73840e5132d348 100644 --- a/src/transformers/models/m2m_100/configuration_m2m_100.py +++ b/src/transformers/models/m2m_100/configuration_m2m_100.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from 
..deprecated._archive_maps import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class M2M100Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`M2M100Model`]. It is used to instantiate an diff --git a/src/transformers/models/m2m_100/modeling_m2m_100.py b/src/transformers/models/m2m_100/modeling_m2m_100.py index 1080d28c946055..9e9c68d68fb1b0 100755 --- a/src/transformers/models/m2m_100/modeling_m2m_100.py +++ b/src/transformers/models/m2m_100/modeling_m2m_100.py @@ -56,9 +56,6 @@ _CHECKPOINT_FOR_DOC = "facebook/m2m100_418M" -from ..deprecated._archive_maps import M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ diff --git a/src/transformers/models/mamba/__init__.py b/src/transformers/models/mamba/__init__.py index 7a1c142e05d51e..80cb8e1c68a21d 100644 --- a/src/transformers/models/mamba/__init__.py +++ b/src/transformers/models/mamba/__init__.py @@ -22,7 +22,7 @@ _import_structure = { - "configuration_mamba": ["MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MambaConfig", "MambaOnnxConfig"], + "configuration_mamba": ["MambaConfig", "MambaOnnxConfig"], } try: @@ -32,7 +32,6 @@ pass else: _import_structure["modeling_mamba"] = [ - "MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST", "MambaForCausalLM", "MambaModel", "MambaPreTrainedModel", @@ -40,7 +39,7 @@ if TYPE_CHECKING: - from .configuration_mamba import MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP, MambaConfig, MambaOnnxConfig + from .configuration_mamba import MambaConfig, MambaOnnxConfig try: if not is_torch_available(): @@ -49,7 +48,6 @@ pass else: from .modeling_mamba import ( - MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST, MambaForCausalLM, MambaModel, MambaPreTrainedModel, diff --git a/src/transformers/models/mamba/configuration_mamba.py b/src/transformers/models/mamba/configuration_mamba.py index 
b3e9b4eb946b93..460c1f3b32acbf 100644 --- a/src/transformers/models/mamba/configuration_mamba.py +++ b/src/transformers/models/mamba/configuration_mamba.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MambaConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MambaModel`]. It is used to instantiate a MAMBA diff --git a/src/transformers/models/mamba/modeling_mamba.py b/src/transformers/models/mamba/modeling_mamba.py index 8f19c361269e27..4834fd62473195 100644 --- a/src/transformers/models/mamba/modeling_mamba.py +++ b/src/transformers/models/mamba/modeling_mamba.py @@ -57,9 +57,6 @@ _CONFIG_FOR_DOC = "MambaConfig" -from ..deprecated._archive_maps import MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class MambaCache: """ Arguments: diff --git a/src/transformers/models/marian/__init__.py b/src/transformers/models/marian/__init__.py index 56f0a4e86afba2..e3a8c473aeeedf 100644 --- a/src/transformers/models/marian/__init__.py +++ b/src/transformers/models/marian/__init__.py @@ -25,7 +25,7 @@ _import_structure = { - "configuration_marian": ["MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "MarianConfig", "MarianOnnxConfig"], + "configuration_marian": ["MarianConfig", "MarianOnnxConfig"], } try: @@ -43,7 +43,6 @@ pass else: _import_structure["modeling_marian"] = [ - "MARIAN_PRETRAINED_MODEL_ARCHIVE_LIST", "MarianForCausalLM", "MarianModel", "MarianMTModel", @@ -67,7 +66,7 @@ _import_structure["modeling_flax_marian"] = ["FlaxMarianModel", "FlaxMarianMTModel", "FlaxMarianPreTrainedModel"] if TYPE_CHECKING: - from .configuration_marian import MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP, MarianConfig, MarianOnnxConfig + from .configuration_marian import MarianConfig, MarianOnnxConfig try: if not is_sentencepiece_available(): @@ -84,7 +83,6 @@ pass else: from .modeling_marian import ( - MARIAN_PRETRAINED_MODEL_ARCHIVE_LIST, 
MarianForCausalLM, MarianModel, MarianMTModel, diff --git a/src/transformers/models/markuplm/__init__.py b/src/transformers/models/markuplm/__init__.py index f8df88ce16f683..368834f13e98f8 100644 --- a/src/transformers/models/markuplm/__init__.py +++ b/src/transformers/models/markuplm/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_markuplm": ["MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "MarkupLMConfig"], + "configuration_markuplm": ["MarkupLMConfig"], "feature_extraction_markuplm": ["MarkupLMFeatureExtractor"], "processing_markuplm": ["MarkupLMProcessor"], "tokenization_markuplm": ["MarkupLMTokenizer"], @@ -38,7 +38,6 @@ pass else: _import_structure["modeling_markuplm"] = [ - "MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST", "MarkupLMForQuestionAnswering", "MarkupLMForSequenceClassification", "MarkupLMForTokenClassification", @@ -48,7 +47,7 @@ if TYPE_CHECKING: - from .configuration_markuplm import MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP, MarkupLMConfig + from .configuration_markuplm import MarkupLMConfig from .feature_extraction_markuplm import MarkupLMFeatureExtractor from .processing_markuplm import MarkupLMProcessor from .tokenization_markuplm import MarkupLMTokenizer @@ -68,7 +67,6 @@ pass else: from .modeling_markuplm import ( - MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST, MarkupLMForQuestionAnswering, MarkupLMForSequenceClassification, MarkupLMForTokenClassification, diff --git a/src/transformers/models/markuplm/configuration_markuplm.py b/src/transformers/models/markuplm/configuration_markuplm.py index aeb80ae51f96ba..581cc0f349c3e7 100644 --- a/src/transformers/models/markuplm/configuration_markuplm.py +++ b/src/transformers/models/markuplm/configuration_markuplm.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MarkupLMConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MarkupLMModel`]. 
It is used to instantiate a diff --git a/src/transformers/models/markuplm/modeling_markuplm.py b/src/transformers/models/markuplm/modeling_markuplm.py index 707f612459ddc0..29f433ae0f1fe7 100755 --- a/src/transformers/models/markuplm/modeling_markuplm.py +++ b/src/transformers/models/markuplm/modeling_markuplm.py @@ -53,9 +53,6 @@ _CONFIG_FOR_DOC = "MarkupLMConfig" -from ..deprecated._archive_maps import MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class XPathEmbeddings(nn.Module): """Construct the embeddings from xpath tags and subscripts. diff --git a/src/transformers/models/mask2former/__init__.py b/src/transformers/models/mask2former/__init__.py index d6db4a478ac1d8..7ede863452bc72 100644 --- a/src/transformers/models/mask2former/__init__.py +++ b/src/transformers/models/mask2former/__init__.py @@ -17,10 +17,7 @@ _import_structure = { - "configuration_mask2former": [ - "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "Mask2FormerConfig", - ], + "configuration_mask2former": ["Mask2FormerConfig"], } try: @@ -38,14 +35,13 @@ pass else: _import_structure["modeling_mask2former"] = [ - "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Mask2FormerForUniversalSegmentation", "Mask2FormerModel", "Mask2FormerPreTrainedModel", ] if TYPE_CHECKING: - from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig + from .configuration_mask2former import Mask2FormerConfig try: if not is_vision_available(): @@ -62,7 +58,6 @@ pass else: from .modeling_mask2former import ( - MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, Mask2FormerForUniversalSegmentation, Mask2FormerModel, Mask2FormerPreTrainedModel, diff --git a/src/transformers/models/mask2former/configuration_mask2former.py b/src/transformers/models/mask2former/configuration_mask2former.py index f0d13b8e030ed1..ed97ed24582363 100644 --- a/src/transformers/models/mask2former/configuration_mask2former.py +++ b/src/transformers/models/mask2former/configuration_mask2former.py @@ -18,7 
+18,6 @@ from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING -from ..deprecated._archive_maps import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 logger = logging.get_logger(__name__) diff --git a/src/transformers/models/mask2former/modeling_mask2former.py b/src/transformers/models/mask2former/modeling_mask2former.py index 3a9a74345363a6..f37b5b14fcaab0 100644 --- a/src/transformers/models/mask2former/modeling_mask2former.py +++ b/src/transformers/models/mask2former/modeling_mask2former.py @@ -55,9 +55,6 @@ _IMAGE_PROCESSOR_FOR_DOC = "Mask2FormerImageProcessor" -from ..deprecated._archive_maps import MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class Mask2FormerPixelDecoderOutput(ModelOutput): """ diff --git a/src/transformers/models/maskformer/__init__.py b/src/transformers/models/maskformer/__init__.py index efb2290f2c9ceb..78aa54a4656150 100644 --- a/src/transformers/models/maskformer/__init__.py +++ b/src/transformers/models/maskformer/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"], + "configuration_maskformer": ["MaskFormerConfig"], "configuration_maskformer_swin": ["MaskFormerSwinConfig"], } @@ -38,7 +38,6 @@ pass else: _import_structure["modeling_maskformer"] = [ - "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "MaskFormerForInstanceSegmentation", "MaskFormerModel", "MaskFormerPreTrainedModel", @@ -50,7 +49,7 @@ ] if TYPE_CHECKING: - from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig + from .configuration_maskformer import MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: @@ -68,7 +67,6 @@ pass else: from .modeling_maskformer import ( - MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, diff --git 
a/src/transformers/models/maskformer/configuration_maskformer.py b/src/transformers/models/maskformer/configuration_maskformer.py index 653350ca056dda..f82fe199b7bc34 100644 --- a/src/transformers/models/maskformer/configuration_maskformer.py +++ b/src/transformers/models/maskformer/configuration_maskformer.py @@ -18,7 +18,6 @@ from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING -from ..deprecated._archive_maps import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 from ..detr import DetrConfig from ..swin import SwinConfig diff --git a/src/transformers/models/maskformer/modeling_maskformer.py b/src/transformers/models/maskformer/modeling_maskformer.py index 4419a36e9f840a..74cc6cc4c9e9e3 100644 --- a/src/transformers/models/maskformer/modeling_maskformer.py +++ b/src/transformers/models/maskformer/modeling_maskformer.py @@ -58,9 +58,6 @@ _CHECKPOINT_FOR_DOC = "facebook/maskformer-swin-base-ade" -from ..deprecated._archive_maps import MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass # Copied from transformers.models.detr.modeling_detr.DetrDecoderOutput class DetrDecoderOutput(BaseModelOutputWithCrossAttentions): diff --git a/src/transformers/models/mbart/__init__.py b/src/transformers/models/mbart/__init__.py index bae4593c87d89c..12575fcab74036 100644 --- a/src/transformers/models/mbart/__init__.py +++ b/src/transformers/models/mbart/__init__.py @@ -24,7 +24,7 @@ ) -_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]} +_import_structure = {"configuration_mbart": ["MBartConfig", "MBartOnnxConfig"]} try: if not is_sentencepiece_available(): @@ -49,7 +49,6 @@ pass else: _import_structure["modeling_mbart"] = [ - "MBART_PRETRAINED_MODEL_ARCHIVE_LIST", "MBartForCausalLM", "MBartForConditionalGeneration", "MBartForQuestionAnswering", @@ -86,7 +85,7 @@ if TYPE_CHECKING: - from .configuration_mbart import 
MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig + from .configuration_mbart import MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): @@ -111,7 +110,6 @@ pass else: from .modeling_mbart import ( - MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, diff --git a/src/transformers/models/mega/__init__.py b/src/transformers/models/mega/__init__.py index 728499ef2d385f..3e3b204d8b1727 100644 --- a/src/transformers/models/mega/__init__.py +++ b/src/transformers/models/mega/__init__.py @@ -22,7 +22,7 @@ _import_structure = { - "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"], + "configuration_mega": ["MegaConfig", "MegaOnnxConfig"], } try: @@ -32,7 +32,6 @@ pass else: _import_structure["modeling_mega"] = [ - "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST", "MegaForCausalLM", "MegaForMaskedLM", "MegaForMultipleChoice", @@ -44,7 +43,7 @@ ] if TYPE_CHECKING: - from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig + from .configuration_mega import MegaConfig, MegaOnnxConfig try: if not is_torch_available(): @@ -53,7 +52,6 @@ pass else: from .modeling_mega import ( - MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, diff --git a/src/transformers/models/mega/configuration_mega.py b/src/transformers/models/mega/configuration_mega.py index 993a21cf7035d6..8287a3938e2445 100644 --- a/src/transformers/models/mega/configuration_mega.py +++ b/src/transformers/models/mega/configuration_mega.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MegaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MegaModel`]. 
It is used to instantiate a Mega diff --git a/src/transformers/models/mega/modeling_mega.py b/src/transformers/models/mega/modeling_mega.py index 069c717a737572..65fff1cd49735a 100644 --- a/src/transformers/models/mega/modeling_mega.py +++ b/src/transformers/models/mega/modeling_mega.py @@ -51,9 +51,6 @@ _CONFIG_FOR_DOC = "MegaConfig" -from ..deprecated._archive_maps import MEGA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class MegaEmbeddings(nn.Module): """ Mega's basic implementation does not incorporate token type embeddings, so this is a stripped-down version of diff --git a/src/transformers/models/megatron_bert/__init__.py b/src/transformers/models/megatron_bert/__init__.py index 477802fdc0098d..259e56c25b59a4 100644 --- a/src/transformers/models/megatron_bert/__init__.py +++ b/src/transformers/models/megatron_bert/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"], + "configuration_megatron_bert": ["MegatronBertConfig"], } try: @@ -27,7 +27,6 @@ pass else: _import_structure["modeling_megatron_bert"] = [ - "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MegatronBertForCausalLM", "MegatronBertForMaskedLM", "MegatronBertForMultipleChoice", @@ -41,7 +40,7 @@ ] if TYPE_CHECKING: - from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig + from .configuration_megatron_bert import MegatronBertConfig try: if not is_torch_available(): @@ -50,7 +49,6 @@ pass else: from .modeling_megatron_bert import ( - MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, diff --git a/src/transformers/models/megatron_bert/configuration_megatron_bert.py b/src/transformers/models/megatron_bert/configuration_megatron_bert.py index 177bc146a22261..d3be4db99bcbb2 100644 --- a/src/transformers/models/megatron_bert/configuration_megatron_bert.py +++ 
b/src/transformers/models/megatron_bert/configuration_megatron_bert.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MegatronBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MegatronBertModel`]. It is used to instantiate a diff --git a/src/transformers/models/megatron_bert/modeling_megatron_bert.py b/src/transformers/models/megatron_bert/modeling_megatron_bert.py index a9d228bf3bb652..270caf98564320 100755 --- a/src/transformers/models/megatron_bert/modeling_megatron_bert.py +++ b/src/transformers/models/megatron_bert/modeling_megatron_bert.py @@ -58,9 +58,6 @@ _CHECKPOINT_FOR_DOC = "nvidia/megatron-bert-cased-345m" -from ..deprecated._archive_maps import MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def load_tf_weights_in_megatron_bert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: diff --git a/src/transformers/models/mgp_str/__init__.py b/src/transformers/models/mgp_str/__init__.py index 1bb9ae50b291cf..901425ca45d61a 100644 --- a/src/transformers/models/mgp_str/__init__.py +++ b/src/transformers/models/mgp_str/__init__.py @@ -21,7 +21,7 @@ _import_structure = { - "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"], + "configuration_mgp_str": ["MgpstrConfig"], "processing_mgp_str": ["MgpstrProcessor"], "tokenization_mgp_str": ["MgpstrTokenizer"], } @@ -33,14 +33,13 @@ pass else: _import_structure["modeling_mgp_str"] = [ - "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST", "MgpstrModel", "MgpstrPreTrainedModel", "MgpstrForSceneTextRecognition", ] if TYPE_CHECKING: - from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig + from .configuration_mgp_str import MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer @@ -51,7 +50,6 
@@ pass else: from .modeling_mgp_str import ( - MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, diff --git a/src/transformers/models/mgp_str/configuration_mgp_str.py b/src/transformers/models/mgp_str/configuration_mgp_str.py index 2d341309a8a41c..2ce4ffd0c61b10 100644 --- a/src/transformers/models/mgp_str/configuration_mgp_str.py +++ b/src/transformers/models/mgp_str/configuration_mgp_str.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MgpstrConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`MgpstrModel`]. It is used to instantiate an diff --git a/src/transformers/models/mgp_str/modeling_mgp_str.py b/src/transformers/models/mgp_str/modeling_mgp_str.py index 2997e5903cca71..6754cebcae7d73 100644 --- a/src/transformers/models/mgp_str/modeling_mgp_str.py +++ b/src/transformers/models/mgp_str/modeling_mgp_str.py @@ -45,9 +45,6 @@ _CHECKPOINT_FOR_DOC = "alibaba-damo/mgp-str-base" -from ..deprecated._archive_maps import MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ diff --git a/src/transformers/models/mistral/__init__.py b/src/transformers/models/mistral/__init__.py index 34727d98cf05af..dc0b85980ff600 100644 --- a/src/transformers/models/mistral/__init__.py +++ b/src/transformers/models/mistral/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_mistral": ["MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP", "MistralConfig"], + "configuration_mistral": ["MistralConfig"], } @@ -48,7 +48,7 @@ if TYPE_CHECKING: - from .configuration_mistral import MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP, MistralConfig + from .configuration_mistral import MistralConfig try: if not 
is_torch_available(): diff --git a/src/transformers/models/mistral/configuration_mistral.py b/src/transformers/models/mistral/configuration_mistral.py index 83dd0e7a621cff..e281802792d325 100644 --- a/src/transformers/models/mistral/configuration_mistral.py +++ b/src/transformers/models/mistral/configuration_mistral.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MistralConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate an diff --git a/src/transformers/models/mixtral/__init__.py b/src/transformers/models/mixtral/__init__.py index ebde04ea4ae81c..7b8f061dac8362 100644 --- a/src/transformers/models/mixtral/__init__.py +++ b/src/transformers/models/mixtral/__init__.py @@ -21,7 +21,7 @@ _import_structure = { - "configuration_mixtral": ["MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP", "MixtralConfig"], + "configuration_mixtral": ["MixtralConfig"], } @@ -40,7 +40,7 @@ if TYPE_CHECKING: - from .configuration_mixtral import MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP, MixtralConfig + from .configuration_mixtral import MixtralConfig try: if not is_torch_available(): diff --git a/src/transformers/models/mixtral/configuration_mixtral.py b/src/transformers/models/mixtral/configuration_mixtral.py index 66c779248423e1..5304afd5130b05 100644 --- a/src/transformers/models/mixtral/configuration_mixtral.py +++ b/src/transformers/models/mixtral/configuration_mixtral.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MixtralConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MixtralModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/mobilebert/__init__.py b/src/transformers/models/mobilebert/__init__.py index 0d202eb4d4234f..c085c3d8636c1e 100644 --- a/src/transformers/models/mobilebert/__init__.py +++ b/src/transformers/models/mobilebert/__init__.py @@ -25,7 +25,6 @@ _import_structure = { "configuration_mobilebert": [ - "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileBertConfig", "MobileBertOnnxConfig", ], @@ -47,7 +46,6 @@ pass else: _import_structure["modeling_mobilebert"] = [ - "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileBertForMaskedLM", "MobileBertForMultipleChoice", "MobileBertForNextSentencePrediction", @@ -68,7 +66,6 @@ pass else: _import_structure["modeling_tf_mobilebert"] = [ - "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileBertForMaskedLM", "TFMobileBertForMultipleChoice", "TFMobileBertForNextSentencePrediction", @@ -84,7 +81,6 @@ if TYPE_CHECKING: from .configuration_mobilebert import ( - MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) @@ -105,7 +101,6 @@ pass else: from .modeling_mobilebert import ( - MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, @@ -126,7 +121,6 @@ pass else: from .modeling_tf_mobilebert import ( - TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, diff --git a/src/transformers/models/mobilebert/configuration_mobilebert.py b/src/transformers/models/mobilebert/configuration_mobilebert.py index d66dba8c02bde9..197e29fcfe27f3 100644 --- a/src/transformers/models/mobilebert/configuration_mobilebert.py +++ b/src/transformers/models/mobilebert/configuration_mobilebert.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MobileBertConfig(PretrainedConfig): r""" This 
is the configuration class to store the configuration of a [`MobileBertModel`] or a [`TFMobileBertModel`]. It diff --git a/src/transformers/models/mobilebert/modeling_mobilebert.py b/src/transformers/models/mobilebert/modeling_mobilebert.py index 92a18dfe599041..44007667c6b6af 100644 --- a/src/transformers/models/mobilebert/modeling_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_mobilebert.py @@ -77,9 +77,6 @@ _SEQ_CLASS_EXPECTED_LOSS = "4.72" -from ..deprecated._archive_maps import MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: diff --git a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py index 8526e636a2ac48..bab2cbac8ed5fd 100644 --- a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py @@ -85,9 +85,6 @@ _SEQ_CLASS_EXPECTED_LOSS = "4.72" -from ..deprecated._archive_maps import TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.bert.modeling_tf_bert.TFBertPreTrainingLoss class TFMobileBertPreTrainingLoss: """ diff --git a/src/transformers/models/mobilenet_v1/__init__.py b/src/transformers/models/mobilenet_v1/__init__.py index dec8eeec2de566..6ff5725a21a8aa 100644 --- a/src/transformers/models/mobilenet_v1/__init__.py +++ b/src/transformers/models/mobilenet_v1/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_mobilenet_v1": [ - "MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileNetV1Config", "MobileNetV1OnnxConfig", ], @@ -40,7 +39,6 @@ pass else: _import_structure["modeling_mobilenet_v1"] = [ - "MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileNetV1ForImageClassification", "MobileNetV1Model", "MobileNetV1PreTrainedModel", @@ -50,7 +48,6 @@ if TYPE_CHECKING: from .configuration_mobilenet_v1 
import ( - MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetV1Config, MobileNetV1OnnxConfig, ) @@ -71,7 +68,6 @@ pass else: from .modeling_mobilenet_v1 import ( - MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetV1ForImageClassification, MobileNetV1Model, MobileNetV1PreTrainedModel, diff --git a/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py b/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py index 2b575cb6a1dc48..70075bcc94e622 100644 --- a/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +++ b/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MobileNetV1Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MobileNetV1Model`]. It is used to instantiate a diff --git a/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py b/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py index af9d232be8050e..85a398f8981a3a 100755 --- a/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py +++ b/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py @@ -43,9 +43,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def _build_tf_to_pytorch_map(model, config, tf_weights=None): """ A map of modules from TF to PyTorch. 
diff --git a/src/transformers/models/mobilenet_v2/__init__.py b/src/transformers/models/mobilenet_v2/__init__.py index e3d89c8b59479a..5fcab8fe7c4e58 100644 --- a/src/transformers/models/mobilenet_v2/__init__.py +++ b/src/transformers/models/mobilenet_v2/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_mobilenet_v2": [ - "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileNetV2Config", "MobileNetV2OnnxConfig", ], @@ -41,7 +40,6 @@ pass else: _import_structure["modeling_mobilenet_v2"] = [ - "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileNetV2ForImageClassification", "MobileNetV2ForSemanticSegmentation", "MobileNetV2Model", @@ -52,7 +50,6 @@ if TYPE_CHECKING: from .configuration_mobilenet_v2 import ( - MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetV2Config, MobileNetV2OnnxConfig, ) @@ -73,7 +70,6 @@ pass else: from .modeling_mobilenet_v2 import ( - MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model, diff --git a/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py index dd9f6d17cd340a..81e590d5a357f9 100644 --- a/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MobileNetV2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MobileNetV2Model`]. 
It is used to instantiate a diff --git a/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py index e555941baca938..3c8fd5193acf44 100755 --- a/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py @@ -53,9 +53,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def _build_tf_to_pytorch_map(model, config, tf_weights=None): """ A map of modules from TF to PyTorch. diff --git a/src/transformers/models/mobilevit/__init__.py b/src/transformers/models/mobilevit/__init__.py index 5615c622186299..942a963227b955 100644 --- a/src/transformers/models/mobilevit/__init__.py +++ b/src/transformers/models/mobilevit/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"], + "configuration_mobilevit": ["MobileViTConfig", "MobileViTOnnxConfig"], } try: @@ -42,7 +42,6 @@ pass else: _import_structure["modeling_mobilevit"] = [ - "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTForImageClassification", "MobileViTForSemanticSegmentation", "MobileViTModel", @@ -56,7 +55,6 @@ pass else: _import_structure["modeling_tf_mobilevit"] = [ - "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", @@ -64,7 +62,7 @@ ] if TYPE_CHECKING: - from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig + from .configuration_mobilevit import MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): @@ -82,7 +80,6 @@ pass else: from .modeling_mobilevit import ( - MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, @@ 
-96,7 +93,6 @@ pass else: from .modeling_tf_mobilevit import ( - TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, diff --git a/src/transformers/models/mobilevit/configuration_mobilevit.py b/src/transformers/models/mobilevit/configuration_mobilevit.py index 8f13112447f113..5650002b3c55b0 100644 --- a/src/transformers/models/mobilevit/configuration_mobilevit.py +++ b/src/transformers/models/mobilevit/configuration_mobilevit.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MobileViTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MobileViTModel`]. It is used to instantiate a diff --git a/src/transformers/models/mobilevit/modeling_mobilevit.py b/src/transformers/models/mobilevit/modeling_mobilevit.py index 04105effffb2e9..7e23d1b5cadf38 100755 --- a/src/transformers/models/mobilevit/modeling_mobilevit.py +++ b/src/transformers/models/mobilevit/modeling_mobilevit.py @@ -59,9 +59,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int: """ Ensure that all layers have a channel count that is divisible by `divisor`. 
This function is taken from the diff --git a/src/transformers/models/mobilevit/modeling_tf_mobilevit.py b/src/transformers/models/mobilevit/modeling_tf_mobilevit.py index 8434c9685e570f..179f209e871b8a 100644 --- a/src/transformers/models/mobilevit/modeling_tf_mobilevit.py +++ b/src/transformers/models/mobilevit/modeling_tf_mobilevit.py @@ -61,9 +61,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int: """ Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the diff --git a/src/transformers/models/mobilevitv2/__init__.py b/src/transformers/models/mobilevitv2/__init__.py index 043caf7b7526fc..770736c03df7ed 100644 --- a/src/transformers/models/mobilevitv2/__init__.py +++ b/src/transformers/models/mobilevitv2/__init__.py @@ -23,7 +23,6 @@ _import_structure = { "configuration_mobilevitv2": [ - "MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTV2Config", "MobileViTV2OnnxConfig", ], @@ -37,7 +36,6 @@ pass else: _import_structure["modeling_mobilevitv2"] = [ - "MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTV2ForImageClassification", "MobileViTV2ForSemanticSegmentation", "MobileViTV2Model", @@ -46,7 +44,6 @@ if TYPE_CHECKING: from .configuration_mobilevitv2 import ( - MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTV2Config, MobileViTV2OnnxConfig, ) @@ -58,7 +55,6 @@ pass else: from .modeling_mobilevitv2 import ( - MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model, diff --git a/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py b/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py index f8f1be141b52bd..957a43f770fc7e 100644 --- a/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py +++ 
b/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MobileViTV2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MobileViTV2Model`]. It is used to instantiate a diff --git a/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py b/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py index 1943f52f5129e9..3d6c4c1b3954db 100644 --- a/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py +++ b/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py @@ -57,9 +57,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.mobilevit.modeling_mobilevit.make_divisible def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int: """ diff --git a/src/transformers/models/mpnet/__init__.py b/src/transformers/models/mpnet/__init__.py index 993a99c0819bd6..54c20d9f1967dd 100644 --- a/src/transformers/models/mpnet/__init__.py +++ b/src/transformers/models/mpnet/__init__.py @@ -25,7 +25,7 @@ _import_structure = { - "configuration_mpnet": ["MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "MPNetConfig"], + "configuration_mpnet": ["MPNetConfig"], "tokenization_mpnet": ["MPNetTokenizer"], } @@ -44,7 +44,6 @@ pass else: _import_structure["modeling_mpnet"] = [ - "MPNET_PRETRAINED_MODEL_ARCHIVE_LIST", "MPNetForMaskedLM", "MPNetForMultipleChoice", "MPNetForQuestionAnswering", @@ -62,7 +61,6 @@ pass else: _import_structure["modeling_tf_mpnet"] = [ - "TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMPNetEmbeddings", "TFMPNetForMaskedLM", "TFMPNetForMultipleChoice", @@ -76,7 +74,7 @@ if TYPE_CHECKING: - from .configuration_mpnet import MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP, MPNetConfig + from 
.configuration_mpnet import MPNetConfig from .tokenization_mpnet import MPNetTokenizer try: @@ -94,7 +92,6 @@ pass else: from .modeling_mpnet import ( - MPNET_PRETRAINED_MODEL_ARCHIVE_LIST, MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, @@ -112,7 +109,6 @@ pass else: from .modeling_tf_mpnet import ( - TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFMPNetEmbeddings, TFMPNetForMaskedLM, TFMPNetForMultipleChoice, diff --git a/src/transformers/models/mpnet/configuration_mpnet.py b/src/transformers/models/mpnet/configuration_mpnet.py index a8cb07894bde1c..9c53e45d98fce8 100644 --- a/src/transformers/models/mpnet/configuration_mpnet.py +++ b/src/transformers/models/mpnet/configuration_mpnet.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MPNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MPNetModel`] or a [`TFMPNetModel`]. 
It is used to diff --git a/src/transformers/models/mpnet/modeling_mpnet.py b/src/transformers/models/mpnet/modeling_mpnet.py index 12e6bdbffaaa7b..bc811fd22f5ca5 100644 --- a/src/transformers/models/mpnet/modeling_mpnet.py +++ b/src/transformers/models/mpnet/modeling_mpnet.py @@ -45,9 +45,6 @@ _CONFIG_FOR_DOC = "MPNetConfig" -from ..deprecated._archive_maps import MPNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class MPNetPreTrainedModel(PreTrainedModel): config_class = MPNetConfig base_model_prefix = "mpnet" diff --git a/src/transformers/models/mpnet/modeling_tf_mpnet.py b/src/transformers/models/mpnet/modeling_tf_mpnet.py index b57132d81398d0..f0afba869b81d7 100644 --- a/src/transformers/models/mpnet/modeling_tf_mpnet.py +++ b/src/transformers/models/mpnet/modeling_tf_mpnet.py @@ -64,9 +64,6 @@ _CONFIG_FOR_DOC = "MPNetConfig" -from ..deprecated._archive_maps import TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFMPNetPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained diff --git a/src/transformers/models/mpt/__init__.py b/src/transformers/models/mpt/__init__.py index d24a5fad7b9d2c..49b3a0d61fcdb3 100644 --- a/src/transformers/models/mpt/__init__.py +++ b/src/transformers/models/mpt/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_mpt": ["MPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MptConfig", "MptOnnxConfig"], + "configuration_mpt": ["MptConfig", "MptOnnxConfig"], } try: @@ -28,7 +28,6 @@ pass else: _import_structure["modeling_mpt"] = [ - "MPT_PRETRAINED_MODEL_ARCHIVE_LIST", "MptForCausalLM", "MptModel", "MptPreTrainedModel", @@ -38,7 +37,7 @@ ] if TYPE_CHECKING: - from .configuration_mpt import MPT_PRETRAINED_CONFIG_ARCHIVE_MAP, MptConfig, MptOnnxConfig + from .configuration_mpt import MptConfig, MptOnnxConfig try: if not is_torch_available(): @@ -47,7 +46,6 @@ pass else: from .modeling_mpt import ( - 
MPT_PRETRAINED_MODEL_ARCHIVE_LIST, MptForCausalLM, MptForQuestionAnswering, MptForSequenceClassification, diff --git a/src/transformers/models/mpt/configuration_mpt.py b/src/transformers/models/mpt/configuration_mpt.py index 5c1cb4d783b307..5d18b1419e37b5 100644 --- a/src/transformers/models/mpt/configuration_mpt.py +++ b/src/transformers/models/mpt/configuration_mpt.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MptAttentionConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`MptAttention`] class. It is used to instantiate diff --git a/src/transformers/models/mpt/modeling_mpt.py b/src/transformers/models/mpt/modeling_mpt.py index 864e9c09ca3cb7..cffb4b7117e4aa 100644 --- a/src/transformers/models/mpt/modeling_mpt.py +++ b/src/transformers/models/mpt/modeling_mpt.py @@ -43,9 +43,6 @@ _CONFIG_FOR_DOC = "MptConfig" -from ..deprecated._archive_maps import MPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max=8, device=None): r""" Link to paper: https://arxiv.org/abs/2108.12409 - Alibi tensor is not causal as the original paper mentions, it diff --git a/src/transformers/models/mra/__init__.py b/src/transformers/models/mra/__init__.py index d27ee2f1719321..21d82eb3dabac1 100644 --- a/src/transformers/models/mra/__init__.py +++ b/src/transformers/models/mra/__init__.py @@ -21,7 +21,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available -_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} +_import_structure = {"configuration_mra": ["MraConfig"]} try: if not is_torch_available(): @@ -30,7 +30,6 @@ pass else: _import_structure["modeling_mra"] = [ - "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", 
"MraForQuestionAnswering", @@ -43,7 +42,7 @@ if TYPE_CHECKING: - from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig + from .configuration_mra import MraConfig try: if not is_torch_available(): @@ -52,7 +51,6 @@ pass else: from .modeling_mra import ( - MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, diff --git a/src/transformers/models/mra/configuration_mra.py b/src/transformers/models/mra/configuration_mra.py index 2b3bec041633ea..30c38795b57fc7 100644 --- a/src/transformers/models/mra/configuration_mra.py +++ b/src/transformers/models/mra/configuration_mra.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MraConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MraModel`]. It is used to instantiate an MRA diff --git a/src/transformers/models/mra/modeling_mra.py b/src/transformers/models/mra/modeling_mra.py index db918484d986cd..b47a41d73d1950 100644 --- a/src/transformers/models/mra/modeling_mra.py +++ b/src/transformers/models/mra/modeling_mra.py @@ -54,9 +54,6 @@ _TOKENIZER_FOR_DOC = "AutoTokenizer" -from ..deprecated._archive_maps import MRA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - mra_cuda_kernel = None diff --git a/src/transformers/models/musicgen/__init__.py b/src/transformers/models/musicgen/__init__.py index 7fa695eba80863..3b03adae12fc76 100644 --- a/src/transformers/models/musicgen/__init__.py +++ b/src/transformers/models/musicgen/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_musicgen": [ - "MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", "MusicgenConfig", "MusicgenDecoderConfig", ], @@ -32,7 +31,6 @@ pass else: _import_structure["modeling_musicgen"] = [ - "MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST", "MusicgenForConditionalGeneration", "MusicgenForCausalLM", "MusicgenModel", @@ -41,7 +39,6 @@ if 
TYPE_CHECKING: from .configuration_musicgen import ( - MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, MusicgenConfig, MusicgenDecoderConfig, ) @@ -54,7 +51,6 @@ pass else: from .modeling_musicgen import ( - MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST, MusicgenForCausalLM, MusicgenForConditionalGeneration, MusicgenModel, diff --git a/src/transformers/models/musicgen/configuration_musicgen.py b/src/transformers/models/musicgen/configuration_musicgen.py index b102d67630254b..58be655f384bf4 100644 --- a/src/transformers/models/musicgen/configuration_musicgen.py +++ b/src/transformers/models/musicgen/configuration_musicgen.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class MusicgenDecoderConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`MusicgenDecoder`]. It is used to instantiate a diff --git a/src/transformers/models/musicgen/modeling_musicgen.py b/src/transformers/models/musicgen/modeling_musicgen.py index 08f42ce69e18a2..ad22fbec39f17e 100644 --- a/src/transformers/models/musicgen/modeling_musicgen.py +++ b/src/transformers/models/musicgen/modeling_musicgen.py @@ -69,9 +69,6 @@ _CHECKPOINT_FOR_DOC = "facebook/musicgen-small" -from ..deprecated._archive_maps import MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/musicgen_melody/__init__.py b/src/transformers/models/musicgen_melody/__init__.py index 082c8f4ea66ea4..20c8507aaed7b3 100644 --- a/src/transformers/models/musicgen_melody/__init__.py +++ b/src/transformers/models/musicgen_melody/__init__.py @@ -23,7 +23,6 @@ _import_structure = { "configuration_musicgen_melody": [ - "MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP", "MusicgenMelodyConfig", 
"MusicgenMelodyDecoderConfig", ], @@ -36,7 +35,6 @@ pass else: _import_structure["modeling_musicgen_melody"] = [ - "MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST", "MusicgenMelodyForConditionalGeneration", "MusicgenMelodyForCausalLM", "MusicgenMelodyModel", @@ -55,7 +53,6 @@ if TYPE_CHECKING: from .configuration_musicgen_melody import ( - MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP, MusicgenMelodyConfig, MusicgenMelodyDecoderConfig, ) @@ -67,7 +64,6 @@ pass else: from .modeling_musicgen_melody import ( - MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST, MusicgenMelodyForCausalLM, MusicgenMelodyForConditionalGeneration, MusicgenMelodyModel, diff --git a/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py b/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py index 335c0514163f1f..050bbbc2b6d7a8 100644 --- a/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py +++ b/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py @@ -21,8 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - class MusicgenMelodyDecoderConfig(PretrainedConfig): r""" diff --git a/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py b/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py index 55850e0acf9e73..2cfc1b7ba79957 100644 --- a/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py +++ b/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py @@ -60,8 +60,6 @@ _CONFIG_FOR_DOC = "MusicgenMelodyConfig" _CHECKPOINT_FOR_DOC = "facebook/musicgen-melody" -from ..deprecated._archive_maps import MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): diff --git a/src/transformers/models/mvp/__init__.py b/src/transformers/models/mvp/__init__.py index 
406dc531e96f78..e865b8827c5cd8 100644 --- a/src/transformers/models/mvp/__init__.py +++ b/src/transformers/models/mvp/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"], + "configuration_mvp": ["MvpConfig", "MvpOnnxConfig"], "tokenization_mvp": ["MvpTokenizer"], } @@ -36,7 +36,6 @@ pass else: _import_structure["modeling_mvp"] = [ - "MVP_PRETRAINED_MODEL_ARCHIVE_LIST", "MvpForCausalLM", "MvpForConditionalGeneration", "MvpForQuestionAnswering", @@ -46,7 +45,7 @@ ] if TYPE_CHECKING: - from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig + from .configuration_mvp import MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: @@ -64,7 +63,6 @@ pass else: from .modeling_mvp import ( - MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, diff --git a/src/transformers/models/mvp/modeling_mvp.py b/src/transformers/models/mvp/modeling_mvp.py index fe289dc81e6a43..7c0f47856778cd 100644 --- a/src/transformers/models/mvp/modeling_mvp.py +++ b/src/transformers/models/mvp/modeling_mvp.py @@ -54,9 +54,6 @@ _EXPECTED_OUTPUT_SHAPE = [1, 8, 1024] -from ..deprecated._archive_maps import MVP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ diff --git a/src/transformers/models/nat/__init__.py b/src/transformers/models/nat/__init__.py index 19ddb46e8266fa..bcf05ddf41ed9b 100644 --- a/src/transformers/models/nat/__init__.py +++ b/src/transformers/models/nat/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available -_import_structure = {"configuration_nat": ["NAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "NatConfig"]} +_import_structure = {"configuration_nat": ["NatConfig"]} 
try: @@ -26,7 +26,6 @@ pass else: _import_structure["modeling_nat"] = [ - "NAT_PRETRAINED_MODEL_ARCHIVE_LIST", "NatForImageClassification", "NatModel", "NatPreTrainedModel", @@ -34,7 +33,7 @@ ] if TYPE_CHECKING: - from .configuration_nat import NAT_PRETRAINED_CONFIG_ARCHIVE_MAP, NatConfig + from .configuration_nat import NatConfig try: if not is_torch_available(): @@ -43,7 +42,6 @@ pass else: from .modeling_nat import ( - NAT_PRETRAINED_MODEL_ARCHIVE_LIST, NatBackbone, NatForImageClassification, NatModel, diff --git a/src/transformers/models/nat/configuration_nat.py b/src/transformers/models/nat/configuration_nat.py index bb3b85a80c263b..baf0ea13a5f902 100644 --- a/src/transformers/models/nat/configuration_nat.py +++ b/src/transformers/models/nat/configuration_nat.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import NAT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class NatConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`NatModel`]. It is used to instantiate a Nat model diff --git a/src/transformers/models/nat/modeling_nat.py b/src/transformers/models/nat/modeling_nat.py index 2434b65161a47c..b9c332c894e775 100644 --- a/src/transformers/models/nat/modeling_nat.py +++ b/src/transformers/models/nat/modeling_nat.py @@ -68,9 +68,6 @@ def natten2dav(*args, **kwargs): _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat" -from ..deprecated._archive_maps import NAT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # drop_path and NatDropPath are from the timm library. 
diff --git a/src/transformers/models/nezha/__init__.py b/src/transformers/models/nezha/__init__.py index f9078fc4a5667a..5149adf3a0cdfa 100644 --- a/src/transformers/models/nezha/__init__.py +++ b/src/transformers/models/nezha/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"], + "configuration_nezha": ["NezhaConfig"], } try: @@ -27,7 +27,6 @@ pass else: _import_structure["modeling_nezha"] = [ - "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST", "NezhaForNextSentencePrediction", "NezhaForMaskedLM", "NezhaForPreTraining", @@ -41,7 +40,7 @@ if TYPE_CHECKING: - from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig + from .configuration_nezha import NezhaConfig try: if not is_torch_available(): @@ -50,7 +49,6 @@ pass else: from .modeling_nezha import ( - NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, diff --git a/src/transformers/models/nezha/configuration_nezha.py b/src/transformers/models/nezha/configuration_nezha.py index a19c27d62a4a92..4e145e4b687529 100644 --- a/src/transformers/models/nezha/configuration_nezha.py +++ b/src/transformers/models/nezha/configuration_nezha.py @@ -1,5 +1,4 @@ from ... 
import PretrainedConfig -from ..deprecated._archive_maps import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class NezhaConfig(PretrainedConfig): diff --git a/src/transformers/models/nezha/modeling_nezha.py b/src/transformers/models/nezha/modeling_nezha.py index 5ab2dc8958dff0..e10ba416968265 100644 --- a/src/transformers/models/nezha/modeling_nezha.py +++ b/src/transformers/models/nezha/modeling_nezha.py @@ -56,9 +56,6 @@ _CONFIG_FOR_DOC = "NezhaConfig" -from ..deprecated._archive_maps import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def load_tf_weights_in_nezha(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: diff --git a/src/transformers/models/nllb_moe/__init__.py b/src/transformers/models/nllb_moe/__init__.py index ea0f7752ed0cac..ccb961ba38e8c0 100644 --- a/src/transformers/models/nllb_moe/__init__.py +++ b/src/transformers/models/nllb_moe/__init__.py @@ -17,12 +17,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available -_import_structure = { - "configuration_nllb_moe": [ - "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", - "NllbMoeConfig", - ] -} +_import_structure = {"configuration_nllb_moe": ["NllbMoeConfig"]} try: if not is_torch_available(): @@ -31,7 +26,6 @@ pass else: _import_structure["modeling_nllb_moe"] = [ - "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST", "NllbMoeForConditionalGeneration", "NllbMoeModel", "NllbMoePreTrainedModel", @@ -42,7 +36,6 @@ if TYPE_CHECKING: from .configuration_nllb_moe import ( - NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) @@ -53,7 +46,6 @@ pass else: from .modeling_nllb_moe import ( - NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, diff --git a/src/transformers/models/nllb_moe/configuration_nllb_moe.py b/src/transformers/models/nllb_moe/configuration_nllb_moe.py index 48172824ff2425..98c8397c185b81 100644 --- 
a/src/transformers/models/nllb_moe/configuration_nllb_moe.py +++ b/src/transformers/models/nllb_moe/configuration_nllb_moe.py @@ -20,9 +20,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class NllbMoeConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`NllbMoeModel`]. It is used to instantiate an diff --git a/src/transformers/models/nllb_moe/modeling_nllb_moe.py b/src/transformers/models/nllb_moe/modeling_nllb_moe.py index e8c827b6087ca6..1f39dfd39ba0ef 100644 --- a/src/transformers/models/nllb_moe/modeling_nllb_moe.py +++ b/src/transformers/models/nllb_moe/modeling_nllb_moe.py @@ -54,8 +54,6 @@ # for the pretrained weights provided with the models #################################################### -from ..deprecated._archive_maps import NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): diff --git a/src/transformers/models/nystromformer/__init__.py b/src/transformers/models/nystromformer/__init__.py index 4e94fc8f263965..74f8a620204f3f 100644 --- a/src/transformers/models/nystromformer/__init__.py +++ b/src/transformers/models/nystromformer/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_nystromformer": ["NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "NystromformerConfig"], + "configuration_nystromformer": ["NystromformerConfig"], } try: @@ -27,7 +27,6 @@ pass else: _import_structure["modeling_nystromformer"] = [ - "NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "NystromformerForMaskedLM", "NystromformerForMultipleChoice", "NystromformerForQuestionAnswering", @@ -40,7 +39,7 @@ if TYPE_CHECKING: - from .configuration_nystromformer import NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, NystromformerConfig + from 
.configuration_nystromformer import NystromformerConfig try: if not is_torch_available(): @@ -49,7 +48,6 @@ pass else: from .modeling_nystromformer import ( - NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, diff --git a/src/transformers/models/nystromformer/configuration_nystromformer.py b/src/transformers/models/nystromformer/configuration_nystromformer.py index af6e8d2c21b099..ca277e266d5a16 100644 --- a/src/transformers/models/nystromformer/configuration_nystromformer.py +++ b/src/transformers/models/nystromformer/configuration_nystromformer.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class NystromformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`NystromformerModel`]. It is used to instantiate diff --git a/src/transformers/models/nystromformer/modeling_nystromformer.py b/src/transformers/models/nystromformer/modeling_nystromformer.py index df0dd0e405c0ef..cb027c314e989d 100755 --- a/src/transformers/models/nystromformer/modeling_nystromformer.py +++ b/src/transformers/models/nystromformer/modeling_nystromformer.py @@ -44,9 +44,6 @@ _CONFIG_FOR_DOC = "NystromformerConfig" -from ..deprecated._archive_maps import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class NystromformerEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" diff --git a/src/transformers/models/olmo/__init__.py b/src/transformers/models/olmo/__init__.py index 3cead944521b41..b94350cd331047 100644 --- a/src/transformers/models/olmo/__init__.py +++ b/src/transformers/models/olmo/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_olmo": ["OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP", "OlmoConfig"], + "configuration_olmo": ["OlmoConfig"], } try: @@ -39,7 +39,7 
@@ ] if TYPE_CHECKING: - from .configuration_olmo import OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP, OlmoConfig + from .configuration_olmo import OlmoConfig try: if not is_torch_available(): diff --git a/src/transformers/models/olmo/configuration_olmo.py b/src/transformers/models/olmo/configuration_olmo.py index 17a790227683bf..56cd01f7f2a72d 100644 --- a/src/transformers/models/olmo/configuration_olmo.py +++ b/src/transformers/models/olmo/configuration_olmo.py @@ -21,7 +21,6 @@ from ...configuration_utils import PretrainedConfig from ...utils import logging -from ..deprecated._archive_maps import OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 logger = logging.get_logger(__name__) diff --git a/src/transformers/models/oneformer/__init__.py b/src/transformers/models/oneformer/__init__.py index 01bbaa1398142c..11ddde65d05991 100644 --- a/src/transformers/models/oneformer/__init__.py +++ b/src/transformers/models/oneformer/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_oneformer": ["ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "OneFormerConfig"], + "configuration_oneformer": ["OneFormerConfig"], "processing_oneformer": ["OneFormerProcessor"], } @@ -36,14 +36,13 @@ pass else: _import_structure["modeling_oneformer"] = [ - "ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "OneFormerForUniversalSegmentation", "OneFormerModel", "OneFormerPreTrainedModel", ] if TYPE_CHECKING: - from .configuration_oneformer import ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, OneFormerConfig + from .configuration_oneformer import OneFormerConfig from .processing_oneformer import OneFormerProcessor try: @@ -60,7 +59,6 @@ pass else: from .modeling_oneformer import ( - ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, OneFormerForUniversalSegmentation, OneFormerModel, OneFormerPreTrainedModel, diff --git a/src/transformers/models/oneformer/configuration_oneformer.py b/src/transformers/models/oneformer/configuration_oneformer.py index 1cbd2ab7dbc18f..f3c01191d98fb8 100644 --- 
a/src/transformers/models/oneformer/configuration_oneformer.py +++ b/src/transformers/models/oneformer/configuration_oneformer.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class OneFormerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`OneFormerModel`]. It is used to instantiate a diff --git a/src/transformers/models/oneformer/modeling_oneformer.py b/src/transformers/models/oneformer/modeling_oneformer.py index 6af4226995bfa1..fff665b2ffbda5 100644 --- a/src/transformers/models/oneformer/modeling_oneformer.py +++ b/src/transformers/models/oneformer/modeling_oneformer.py @@ -52,9 +52,6 @@ _CHECKPOINT_FOR_DOC = "shi-labs/oneformer_ade20k_swin_tiny" -from ..deprecated._archive_maps import ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - if is_scipy_available(): from scipy.optimize import linear_sum_assignment diff --git a/src/transformers/models/openai/__init__.py b/src/transformers/models/openai/__init__.py index b7dba0b5dc0cf8..af4ebbfee6630b 100644 --- a/src/transformers/models/openai/__init__.py +++ b/src/transformers/models/openai/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_openai": ["OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OpenAIGPTConfig"], + "configuration_openai": ["OpenAIGPTConfig"], "tokenization_openai": ["OpenAIGPTTokenizer"], } @@ -43,7 +43,6 @@ pass else: _import_structure["modeling_openai"] = [ - "OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST", "OpenAIGPTDoubleHeadsModel", "OpenAIGPTForSequenceClassification", "OpenAIGPTLMHeadModel", @@ -59,7 +58,6 @@ pass else: _import_structure["modeling_tf_openai"] = [ - "TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFOpenAIGPTDoubleHeadsModel", "TFOpenAIGPTForSequenceClassification", "TFOpenAIGPTLMHeadModel", @@ -70,7 +68,7 @@ if TYPE_CHECKING: - from .configuration_openai import 
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig + from .configuration_openai import OpenAIGPTConfig from .tokenization_openai import OpenAIGPTTokenizer try: @@ -88,7 +86,6 @@ pass else: from .modeling_openai import ( - OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, @@ -104,7 +101,6 @@ pass else: from .modeling_tf_openai import ( - TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTForSequenceClassification, TFOpenAIGPTLMHeadModel, diff --git a/src/transformers/models/openai/configuration_openai.py b/src/transformers/models/openai/configuration_openai.py index 422922c7912dec..64411455eefd35 100644 --- a/src/transformers/models/openai/configuration_openai.py +++ b/src/transformers/models/openai/configuration_openai.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class OpenAIGPTConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`OpenAIGPTModel`] or a [`TFOpenAIGPTModel`]. 
It is diff --git a/src/transformers/models/openai/modeling_openai.py b/src/transformers/models/openai/modeling_openai.py index 637aa90cff9f1d..1c754daa0e317a 100644 --- a/src/transformers/models/openai/modeling_openai.py +++ b/src/transformers/models/openai/modeling_openai.py @@ -47,9 +47,6 @@ _CONFIG_FOR_DOC = "OpenAIGPTConfig" -from ..deprecated._archive_maps import OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path): """Load tf pre-trained weights in a pytorch model (from NumPy arrays here)""" import re diff --git a/src/transformers/models/openai/modeling_tf_openai.py b/src/transformers/models/openai/modeling_tf_openai.py index b826936c51fbd6..e2d0ae885cea52 100644 --- a/src/transformers/models/openai/modeling_tf_openai.py +++ b/src/transformers/models/openai/modeling_tf_openai.py @@ -56,9 +56,6 @@ _CONFIG_FOR_DOC = "OpenAIGPTConfig" -from ..deprecated._archive_maps import TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFAttention(keras.layers.Layer): def __init__(self, nx, config, scale=False, **kwargs): super().__init__(**kwargs) diff --git a/src/transformers/models/opt/__init__.py b/src/transformers/models/opt/__init__.py index db1c9300824b38..5ae39344b2ffce 100644 --- a/src/transformers/models/opt/__init__.py +++ b/src/transformers/models/opt/__init__.py @@ -23,7 +23,7 @@ ) -_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]} +_import_structure = {"configuration_opt": ["OPTConfig"]} try: if not is_torch_available(): @@ -32,7 +32,6 @@ pass else: _import_structure["modeling_opt"] = [ - "OPT_PRETRAINED_MODEL_ARCHIVE_LIST", "OPTForCausalLM", "OPTModel", "OPTPreTrainedModel", @@ -62,7 +61,7 @@ if TYPE_CHECKING: - from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig + from .configuration_opt import OPTConfig try: if not is_torch_available(): @@ -71,7 +70,6 @@ pass else: from 
.modeling_opt import ( - OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, diff --git a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index 5e9e53a2ac3251..f93c3866aeca58 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -61,9 +61,6 @@ _SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'" -from ..deprecated._archive_maps import OPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/owlv2/__init__.py b/src/transformers/models/owlv2/__init__.py index 895379db36309a..83d432766d6992 100644 --- a/src/transformers/models/owlv2/__init__.py +++ b/src/transformers/models/owlv2/__init__.py @@ -23,7 +23,6 @@ _import_structure = { "configuration_owlv2": [ - "OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Owlv2Config", "Owlv2TextConfig", "Owlv2VisionConfig", @@ -47,7 +46,6 @@ pass else: _import_structure["modeling_owlv2"] = [ - "OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Owlv2Model", "Owlv2PreTrainedModel", "Owlv2TextModel", @@ -57,7 +55,6 @@ if TYPE_CHECKING: from .configuration_owlv2 import ( - OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig, @@ -79,7 +76,6 @@ pass else: from .modeling_owlv2 import ( - OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST, Owlv2ForObjectDetection, Owlv2Model, Owlv2PreTrainedModel, diff --git a/src/transformers/models/owlv2/configuration_owlv2.py b/src/transformers/models/owlv2/configuration_owlv2.py index fe96ff8fa4c5f1..4b09166b70c2bc 100644 --- a/src/transformers/models/owlv2/configuration_owlv2.py +++ b/src/transformers/models/owlv2/configuration_owlv2.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import 
OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - # Copied from transformers.models.owlvit.configuration_owlvit.OwlViTTextConfig with OwlViT->Owlv2, owlvit-base-patch32->owlv2-base-patch16, owlvit->owlv2, OWL-ViT->OWLv2 class Owlv2TextConfig(PretrainedConfig): r""" diff --git a/src/transformers/models/owlv2/modeling_owlv2.py b/src/transformers/models/owlv2/modeling_owlv2.py index d99b269012d183..bbeeb386d4afae 100644 --- a/src/transformers/models/owlv2/modeling_owlv2.py +++ b/src/transformers/models/owlv2/modeling_owlv2.py @@ -48,8 +48,6 @@ # See all Owlv2 models at https://huggingface.co/models?filter=owlv2 -from ..deprecated._archive_maps import OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - # Copied from transformers.models.clip.modeling_clip.contrastive_loss with clip->owlv2 def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: diff --git a/src/transformers/models/owlvit/__init__.py b/src/transformers/models/owlvit/__init__.py index 599508e0e5cae7..a6da47da9a0fb7 100644 --- a/src/transformers/models/owlvit/__init__.py +++ b/src/transformers/models/owlvit/__init__.py @@ -26,7 +26,6 @@ _import_structure = { "configuration_owlvit": [ - "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OwlViTConfig", "OwlViTOnnxConfig", "OwlViTTextConfig", @@ -52,7 +51,6 @@ pass else: _import_structure["modeling_owlvit"] = [ - "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "OwlViTModel", "OwlViTPreTrainedModel", "OwlViTTextModel", @@ -62,7 +60,6 @@ if TYPE_CHECKING: from .configuration_owlvit import ( - OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, @@ -86,7 +83,6 @@ pass else: from .modeling_owlvit import ( - OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, diff --git a/src/transformers/models/owlvit/configuration_owlvit.py b/src/transformers/models/owlvit/configuration_owlvit.py index d223cdf81270d7..747f1c3ccbe78a 100644 --- 
a/src/transformers/models/owlvit/configuration_owlvit.py +++ b/src/transformers/models/owlvit/configuration_owlvit.py @@ -31,9 +31,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class OwlViTTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`OwlViTTextModel`]. It is used to instantiate an diff --git a/src/transformers/models/owlvit/modeling_owlvit.py b/src/transformers/models/owlvit/modeling_owlvit.py index 751f9c9a52ee9f..8d0673341c6f71 100644 --- a/src/transformers/models/owlvit/modeling_owlvit.py +++ b/src/transformers/models/owlvit/modeling_owlvit.py @@ -48,8 +48,6 @@ # See all OwlViT models at https://huggingface.co/models?filter=owlvit -from ..deprecated._archive_maps import OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - # Copied from transformers.models.clip.modeling_clip.contrastive_loss with clip->owlvit def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: diff --git a/src/transformers/models/patchtsmixer/__init__.py b/src/transformers/models/patchtsmixer/__init__.py index 63f433791e1fe8..b227ca1655c440 100644 --- a/src/transformers/models/patchtsmixer/__init__.py +++ b/src/transformers/models/patchtsmixer/__init__.py @@ -18,10 +18,7 @@ _import_structure = { - "configuration_patchtsmixer": [ - "PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "PatchTSMixerConfig", - ], + "configuration_patchtsmixer": ["PatchTSMixerConfig"], } try: @@ -31,7 +28,6 @@ pass else: _import_structure["modeling_patchtsmixer"] = [ - "PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST", "PatchTSMixerPreTrainedModel", "PatchTSMixerModel", "PatchTSMixerForPretraining", @@ -43,7 +39,6 @@ if TYPE_CHECKING: from .configuration_patchtsmixer import ( - PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP, PatchTSMixerConfig, ) @@ -54,7 +49,6 @@ pass else: from .modeling_patchtsmixer import ( - PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST, 
PatchTSMixerForPrediction, PatchTSMixerForPretraining, PatchTSMixerForRegression, diff --git a/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py b/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py index 2f4f1dc7619215..c3766c33250cfc 100644 --- a/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py +++ b/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class PatchTSMixerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PatchTSMixerModel`]. It is used to instantiate a diff --git a/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py b/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py index dade06dfde053a..a824faa0409456 100644 --- a/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py +++ b/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py @@ -39,9 +39,6 @@ _CONFIG_FOR_DOC = "PatchTSMixerConfig" -from ..deprecated._archive_maps import PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - PATCHTSMIXER_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the diff --git a/src/transformers/models/patchtst/__init__.py b/src/transformers/models/patchtst/__init__.py index 8c7db64c198406..5ba6316505afdf 100644 --- a/src/transformers/models/patchtst/__init__.py +++ b/src/transformers/models/patchtst/__init__.py @@ -18,10 +18,7 @@ _import_structure = { - "configuration_patchtst": [ - "PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP", - "PatchTSTConfig", - ], + "configuration_patchtst": ["PatchTSTConfig"], } try: @@ -31,7 +28,6 @@ pass else: _import_structure["modeling_patchtst"] = [ - "PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST", "PatchTSTModel", "PatchTSTPreTrainedModel", "PatchTSTForPrediction", @@ -42,7 +38,7 @@ if TYPE_CHECKING: - from .configuration_patchtst import PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP, PatchTSTConfig + from .configuration_patchtst import PatchTSTConfig try: if not is_torch_available(): @@ -51,7 +47,6 @@ pass else: from .modeling_patchtst import ( - PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST, PatchTSTForClassification, PatchTSTForPrediction, PatchTSTForPretraining, diff --git a/src/transformers/models/patchtst/configuration_patchtst.py b/src/transformers/models/patchtst/configuration_patchtst.py index dc95429d90995a..acae3d0dc60d29 100644 --- a/src/transformers/models/patchtst/configuration_patchtst.py +++ b/src/transformers/models/patchtst/configuration_patchtst.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class PatchTSTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of an [`PatchTSTModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/patchtst/modeling_patchtst.py b/src/transformers/models/patchtst/modeling_patchtst.py index 22b206726e16d3..884cd44c83e86b 100755 --- a/src/transformers/models/patchtst/modeling_patchtst.py +++ b/src/transformers/models/patchtst/modeling_patchtst.py @@ -34,9 +34,6 @@ _CONFIG_FOR_DOC = "PatchTSTConfig" -from ..deprecated._archive_maps import PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PatchTST class PatchTSTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" diff --git a/src/transformers/models/pegasus/__init__.py b/src/transformers/models/pegasus/__init__.py index 97d6ddb31ac00c..15ac3b56cff038 100644 --- a/src/transformers/models/pegasus/__init__.py +++ b/src/transformers/models/pegasus/__init__.py @@ -24,7 +24,7 @@ ) -_import_structure = {"configuration_pegasus": ["PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusConfig"]} +_import_structure = {"configuration_pegasus": ["PegasusConfig"]} try: if not is_sentencepiece_available(): @@ -49,7 +49,6 @@ pass else: _import_structure["modeling_pegasus"] = [ - "PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST", "PegasusForCausalLM", "PegasusForConditionalGeneration", "PegasusModel", @@ -82,7 +81,7 @@ if TYPE_CHECKING: - from .configuration_pegasus import PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusConfig + from .configuration_pegasus import PegasusConfig try: if not is_sentencepiece_available(): @@ -107,7 +106,6 @@ pass else: from .modeling_pegasus import ( - PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusForCausalLM, PegasusForConditionalGeneration, PegasusModel, diff --git a/src/transformers/models/pegasus/configuration_pegasus.py b/src/transformers/models/pegasus/configuration_pegasus.py index 39d3865fd57b4e..7dff1a7f85a32c 100644 --- a/src/transformers/models/pegasus/configuration_pegasus.py +++ 
b/src/transformers/models/pegasus/configuration_pegasus.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class PegasusConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PegasusModel`]. It is used to instantiate an diff --git a/src/transformers/models/pegasus_x/__init__.py b/src/transformers/models/pegasus_x/__init__.py index 32003120c6a0b1..ce26210d3bc6b9 100644 --- a/src/transformers/models/pegasus_x/__init__.py +++ b/src/transformers/models/pegasus_x/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"], + "configuration_pegasus_x": ["PegasusXConfig"], } try: @@ -27,7 +27,6 @@ pass else: _import_structure["modeling_pegasus_x"] = [ - "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST", "PegasusXForConditionalGeneration", "PegasusXModel", "PegasusXPreTrainedModel", @@ -35,7 +34,7 @@ if TYPE_CHECKING: - from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig + from .configuration_pegasus_x import PegasusXConfig try: if not is_torch_available(): @@ -44,7 +43,6 @@ pass else: from .modeling_pegasus_x import ( - PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, diff --git a/src/transformers/models/pegasus_x/configuration_pegasus_x.py b/src/transformers/models/pegasus_x/configuration_pegasus_x.py index fa1f3da6d364a3..166f3b18ab0623 100644 --- a/src/transformers/models/pegasus_x/configuration_pegasus_x.py +++ b/src/transformers/models/pegasus_x/configuration_pegasus_x.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class PegasusXConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a 
[`PegasusXModel`]. It is used to instantiate a diff --git a/src/transformers/models/pegasus_x/modeling_pegasus_x.py b/src/transformers/models/pegasus_x/modeling_pegasus_x.py index 2a5e9a1fc24809..ba99256a16eba1 100755 --- a/src/transformers/models/pegasus_x/modeling_pegasus_x.py +++ b/src/transformers/models/pegasus_x/modeling_pegasus_x.py @@ -49,9 +49,6 @@ _CONFIG_FOR_DOC = "PegasusXConfig" -from ..deprecated._archive_maps import PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclasses.dataclass class DimensionInfo: """Wrapper for dimension info.""" diff --git a/src/transformers/models/perceiver/__init__.py b/src/transformers/models/perceiver/__init__.py index 997f88234fc2c8..5cc52d61977203 100644 --- a/src/transformers/models/perceiver/__init__.py +++ b/src/transformers/models/perceiver/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"], + "configuration_perceiver": ["PerceiverConfig", "PerceiverOnnxConfig"], "tokenization_perceiver": ["PerceiverTokenizer"], } @@ -43,7 +43,6 @@ pass else: _import_structure["modeling_perceiver"] = [ - "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST", "PerceiverForImageClassificationConvProcessing", "PerceiverForImageClassificationFourier", "PerceiverForImageClassificationLearned", @@ -58,7 +57,7 @@ if TYPE_CHECKING: - from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig + from .configuration_perceiver import PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: @@ -77,7 +76,6 @@ pass else: from .modeling_perceiver import ( - PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, diff --git a/src/transformers/models/perceiver/configuration_perceiver.py 
b/src/transformers/models/perceiver/configuration_perceiver.py index eb9458989cad01..b4b996aef02a4b 100644 --- a/src/transformers/models/perceiver/configuration_perceiver.py +++ b/src/transformers/models/perceiver/configuration_perceiver.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class PerceiverConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PerceiverModel`]. It is used to instantiate an diff --git a/src/transformers/models/perceiver/modeling_perceiver.py b/src/transformers/models/perceiver/modeling_perceiver.py index 5de7635355ddb3..f768df991b09c0 100755 --- a/src/transformers/models/perceiver/modeling_perceiver.py +++ b/src/transformers/models/perceiver/modeling_perceiver.py @@ -52,9 +52,6 @@ _CONFIG_FOR_DOC = "PerceiverConfig" -from ..deprecated._archive_maps import PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class PerceiverModelOutput(ModelOutput): """ diff --git a/src/transformers/models/persimmon/__init__.py b/src/transformers/models/persimmon/__init__.py index 4c88459362eb72..75bc218a2913c7 100644 --- a/src/transformers/models/persimmon/__init__.py +++ b/src/transformers/models/persimmon/__init__.py @@ -21,7 +21,7 @@ _import_structure = { - "configuration_persimmon": ["PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP", "PersimmonConfig"], + "configuration_persimmon": ["PersimmonConfig"], } @@ -40,7 +40,7 @@ if TYPE_CHECKING: - from .configuration_persimmon import PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP, PersimmonConfig + from .configuration_persimmon import PersimmonConfig try: if not is_torch_available(): diff --git a/src/transformers/models/persimmon/configuration_persimmon.py b/src/transformers/models/persimmon/configuration_persimmon.py index 8408ef8dea20bb..04bf792964c89b 100644 --- a/src/transformers/models/persimmon/configuration_persimmon.py +++ 
b/src/transformers/models/persimmon/configuration_persimmon.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class PersimmonConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PersimmonModel`]. It is used to instantiate an diff --git a/src/transformers/models/phi/__init__.py b/src/transformers/models/phi/__init__.py index ba79ac81a6b9e5..662c0a9bf3487d 100644 --- a/src/transformers/models/phi/__init__.py +++ b/src/transformers/models/phi/__init__.py @@ -25,7 +25,7 @@ _import_structure = { - "configuration_phi": ["PHI_PRETRAINED_CONFIG_ARCHIVE_MAP", "PhiConfig"], + "configuration_phi": ["PhiConfig"], } try: @@ -35,7 +35,6 @@ pass else: _import_structure["modeling_phi"] = [ - "PHI_PRETRAINED_MODEL_ARCHIVE_LIST", "PhiPreTrainedModel", "PhiModel", "PhiForCausalLM", @@ -45,7 +44,7 @@ if TYPE_CHECKING: - from .configuration_phi import PHI_PRETRAINED_CONFIG_ARCHIVE_MAP, PhiConfig + from .configuration_phi import PhiConfig try: if not is_torch_available(): @@ -54,7 +53,6 @@ pass else: from .modeling_phi import ( - PHI_PRETRAINED_MODEL_ARCHIVE_LIST, PhiForCausalLM, PhiForSequenceClassification, PhiForTokenClassification, diff --git a/src/transformers/models/phi/configuration_phi.py b/src/transformers/models/phi/configuration_phi.py index 59d63ae65da062..d221255f1182b9 100644 --- a/src/transformers/models/phi/configuration_phi.py +++ b/src/transformers/models/phi/configuration_phi.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import PHI_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class PhiConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PhiModel`]. 
It is used to instantiate an Phi diff --git a/src/transformers/models/phi/modeling_phi.py b/src/transformers/models/phi/modeling_phi.py index b23073d332e4d5..795ff18e5bcd1f 100644 --- a/src/transformers/models/phi/modeling_phi.py +++ b/src/transformers/models/phi/modeling_phi.py @@ -63,9 +63,6 @@ _CONFIG_FOR_DOC = "PhiConfig" -from ..deprecated._archive_maps import PHI_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/phi3/__init__.py b/src/transformers/models/phi3/__init__.py index 20cb69f4abc801..bfe766dfac9fef 100644 --- a/src/transformers/models/phi3/__init__.py +++ b/src/transformers/models/phi3/__init__.py @@ -25,7 +25,7 @@ _import_structure = { - "configuration_phi3": ["PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP", "Phi3Config"], + "configuration_phi3": ["Phi3Config"], } try: @@ -35,7 +35,6 @@ pass else: _import_structure["modeling_phi3"] = [ - "PHI3_PRETRAINED_MODEL_ARCHIVE_LIST", "Phi3PreTrainedModel", "Phi3Model", "Phi3ForCausalLM", @@ -45,7 +44,7 @@ if TYPE_CHECKING: - from .configuration_phi3 import PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP, Phi3Config + from .configuration_phi3 import Phi3Config try: if not is_torch_available(): @@ -54,7 +53,6 @@ pass else: from .modeling_phi3 import ( - PHI3_PRETRAINED_MODEL_ARCHIVE_LIST, Phi3ForCausalLM, Phi3ForSequenceClassification, Phi3ForTokenClassification, diff --git a/src/transformers/models/phi3/configuration_phi3.py b/src/transformers/models/phi3/configuration_phi3.py index e835c50f63eed5..dfa576ad61f837 100644 --- a/src/transformers/models/phi3/configuration_phi3.py +++ b/src/transformers/models/phi3/configuration_phi3.py @@ -22,11 +22,6 @@ logger = logging.get_logger(__name__) -PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "microsoft/Phi-3-mini-4k-instruct": 
"https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/config.json", - "microsoft/Phi-3-mini-128k-instruct": "https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/config.json", -} - class Phi3Config(PretrainedConfig): r""" diff --git a/src/transformers/models/phi3/modeling_phi3.py b/src/transformers/models/phi3/modeling_phi3.py index 530c22a87449d7..db88d607a36584 100644 --- a/src/transformers/models/phi3/modeling_phi3.py +++ b/src/transformers/models/phi3/modeling_phi3.py @@ -59,12 +59,6 @@ _CHECKPOINT_FOR_DOC = "microsoft/Phi-3-mini-4k-instruct" _CONFIG_FOR_DOC = "Phi3Config" -PHI3_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "microsoft/Phi-3-mini-4k-instruct", - "microsoft/Phi-3-mini-128k-instruct", - # See all Phi-3 models at https://huggingface.co/models?filter=Phi-3 -] - # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Phi3 class Phi3RMSNorm(nn.Module): diff --git a/src/transformers/models/pix2struct/__init__.py b/src/transformers/models/pix2struct/__init__.py index 8b395b31d8be19..581d5d7240c664 100644 --- a/src/transformers/models/pix2struct/__init__.py +++ b/src/transformers/models/pix2struct/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_pix2struct": [ - "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Pix2StructConfig", "Pix2StructTextConfig", "Pix2StructVisionConfig", @@ -42,7 +41,6 @@ pass else: _import_structure["modeling_pix2struct"] = [ - "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST", "Pix2StructPreTrainedModel", "Pix2StructForConditionalGeneration", "Pix2StructVisionModel", @@ -51,7 +49,6 @@ if TYPE_CHECKING: from .configuration_pix2struct import ( - PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig, @@ -73,7 +70,6 @@ pass else: from .modeling_pix2struct import ( - PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, Pix2StructForConditionalGeneration, Pix2StructPreTrainedModel, Pix2StructTextModel, diff --git 
a/src/transformers/models/pix2struct/configuration_pix2struct.py b/src/transformers/models/pix2struct/configuration_pix2struct.py index 12bf998d58c00a..2ad2509e441d25 100644 --- a/src/transformers/models/pix2struct/configuration_pix2struct.py +++ b/src/transformers/models/pix2struct/configuration_pix2struct.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Pix2StructTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Pix2StructTextModel`]. It is used to instantiate diff --git a/src/transformers/models/pix2struct/modeling_pix2struct.py b/src/transformers/models/pix2struct/modeling_pix2struct.py index e8032fcef6690b..86ccb1dd740786 100644 --- a/src/transformers/models/pix2struct/modeling_pix2struct.py +++ b/src/transformers/models/pix2struct/modeling_pix2struct.py @@ -49,9 +49,6 @@ _CONFIG_FOR_DOC = "Pix2StructConfig" -from ..deprecated._archive_maps import PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Adapted from transformers.models.t5.modeling_t5.T5LayerNorm with T5->Pix2Struct class Pix2StructLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): diff --git a/src/transformers/models/plbart/__init__.py b/src/transformers/models/plbart/__init__.py index ade03d8aa5cdf8..cd4c46fad3dd7d 100644 --- a/src/transformers/models/plbart/__init__.py +++ b/src/transformers/models/plbart/__init__.py @@ -22,7 +22,7 @@ ) -_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]} +_import_structure = {"configuration_plbart": ["PLBartConfig"]} try: if not is_sentencepiece_available(): @@ -39,7 +39,6 @@ pass else: _import_structure["modeling_plbart"] = [ - "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST", "PLBartForCausalLM", "PLBartForConditionalGeneration", "PLBartForSequenceClassification", @@ -49,7 +48,7 @@ if TYPE_CHECKING: - from 
.configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig + from .configuration_plbart import PLBartConfig try: if not is_sentencepiece_available(): @@ -66,7 +65,6 @@ pass else: from .modeling_plbart import ( - PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, diff --git a/src/transformers/models/plbart/configuration_plbart.py b/src/transformers/models/plbart/configuration_plbart.py index 555a2fcc7572ff..b899847b04c73a 100644 --- a/src/transformers/models/plbart/configuration_plbart.py +++ b/src/transformers/models/plbart/configuration_plbart.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class PLBartConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PLBartModel`]. It is used to instantiate an diff --git a/src/transformers/models/plbart/modeling_plbart.py b/src/transformers/models/plbart/modeling_plbart.py index 78200e92eb0ee7..e2b59f980cd67d 100644 --- a/src/transformers/models/plbart/modeling_plbart.py +++ b/src/transformers/models/plbart/modeling_plbart.py @@ -55,9 +55,6 @@ _CONFIG_FOR_DOC = "PLBartConfig" -from ..deprecated._archive_maps import PLBART_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.mbart.modeling_mbart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int): """ diff --git a/src/transformers/models/poolformer/__init__.py b/src/transformers/models/poolformer/__init__.py index 3a62183a23d6e2..00c345463697d4 100644 --- a/src/transformers/models/poolformer/__init__.py +++ b/src/transformers/models/poolformer/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_poolformer": [ - "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PoolFormerConfig", "PoolFormerOnnxConfig", ] @@ -40,7 +39,6 @@ pass else: 
_import_structure["modeling_poolformer"] = [ - "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "PoolFormerForImageClassification", "PoolFormerModel", "PoolFormerPreTrainedModel", @@ -49,7 +47,6 @@ if TYPE_CHECKING: from .configuration_poolformer import ( - POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) @@ -70,7 +67,6 @@ pass else: from .modeling_poolformer import ( - POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, diff --git a/src/transformers/models/poolformer/configuration_poolformer.py b/src/transformers/models/poolformer/configuration_poolformer.py index be0f18c0a31035..1f297077fe166b 100644 --- a/src/transformers/models/poolformer/configuration_poolformer.py +++ b/src/transformers/models/poolformer/configuration_poolformer.py @@ -26,9 +26,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class PoolFormerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of [`PoolFormerModel`]. 
It is used to instantiate a diff --git a/src/transformers/models/poolformer/modeling_poolformer.py b/src/transformers/models/poolformer/modeling_poolformer.py index 86297e733289be..ae4e3ea0a77813 100755 --- a/src/transformers/models/poolformer/modeling_poolformer.py +++ b/src/transformers/models/poolformer/modeling_poolformer.py @@ -44,9 +44,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ diff --git a/src/transformers/models/pop2piano/__init__.py b/src/transformers/models/pop2piano/__init__.py index 08b1e732b7df89..cd664cb8a70ce5 100644 --- a/src/transformers/models/pop2piano/__init__.py +++ b/src/transformers/models/pop2piano/__init__.py @@ -25,7 +25,7 @@ _import_structure = { - "configuration_pop2piano": ["POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Pop2PianoConfig"], + "configuration_pop2piano": ["Pop2PianoConfig"], } try: @@ -35,7 +35,6 @@ pass else: _import_structure["modeling_pop2piano"] = [ - "POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST", "Pop2PianoForConditionalGeneration", "Pop2PianoPreTrainedModel", ] @@ -72,7 +71,7 @@ if TYPE_CHECKING: - from .configuration_pop2piano import POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP, Pop2PianoConfig + from .configuration_pop2piano import Pop2PianoConfig try: if not is_torch_available(): @@ -81,7 +80,6 @@ pass else: from .modeling_pop2piano import ( - POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST, Pop2PianoForConditionalGeneration, Pop2PianoPreTrainedModel, ) diff --git a/src/transformers/models/pop2piano/configuration_pop2piano.py b/src/transformers/models/pop2piano/configuration_pop2piano.py index ff0d4f37b23e0b..8bb46b008d846b 100644 --- a/src/transformers/models/pop2piano/configuration_pop2piano.py +++ 
b/src/transformers/models/pop2piano/configuration_pop2piano.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Pop2PianoConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Pop2PianoForConditionalGeneration`]. It is used diff --git a/src/transformers/models/pop2piano/modeling_pop2piano.py b/src/transformers/models/pop2piano/modeling_pop2piano.py index c85135ccfea2d9..e46f2907f4a019 100644 --- a/src/transformers/models/pop2piano/modeling_pop2piano.py +++ b/src/transformers/models/pop2piano/modeling_pop2piano.py @@ -65,9 +65,6 @@ _CHECKPOINT_FOR_DOC = "sweetcocoa/pop2piano" -from ..deprecated._archive_maps import POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - POP2PIANO_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): diff --git a/src/transformers/models/prophetnet/__init__.py b/src/transformers/models/prophetnet/__init__.py index 083301cc20c677..2e1a1ac6101483 100644 --- a/src/transformers/models/prophetnet/__init__.py +++ b/src/transformers/models/prophetnet/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_prophetnet": ["PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ProphetNetConfig"], + "configuration_prophetnet": ["ProphetNetConfig"], "tokenization_prophetnet": ["ProphetNetTokenizer"], } @@ -29,7 +29,6 @@ pass else: _import_structure["modeling_prophetnet"] = [ - "PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST", "ProphetNetDecoder", "ProphetNetEncoder", "ProphetNetForCausalLM", @@ -40,7 +39,7 @@ if TYPE_CHECKING: - from .configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig + from .configuration_prophetnet import ProphetNetConfig from .tokenization_prophetnet import ProphetNetTokenizer try: @@ -50,7 +49,6 @@ pass else: from .modeling_prophetnet import ( - 
PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, ProphetNetDecoder, ProphetNetEncoder, ProphetNetForCausalLM, diff --git a/src/transformers/models/prophetnet/configuration_prophetnet.py b/src/transformers/models/prophetnet/configuration_prophetnet.py index e07936a14cd302..1b40c9a2c07cd8 100644 --- a/src/transformers/models/prophetnet/configuration_prophetnet.py +++ b/src/transformers/models/prophetnet/configuration_prophetnet.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ProphetNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ProphetNetModel`]. It is used to instantiate a diff --git a/src/transformers/models/prophetnet/modeling_prophetnet.py b/src/transformers/models/prophetnet/modeling_prophetnet.py index c7d9028cdaf709..b7eca9c2b3eacc 100644 --- a/src/transformers/models/prophetnet/modeling_prophetnet.py +++ b/src/transformers/models/prophetnet/modeling_prophetnet.py @@ -44,9 +44,6 @@ _CHECKPOINT_FOR_DOC = "microsoft/prophetnet-large-uncased" -from ..deprecated._archive_maps import PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - PROPHETNET_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads diff --git a/src/transformers/models/pvt/__init__.py b/src/transformers/models/pvt/__init__.py index cab5af9af7c997..1ee7092f0c460a 100644 --- a/src/transformers/models/pvt/__init__.py +++ b/src/transformers/models/pvt/__init__.py @@ -25,7 +25,7 @@ _import_structure = { - "configuration_pvt": ["PVT_PRETRAINED_CONFIG_ARCHIVE_MAP", "PvtConfig", "PvtOnnxConfig"], + "configuration_pvt": ["PvtConfig", "PvtOnnxConfig"], } try: @@ -43,7 +43,6 @@ pass else: _import_structure["modeling_pvt"] = [ - "PVT_PRETRAINED_MODEL_ARCHIVE_LIST", "PvtForImageClassification", "PvtModel", "PvtPreTrainedModel", @@ -51,7 +50,7 @@ if TYPE_CHECKING: - from .configuration_pvt import PVT_PRETRAINED_CONFIG_ARCHIVE_MAP, PvtConfig, PvtOnnxConfig + from .configuration_pvt import PvtConfig, PvtOnnxConfig try: if not is_vision_available(): @@ -68,7 +67,6 @@ pass else: from .modeling_pvt import ( - PVT_PRETRAINED_MODEL_ARCHIVE_LIST, PvtForImageClassification, PvtModel, PvtPreTrainedModel, diff --git a/src/transformers/models/pvt/configuration_pvt.py b/src/transformers/models/pvt/configuration_pvt.py index 7fc99b49cf0d78..82b48224354038 100644 --- a/src/transformers/models/pvt/configuration_pvt.py +++ b/src/transformers/models/pvt/configuration_pvt.py @@ -29,9 +29,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import PVT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class PvtConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PvtModel`]. 
It is used to instantiate an Pvt diff --git a/src/transformers/models/pvt/modeling_pvt.py b/src/transformers/models/pvt/modeling_pvt.py index 4574ca37876039..f849b42e9c7fa2 100755 --- a/src/transformers/models/pvt/modeling_pvt.py +++ b/src/transformers/models/pvt/modeling_pvt.py @@ -50,9 +50,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import PVT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ diff --git a/src/transformers/models/qdqbert/__init__.py b/src/transformers/models/qdqbert/__init__.py index 3d161192d81b0d..d413aefe0c7c5a 100644 --- a/src/transformers/models/qdqbert/__init__.py +++ b/src/transformers/models/qdqbert/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available -_import_structure = {"configuration_qdqbert": ["QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "QDQBertConfig"]} +_import_structure = {"configuration_qdqbert": ["QDQBertConfig"]} try: if not is_torch_available(): @@ -25,7 +25,6 @@ pass else: _import_structure["modeling_qdqbert"] = [ - "QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "QDQBertForMaskedLM", "QDQBertForMultipleChoice", "QDQBertForNextSentencePrediction", @@ -41,7 +40,7 @@ if TYPE_CHECKING: - from .configuration_qdqbert import QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, QDQBertConfig + from .configuration_qdqbert import QDQBertConfig try: if not is_torch_available(): @@ -50,7 +49,6 @@ pass else: from .modeling_qdqbert import ( - QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST, QDQBertForMaskedLM, QDQBertForMultipleChoice, QDQBertForNextSentencePrediction, diff --git a/src/transformers/models/qdqbert/configuration_qdqbert.py b/src/transformers/models/qdqbert/configuration_qdqbert.py index 9a48424cc063c1..40ae3cc3108aa2 100644 --- a/src/transformers/models/qdqbert/configuration_qdqbert.py 
+++ b/src/transformers/models/qdqbert/configuration_qdqbert.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class QDQBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`QDQBertModel`]. It is used to instantiate an diff --git a/src/transformers/models/qdqbert/modeling_qdqbert.py b/src/transformers/models/qdqbert/modeling_qdqbert.py index 7f1916dc80bf5c..a10979b91637dc 100755 --- a/src/transformers/models/qdqbert/modeling_qdqbert.py +++ b/src/transformers/models/qdqbert/modeling_qdqbert.py @@ -70,9 +70,6 @@ _CONFIG_FOR_DOC = "QDQBertConfig" -from ..deprecated._archive_maps import QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def load_tf_weights_in_qdqbert(model, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: diff --git a/src/transformers/models/qwen2/__init__.py b/src/transformers/models/qwen2/__init__.py index 9fd51aaffee86c..3409f28214d1fd 100644 --- a/src/transformers/models/qwen2/__init__.py +++ b/src/transformers/models/qwen2/__init__.py @@ -22,7 +22,7 @@ _import_structure = { - "configuration_qwen2": ["QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Qwen2Config"], + "configuration_qwen2": ["Qwen2Config"], "tokenization_qwen2": ["Qwen2Tokenizer"], } @@ -49,7 +49,7 @@ if TYPE_CHECKING: - from .configuration_qwen2 import QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP, Qwen2Config + from .configuration_qwen2 import Qwen2Config from .tokenization_qwen2 import Qwen2Tokenizer try: diff --git a/src/transformers/models/qwen2/configuration_qwen2.py b/src/transformers/models/qwen2/configuration_qwen2.py index 2513866d3e62d8..c2a99dfa8b2a49 100644 --- a/src/transformers/models/qwen2/configuration_qwen2.py +++ b/src/transformers/models/qwen2/configuration_qwen2.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP # 
noqa: F401, E402 - - class Qwen2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Qwen2Model`]. It is used to instantiate a diff --git a/src/transformers/models/qwen2_moe/__init__.py b/src/transformers/models/qwen2_moe/__init__.py index f083b454d554a0..fb123832787f1f 100644 --- a/src/transformers/models/qwen2_moe/__init__.py +++ b/src/transformers/models/qwen2_moe/__init__.py @@ -21,7 +21,7 @@ _import_structure = { - "configuration_qwen2_moe": ["QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "Qwen2MoeConfig"], + "configuration_qwen2_moe": ["Qwen2MoeConfig"], } @@ -40,7 +40,7 @@ if TYPE_CHECKING: - from .configuration_qwen2_moe import QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, Qwen2MoeConfig + from .configuration_qwen2_moe import Qwen2MoeConfig try: if not is_torch_available(): diff --git a/src/transformers/models/qwen2_moe/configuration_qwen2_moe.py b/src/transformers/models/qwen2_moe/configuration_qwen2_moe.py index e3f516ed9c2de4..b16a358f20e74b 100644 --- a/src/transformers/models/qwen2_moe/configuration_qwen2_moe.py +++ b/src/transformers/models/qwen2_moe/configuration_qwen2_moe.py @@ -20,10 +20,6 @@ logger = logging.get_logger(__name__) -QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "Qwen/Qwen1.5-MoE-A2.7B": "https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B/resolve/main/config.json", -} - class Qwen2MoeConfig(PretrainedConfig): r""" diff --git a/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py b/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py index ca349dca1c1bc3..ef1dd23cde55de 100644 --- a/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py +++ b/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py @@ -56,11 +56,6 @@ _CHECKPOINT_FOR_DOC = "Qwen/Qwen1.5-MoE-A2.7B" _CONFIG_FOR_DOC = "Qwen2MoeConfig" -QWEN2MOE_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "Qwen/Qwen1.5-MoE-A2.7B", - # See all Qwen2 models at https://huggingface.co/models?filter=qwen2 -] - # Copied from 
transformers.models.mixtral.modeling_mixtral.load_balancing_loss_func def load_balancing_loss_func( diff --git a/src/transformers/models/realm/__init__.py b/src/transformers/models/realm/__init__.py index 594ce0c35e382f..eea7384673792a 100644 --- a/src/transformers/models/realm/__init__.py +++ b/src/transformers/models/realm/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_realm": ["REALM_PRETRAINED_CONFIG_ARCHIVE_MAP", "RealmConfig"], + "configuration_realm": ["RealmConfig"], "tokenization_realm": ["RealmTokenizer"], } @@ -36,7 +36,6 @@ pass else: _import_structure["modeling_realm"] = [ - "REALM_PRETRAINED_MODEL_ARCHIVE_LIST", "RealmEmbedder", "RealmForOpenQA", "RealmKnowledgeAugEncoder", @@ -49,7 +48,7 @@ if TYPE_CHECKING: - from .configuration_realm import REALM_PRETRAINED_CONFIG_ARCHIVE_MAP, RealmConfig + from .configuration_realm import RealmConfig from .tokenization_realm import RealmTokenizer try: @@ -67,7 +66,6 @@ pass else: from .modeling_realm import ( - REALM_PRETRAINED_MODEL_ARCHIVE_LIST, RealmEmbedder, RealmForOpenQA, RealmKnowledgeAugEncoder, diff --git a/src/transformers/models/realm/configuration_realm.py b/src/transformers/models/realm/configuration_realm.py index 3725c37922a6ad..fd21f44a558de8 100644 --- a/src/transformers/models/realm/configuration_realm.py +++ b/src/transformers/models/realm/configuration_realm.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import REALM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class RealmConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of diff --git a/src/transformers/models/realm/modeling_realm.py b/src/transformers/models/realm/modeling_realm.py index 3753ba9dd28d01..ff5e43bc13a700 100644 --- a/src/transformers/models/realm/modeling_realm.py +++ b/src/transformers/models/realm/modeling_realm.py @@ -43,9 +43,6 @@ _CONFIG_FOR_DOC = "RealmConfig" -from ..deprecated._archive_maps import 
REALM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def load_tf_weights_in_realm(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: diff --git a/src/transformers/models/reformer/__init__.py b/src/transformers/models/reformer/__init__.py index 37508ef808e083..ef13dd7c312dd0 100644 --- a/src/transformers/models/reformer/__init__.py +++ b/src/transformers/models/reformer/__init__.py @@ -23,7 +23,7 @@ ) -_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} +_import_structure = {"configuration_reformer": ["ReformerConfig"]} try: if not is_sentencepiece_available(): @@ -48,7 +48,6 @@ pass else: _import_structure["modeling_reformer"] = [ - "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", @@ -61,7 +60,7 @@ if TYPE_CHECKING: - from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig + from .configuration_reformer import ReformerConfig try: if not is_sentencepiece_available(): @@ -86,7 +85,6 @@ pass else: from .modeling_reformer import ( - REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, diff --git a/src/transformers/models/reformer/configuration_reformer.py b/src/transformers/models/reformer/configuration_reformer.py index 35e8628ce0fa45..eecd67cc06ba08 100755 --- a/src/transformers/models/reformer/configuration_reformer.py +++ b/src/transformers/models/reformer/configuration_reformer.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ReformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ReformerModel`]. 
It is used to instantiate a diff --git a/src/transformers/models/reformer/modeling_reformer.py b/src/transformers/models/reformer/modeling_reformer.py index bfb8fb5ebe1cfa..2e98a07217e682 100755 --- a/src/transformers/models/reformer/modeling_reformer.py +++ b/src/transformers/models/reformer/modeling_reformer.py @@ -51,9 +51,6 @@ _CONFIG_FOR_DOC = "ReformerConfig" -from ..deprecated._archive_maps import REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Define named tuples for nn.Modules here LSHSelfAttentionOutput = namedtuple("LSHSelfAttentionOutput", ["hidden_states", "attention_probs", "buckets"]) LocalSelfAttentionOutput = namedtuple("LocalSelfAttentionOutput", ["hidden_states", "attention_probs"]) diff --git a/src/transformers/models/regnet/__init__.py b/src/transformers/models/regnet/__init__.py index 5084c4486008d1..25507927affde7 100644 --- a/src/transformers/models/regnet/__init__.py +++ b/src/transformers/models/regnet/__init__.py @@ -22,7 +22,7 @@ ) -_import_structure = {"configuration_regnet": ["REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "RegNetConfig"]} +_import_structure = {"configuration_regnet": ["RegNetConfig"]} try: if not is_torch_available(): @@ -31,7 +31,6 @@ pass else: _import_structure["modeling_regnet"] = [ - "REGNET_PRETRAINED_MODEL_ARCHIVE_LIST", "RegNetForImageClassification", "RegNetModel", "RegNetPreTrainedModel", @@ -44,7 +43,6 @@ pass else: _import_structure["modeling_tf_regnet"] = [ - "TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRegNetForImageClassification", "TFRegNetModel", "TFRegNetPreTrainedModel", @@ -64,7 +62,7 @@ if TYPE_CHECKING: - from .configuration_regnet import REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP, RegNetConfig + from .configuration_regnet import RegNetConfig try: if not is_torch_available(): @@ -73,7 +71,6 @@ pass else: from .modeling_regnet import ( - REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, RegNetForImageClassification, RegNetModel, RegNetPreTrainedModel, @@ -86,7 +83,6 @@ pass else: from .modeling_tf_regnet 
import ( - TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel, TFRegNetPreTrainedModel, diff --git a/src/transformers/models/regnet/configuration_regnet.py b/src/transformers/models/regnet/configuration_regnet.py index 629ac733917e3a..e24bc70a891c77 100644 --- a/src/transformers/models/regnet/configuration_regnet.py +++ b/src/transformers/models/regnet/configuration_regnet.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class RegNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RegNetModel`]. It is used to instantiate a RegNet diff --git a/src/transformers/models/regnet/modeling_regnet.py b/src/transformers/models/regnet/modeling_regnet.py index 2e05f8329a65c8..df8f64a03b703f 100644 --- a/src/transformers/models/regnet/modeling_regnet.py +++ b/src/transformers/models/regnet/modeling_regnet.py @@ -47,9 +47,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class RegNetConvLayer(nn.Module): def __init__( self, diff --git a/src/transformers/models/regnet/modeling_tf_regnet.py b/src/transformers/models/regnet/modeling_tf_regnet.py index a8c296027fc6c3..24ebb3f5caf5c9 100644 --- a/src/transformers/models/regnet/modeling_tf_regnet.py +++ b/src/transformers/models/regnet/modeling_tf_regnet.py @@ -51,9 +51,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFRegNetConvLayer(keras.layers.Layer): def __init__( self, diff --git a/src/transformers/models/rembert/__init__.py b/src/transformers/models/rembert/__init__.py index 98e8e2254dcfa9..5ffaf3c8c04cf3 100644 --- a/src/transformers/models/rembert/__init__.py +++ b/src/transformers/models/rembert/__init__.py @@ -24,9 
+24,7 @@ ) -_import_structure = { - "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"] -} +_import_structure = {"configuration_rembert": ["RemBertConfig", "RemBertOnnxConfig"]} try: if not is_sentencepiece_available(): @@ -51,7 +49,6 @@ pass else: _import_structure["modeling_rembert"] = [ - "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RemBertForCausalLM", "RemBertForMaskedLM", "RemBertForMultipleChoice", @@ -72,7 +69,6 @@ pass else: _import_structure["modeling_tf_rembert"] = [ - "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRemBertForCausalLM", "TFRemBertForMaskedLM", "TFRemBertForMultipleChoice", @@ -86,7 +82,7 @@ if TYPE_CHECKING: - from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig + from .configuration_rembert import RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): @@ -111,7 +107,6 @@ pass else: from .modeling_rembert import ( - REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, @@ -131,7 +126,6 @@ pass else: from .modeling_tf_rembert import ( - TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, diff --git a/src/transformers/models/rembert/configuration_rembert.py b/src/transformers/models/rembert/configuration_rembert.py index fa51a79f6012b6..471f2f75213a53 100644 --- a/src/transformers/models/rembert/configuration_rembert.py +++ b/src/transformers/models/rembert/configuration_rembert.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class RemBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RemBertModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/rembert/modeling_rembert.py b/src/transformers/models/rembert/modeling_rembert.py index 9c04ed10b8e9d8..e92418fcffaaed 100755 --- a/src/transformers/models/rembert/modeling_rembert.py +++ b/src/transformers/models/rembert/modeling_rembert.py @@ -53,9 +53,6 @@ _CHECKPOINT_FOR_DOC = "google/rembert" -from ..deprecated._archive_maps import REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def load_tf_weights_in_rembert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: diff --git a/src/transformers/models/rembert/modeling_tf_rembert.py b/src/transformers/models/rembert/modeling_tf_rembert.py index 94667c25379b02..daceef108076a0 100644 --- a/src/transformers/models/rembert/modeling_tf_rembert.py +++ b/src/transformers/models/rembert/modeling_tf_rembert.py @@ -63,9 +63,6 @@ _CONFIG_FOR_DOC = "RemBertConfig" -from ..deprecated._archive_maps import TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFRemBertEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" diff --git a/src/transformers/models/resnet/__init__.py b/src/transformers/models/resnet/__init__.py index 62e6b1c2ca1a68..50b71a4dd4cf4d 100644 --- a/src/transformers/models/resnet/__init__.py +++ b/src/transformers/models/resnet/__init__.py @@ -22,9 +22,7 @@ ) -_import_structure = { - "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"] -} +_import_structure = {"configuration_resnet": ["ResNetConfig", "ResNetOnnxConfig"]} try: if not is_torch_available(): @@ -33,7 +31,6 @@ pass else: _import_structure["modeling_resnet"] = [ - "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", "ResNetForImageClassification", "ResNetModel", "ResNetPreTrainedModel", @@ -47,7 +44,6 @@ pass else: _import_structure["modeling_tf_resnet"] = [ - "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", 
"TFResNetForImageClassification", "TFResNetModel", "TFResNetPreTrainedModel", @@ -66,7 +62,7 @@ ] if TYPE_CHECKING: - from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig + from .configuration_resnet import ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): @@ -75,7 +71,6 @@ pass else: from .modeling_resnet import ( - RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, @@ -89,7 +84,6 @@ pass else: from .modeling_tf_resnet import ( - TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, diff --git a/src/transformers/models/resnet/configuration_resnet.py b/src/transformers/models/resnet/configuration_resnet.py index 8e1938cb9ce986..46ccd96cd9ccfc 100644 --- a/src/transformers/models/resnet/configuration_resnet.py +++ b/src/transformers/models/resnet/configuration_resnet.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ResNetConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ResNetModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/resnet/modeling_resnet.py b/src/transformers/models/resnet/modeling_resnet.py index 560e807c24312c..06af704603ca62 100644 --- a/src/transformers/models/resnet/modeling_resnet.py +++ b/src/transformers/models/resnet/modeling_resnet.py @@ -54,9 +54,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat" -from ..deprecated._archive_maps import RESNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class ResNetConvLayer(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu" diff --git a/src/transformers/models/resnet/modeling_tf_resnet.py b/src/transformers/models/resnet/modeling_tf_resnet.py index 98e9a32d293fe4..4d68775c922ae9 100644 --- a/src/transformers/models/resnet/modeling_tf_resnet.py +++ b/src/transformers/models/resnet/modeling_tf_resnet.py @@ -50,9 +50,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat" -from ..deprecated._archive_maps import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFResNetConvLayer(keras.layers.Layer): def __init__( self, diff --git a/src/transformers/models/roberta/__init__.py b/src/transformers/models/roberta/__init__.py index 774179f5f6f445..4a97962f4f5704 100644 --- a/src/transformers/models/roberta/__init__.py +++ b/src/transformers/models/roberta/__init__.py @@ -25,7 +25,7 @@ _import_structure = { - "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"], + "configuration_roberta": ["RobertaConfig", "RobertaOnnxConfig"], "tokenization_roberta": ["RobertaTokenizer"], } @@ -44,7 +44,6 @@ pass else: _import_structure["modeling_roberta"] = [ - "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaForCausalLM", "RobertaForMaskedLM", "RobertaForMultipleChoice", @@ -62,7 +61,6 @@ pass else: _import_structure["modeling_tf_roberta"] = [ - "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaForCausalLM", "TFRobertaForMaskedLM", 
"TFRobertaForMultipleChoice", @@ -93,7 +91,7 @@ if TYPE_CHECKING: - from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig + from .configuration_roberta import RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: @@ -111,7 +109,6 @@ pass else: from .modeling_roberta import ( - ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, @@ -129,7 +126,6 @@ pass else: from .modeling_tf_roberta import ( - TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, diff --git a/src/transformers/models/roberta/configuration_roberta.py b/src/transformers/models/roberta/configuration_roberta.py index aa549556d949fd..0ecd57b23a5ddf 100644 --- a/src/transformers/models/roberta/configuration_roberta.py +++ b/src/transformers/models/roberta/configuration_roberta.py @@ -25,9 +25,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class RobertaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RobertaModel`] or a [`TFRobertaModel`]. It is diff --git a/src/transformers/models/roberta/modeling_roberta.py b/src/transformers/models/roberta/modeling_roberta.py index 640139212081ca..112ae351b5105f 100644 --- a/src/transformers/models/roberta/modeling_roberta.py +++ b/src/transformers/models/roberta/modeling_roberta.py @@ -52,9 +52,6 @@ _CONFIG_FOR_DOC = "RobertaConfig" -from ..deprecated._archive_maps import ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class RobertaEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
diff --git a/src/transformers/models/roberta/modeling_tf_roberta.py b/src/transformers/models/roberta/modeling_tf_roberta.py index f48bb796c17b4c..84448a168defdb 100644 --- a/src/transformers/models/roberta/modeling_tf_roberta.py +++ b/src/transformers/models/roberta/modeling_tf_roberta.py @@ -66,9 +66,6 @@ _CONFIG_FOR_DOC = "RobertaConfig" -from ..deprecated._archive_maps import TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFRobertaEmbeddings(keras.layers.Layer): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. diff --git a/src/transformers/models/roberta_prelayernorm/__init__.py b/src/transformers/models/roberta_prelayernorm/__init__.py index e2dcaa71be54da..9f55eed11c4224 100644 --- a/src/transformers/models/roberta_prelayernorm/__init__.py +++ b/src/transformers/models/roberta_prelayernorm/__init__.py @@ -25,7 +25,6 @@ _import_structure = { "configuration_roberta_prelayernorm": [ - "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaPreLayerNormConfig", "RobertaPreLayerNormOnnxConfig", ], @@ -38,7 +37,6 @@ pass else: _import_structure["modeling_roberta_prelayernorm"] = [ - "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaPreLayerNormForCausalLM", "RobertaPreLayerNormForMaskedLM", "RobertaPreLayerNormForMultipleChoice", @@ -56,7 +54,6 @@ pass else: _import_structure["modeling_tf_roberta_prelayernorm"] = [ - "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaPreLayerNormForCausalLM", "TFRobertaPreLayerNormForMaskedLM", "TFRobertaPreLayerNormForMultipleChoice", @@ -88,7 +85,6 @@ if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( - ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) @@ -100,7 +96,6 @@ pass else: from .modeling_roberta_prelayernorm import ( - ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, 
RobertaPreLayerNormForMultipleChoice, @@ -118,7 +113,6 @@ pass else: from .modeling_tf_roberta_prelayernorm import ( - TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, diff --git a/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py index 379a71abf1fbb1..e7e74b0cdf24d1 100644 --- a/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py @@ -25,9 +25,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - # Copied from transformers.models.roberta.configuration_roberta.RobertaConfig with FacebookAI/roberta-base->andreasmadsen/efficient_mlm_m0.40,RoBERTa->RoBERTa-PreLayerNorm,Roberta->RobertaPreLayerNorm,roberta->roberta-prelayernorm class RobertaPreLayerNormConfig(PretrainedConfig): r""" diff --git a/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py index 468cb1a243ca89..cfbf5e11aa233d 100644 --- a/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py @@ -52,9 +52,6 @@ _CONFIG_FOR_DOC = "RobertaPreLayerNormConfig" -from ..deprecated._archive_maps import ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->RobertaPreLayerNorm class RobertaPreLayerNormEmbeddings(nn.Module): """ diff --git a/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py 
b/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py index b3a0070788eaf7..beb9c383e1d4ff 100644 --- a/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py @@ -66,9 +66,6 @@ _CONFIG_FOR_DOC = "RobertaPreLayerNormConfig" -from ..deprecated._archive_maps import TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaEmbeddings with Roberta->RobertaPreLayerNorm class TFRobertaPreLayerNormEmbeddings(keras.layers.Layer): """ diff --git a/src/transformers/models/roc_bert/__init__.py b/src/transformers/models/roc_bert/__init__.py index 344bcfa41654d1..9971c53975d49a 100644 --- a/src/transformers/models/roc_bert/__init__.py +++ b/src/transformers/models/roc_bert/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"], + "configuration_roc_bert": ["RoCBertConfig"], "tokenization_roc_bert": ["RoCBertTokenizer"], } @@ -36,7 +36,6 @@ pass else: _import_structure["modeling_roc_bert"] = [ - "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RoCBertForCausalLM", "RoCBertForMaskedLM", "RoCBertForMultipleChoice", @@ -51,7 +50,7 @@ ] if TYPE_CHECKING: - from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig + from .configuration_roc_bert import RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: @@ -69,7 +68,6 @@ pass else: from .modeling_roc_bert import ( - ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, diff --git a/src/transformers/models/roc_bert/configuration_roc_bert.py b/src/transformers/models/roc_bert/configuration_roc_bert.py index 26f74ee4c462d0..752c791cf91f7a 100644 --- a/src/transformers/models/roc_bert/configuration_roc_bert.py +++ 
b/src/transformers/models/roc_bert/configuration_roc_bert.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class RoCBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RoCBertModel`]. It is used to instantiate a diff --git a/src/transformers/models/roc_bert/modeling_roc_bert.py b/src/transformers/models/roc_bert/modeling_roc_bert.py index 9d8284461f6679..216aaf78b2b060 100644 --- a/src/transformers/models/roc_bert/modeling_roc_bert.py +++ b/src/transformers/models/roc_bert/modeling_roc_bert.py @@ -73,8 +73,6 @@ # Maske language modeling -from ..deprecated._archive_maps import ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - # Copied from transformers.models.bert.modeling_bert.load_tf_weights_in_bert with bert->roc_bert def load_tf_weights_in_roc_bert(model, config, tf_checkpoint_path): diff --git a/src/transformers/models/roformer/__init__.py b/src/transformers/models/roformer/__init__.py index 93c86eb081fa03..d9642eba59fe26 100644 --- a/src/transformers/models/roformer/__init__.py +++ b/src/transformers/models/roformer/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"], + "configuration_roformer": ["RoFormerConfig", "RoFormerOnnxConfig"], "tokenization_roformer": ["RoFormerTokenizer"], } @@ -43,7 +43,6 @@ pass else: _import_structure["modeling_roformer"] = [ - "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "RoFormerForCausalLM", "RoFormerForMaskedLM", "RoFormerForMultipleChoice", @@ -64,7 +63,6 @@ pass else: _import_structure["modeling_tf_roformer"] = [ - "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRoFormerForCausalLM", "TFRoFormerForMaskedLM", "TFRoFormerForMultipleChoice", @@ -84,7 +82,6 @@ pass else: _import_structure["modeling_flax_roformer"] = [ - 
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", @@ -96,7 +93,7 @@ if TYPE_CHECKING: - from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig + from .configuration_roformer import RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: @@ -114,7 +111,6 @@ pass else: from .modeling_roformer import ( - ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, @@ -134,7 +130,6 @@ pass else: from .modeling_tf_roformer import ( - TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, @@ -153,7 +148,6 @@ pass else: from .modeling_flax_roformer import ( - FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, diff --git a/src/transformers/models/roformer/configuration_roformer.py b/src/transformers/models/roformer/configuration_roformer.py index adde64345d9ee4..0732c3a9e09061 100644 --- a/src/transformers/models/roformer/configuration_roformer.py +++ b/src/transformers/models/roformer/configuration_roformer.py @@ -25,9 +25,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class RoFormerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RoFormerModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/roformer/modeling_roformer.py b/src/transformers/models/roformer/modeling_roformer.py index f7589d4853d581..2911d98d31ec59 100644 --- a/src/transformers/models/roformer/modeling_roformer.py +++ b/src/transformers/models/roformer/modeling_roformer.py @@ -53,9 +53,6 @@ _CONFIG_FOR_DOC = "RoFormerConfig" -from ..deprecated._archive_maps import ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->RoFormer class RoFormerSinusoidalPositionalEmbedding(nn.Embedding): """This module produces sinusoidal positional embeddings of any length.""" diff --git a/src/transformers/models/roformer/modeling_tf_roformer.py b/src/transformers/models/roformer/modeling_tf_roformer.py index 3c1ba63ce1863c..e3f84cc78aa23a 100644 --- a/src/transformers/models/roformer/modeling_tf_roformer.py +++ b/src/transformers/models/roformer/modeling_tf_roformer.py @@ -65,9 +65,6 @@ _CONFIG_FOR_DOC = "RoFormerConfig" -from ..deprecated._archive_maps import TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFRoFormerSinusoidalPositionalEmbedding(keras.layers.Layer): """This module produces sinusoidal positional embeddings of any length.""" diff --git a/src/transformers/models/rwkv/__init__.py b/src/transformers/models/rwkv/__init__.py index e68eefe9f8aaa5..2cbfd94bac7bb1 100644 --- a/src/transformers/models/rwkv/__init__.py +++ b/src/transformers/models/rwkv/__init__.py @@ -22,7 +22,7 @@ _import_structure = { - "configuration_rwkv": ["RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP", "RwkvConfig", "RwkvOnnxConfig"], + "configuration_rwkv": ["RwkvConfig", "RwkvOnnxConfig"], } try: @@ -32,7 +32,6 @@ pass else: _import_structure["modeling_rwkv"] = [ - "RWKV_PRETRAINED_MODEL_ARCHIVE_LIST", "RwkvForCausalLM", "RwkvModel", "RwkvPreTrainedModel", @@ -40,7 +39,7 @@ if TYPE_CHECKING: - from .configuration_rwkv import 
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP, RwkvConfig, RwkvOnnxConfig + from .configuration_rwkv import RwkvConfig, RwkvOnnxConfig try: if not is_torch_available(): @@ -49,7 +48,6 @@ pass else: from .modeling_rwkv import ( - RWKV_PRETRAINED_MODEL_ARCHIVE_LIST, RwkvForCausalLM, RwkvModel, RwkvPreTrainedModel, diff --git a/src/transformers/models/rwkv/configuration_rwkv.py b/src/transformers/models/rwkv/configuration_rwkv.py index 5e0598dad5c424..57b74123335a44 100644 --- a/src/transformers/models/rwkv/configuration_rwkv.py +++ b/src/transformers/models/rwkv/configuration_rwkv.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class RwkvConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`RwkvModel`]. It is used to instantiate a RWKV diff --git a/src/transformers/models/rwkv/modeling_rwkv.py b/src/transformers/models/rwkv/modeling_rwkv.py index 79e06d141bb846..d9e4bfadf32013 100644 --- a/src/transformers/models/rwkv/modeling_rwkv.py +++ b/src/transformers/models/rwkv/modeling_rwkv.py @@ -45,9 +45,6 @@ _CONFIG_FOR_DOC = "RwkvConfig" -from ..deprecated._archive_maps import RWKV_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - rwkv_cuda_kernel = None diff --git a/src/transformers/models/sam/__init__.py b/src/transformers/models/sam/__init__.py index e8006e89e0f11d..672281440c1ae9 100644 --- a/src/transformers/models/sam/__init__.py +++ b/src/transformers/models/sam/__init__.py @@ -24,7 +24,6 @@ _import_structure = { "configuration_sam": [ - "SAM_PRETRAINED_CONFIG_ARCHIVE_MAP", "SamConfig", "SamMaskDecoderConfig", "SamPromptEncoderConfig", @@ -41,7 +40,6 @@ pass else: _import_structure["modeling_sam"] = [ - "SAM_PRETRAINED_MODEL_ARCHIVE_LIST", "SamModel", "SamPreTrainedModel", ] @@ -52,7 +50,6 @@ pass else: _import_structure["modeling_tf_sam"] = [ - "TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSamModel", "TFSamPreTrainedModel", ] 
@@ -67,7 +64,6 @@ if TYPE_CHECKING: from .configuration_sam import ( - SAM_PRETRAINED_CONFIG_ARCHIVE_MAP, SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, @@ -81,7 +77,7 @@ except OptionalDependencyNotAvailable: pass else: - from .modeling_sam import SAM_PRETRAINED_MODEL_ARCHIVE_LIST, SamModel, SamPreTrainedModel + from .modeling_sam import SamModel, SamPreTrainedModel try: if not is_tf_available(): @@ -89,7 +85,7 @@ except OptionalDependencyNotAvailable: pass else: - from .modeling_tf_sam import TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST, TFSamModel, TFSamPreTrainedModel + from .modeling_tf_sam import TFSamModel, TFSamPreTrainedModel try: if not is_vision_available(): diff --git a/src/transformers/models/sam/configuration_sam.py b/src/transformers/models/sam/configuration_sam.py index 5afe75eb8eae43..63dc5ff6307941 100644 --- a/src/transformers/models/sam/configuration_sam.py +++ b/src/transformers/models/sam/configuration_sam.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SAM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class SamPromptEncoderConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SamPromptEncoder`]. 
The [`SamPromptEncoder`] diff --git a/src/transformers/models/sam/modeling_sam.py b/src/transformers/models/sam/modeling_sam.py index 3203031cc9a2e4..023457261155cd 100644 --- a/src/transformers/models/sam/modeling_sam.py +++ b/src/transformers/models/sam/modeling_sam.py @@ -38,9 +38,6 @@ _CHECKPOINT_FOR_DOC = "facebook/sam-vit-huge" -from ..deprecated._archive_maps import SAM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class SamVisionEncoderOutput(ModelOutput): """ diff --git a/src/transformers/models/sam/modeling_tf_sam.py b/src/transformers/models/sam/modeling_tf_sam.py index f527337cd6cdaa..5da1293e0f4d74 100644 --- a/src/transformers/models/sam/modeling_tf_sam.py +++ b/src/transformers/models/sam/modeling_tf_sam.py @@ -41,9 +41,6 @@ _CHECKPOINT_FOR_DOC = "facebook/sam-vit-huge" -from ..deprecated._archive_maps import TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class TFSamVisionEncoderOutput(ModelOutput): """ diff --git a/src/transformers/models/seamless_m4t/__init__.py b/src/transformers/models/seamless_m4t/__init__.py index 3167311a5a6ef7..56b04e76b62ca6 100644 --- a/src/transformers/models/seamless_m4t/__init__.py +++ b/src/transformers/models/seamless_m4t/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_seamless_m4t": ["SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP", "SeamlessM4TConfig"], + "configuration_seamless_m4t": ["SeamlessM4TConfig"], "feature_extraction_seamless_m4t": ["SeamlessM4TFeatureExtractor"], "processing_seamless_m4t": ["SeamlessM4TProcessor"], } @@ -51,7 +51,6 @@ pass else: _import_structure["modeling_seamless_m4t"] = [ - "SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST", "SeamlessM4TForTextToSpeech", "SeamlessM4TForSpeechToSpeech", "SeamlessM4TForTextToText", @@ -65,7 +64,7 @@ ] if TYPE_CHECKING: - from .configuration_seamless_m4t import SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP, SeamlessM4TConfig + from .configuration_seamless_m4t import SeamlessM4TConfig from 
.feature_extraction_seamless_m4t import SeamlessM4TFeatureExtractor from .processing_seamless_m4t import SeamlessM4TProcessor @@ -92,7 +91,6 @@ pass else: from .modeling_seamless_m4t import ( - SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST, SeamlessM4TCodeHifiGan, SeamlessM4TForSpeechToSpeech, SeamlessM4TForSpeechToText, diff --git a/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py b/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py index 8ae61f1defece6..2075c65fa89abe 100644 --- a/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class SeamlessM4TConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`~SeamlessM4TModel`]. It is used to instantiate an diff --git a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py index bfc0fb5aeb752d..b23b30056ee2a6 100755 --- a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py @@ -51,12 +51,6 @@ _CONFIG_FOR_DOC = "SeamlessM4TConfig" -from ..deprecated._archive_maps import ( # noqa: F401, E402 - SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402 - SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, # noqa: F401, E402 -) - - @dataclass class SeamlessM4TGenerationOutput(ModelOutput): """ diff --git a/src/transformers/models/seamless_m4t_v2/__init__.py b/src/transformers/models/seamless_m4t_v2/__init__.py index ebc4caef2da10a..5fde6a5d332a39 100644 --- a/src/transformers/models/seamless_m4t_v2/__init__.py +++ b/src/transformers/models/seamless_m4t_v2/__init__.py @@ -21,7 +21,7 @@ _import_structure = { - "configuration_seamless_m4t_v2": 
["SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "SeamlessM4Tv2Config"], + "configuration_seamless_m4t_v2": ["SeamlessM4Tv2Config"], } try: @@ -31,7 +31,6 @@ pass else: _import_structure["modeling_seamless_m4t_v2"] = [ - "SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST", "SeamlessM4Tv2ForTextToSpeech", "SeamlessM4Tv2ForSpeechToSpeech", "SeamlessM4Tv2ForTextToText", @@ -41,7 +40,7 @@ ] if TYPE_CHECKING: - from .configuration_seamless_m4t_v2 import SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, SeamlessM4Tv2Config + from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config try: if not is_torch_available(): @@ -50,7 +49,6 @@ pass else: from .modeling_seamless_m4t_v2 import ( - SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST, SeamlessM4Tv2ForSpeechToSpeech, SeamlessM4Tv2ForSpeechToText, SeamlessM4Tv2ForTextToSpeech, diff --git a/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py b/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py index e03523d3e0d8b4..051a86cba1e96c 100644 --- a/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py +++ b/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class SeamlessM4Tv2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`~SeamlessM4Tv2Model`]. 
It is used to instantiate diff --git a/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py b/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py index 238e089c31f4a6..526b697300d77a 100644 --- a/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +++ b/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py @@ -51,14 +51,6 @@ _CONFIG_FOR_DOC = "SeamlessM4Tv2Config" -from ..deprecated._archive_maps import SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - -SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP = { - "microsoft/speecht5_hifigan": "https://huggingface.co/microsoft/speecht5_hifigan/resolve/main/config.json", -} - - @dataclass # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TGenerationOutput with SeamlessM4T->SeamlessM4Tv2 class SeamlessM4Tv2GenerationOutput(ModelOutput): diff --git a/src/transformers/models/segformer/__init__.py b/src/transformers/models/segformer/__init__.py index 22dc3655b889b5..8d8cccdf39ff42 100644 --- a/src/transformers/models/segformer/__init__.py +++ b/src/transformers/models/segformer/__init__.py @@ -22,9 +22,7 @@ ) -_import_structure = { - "configuration_segformer": ["SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegformerConfig", "SegformerOnnxConfig"] -} +_import_structure = {"configuration_segformer": ["SegformerConfig", "SegformerOnnxConfig"]} try: if not is_vision_available(): @@ -42,7 +40,6 @@ pass else: _import_structure["modeling_segformer"] = [ - "SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "SegformerDecodeHead", "SegformerForImageClassification", "SegformerForSemanticSegmentation", @@ -58,7 +55,6 @@ pass else: _import_structure["modeling_tf_segformer"] = [ - "TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSegformerDecodeHead", "TFSegformerForImageClassification", "TFSegformerForSemanticSegmentation", @@ -68,7 +64,7 @@ if TYPE_CHECKING: - from .configuration_segformer import SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, 
SegformerConfig, SegformerOnnxConfig + from .configuration_segformer import SegformerConfig, SegformerOnnxConfig try: if not is_vision_available(): @@ -86,7 +82,6 @@ pass else: from .modeling_segformer import ( - SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SegformerDecodeHead, SegformerForImageClassification, SegformerForSemanticSegmentation, @@ -101,7 +96,6 @@ pass else: from .modeling_tf_segformer import ( - TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFSegformerDecodeHead, TFSegformerForImageClassification, TFSegformerForSemanticSegmentation, diff --git a/src/transformers/models/segformer/configuration_segformer.py b/src/transformers/models/segformer/configuration_segformer.py index aba2693ba33bbf..6aadb64b6fe683 100644 --- a/src/transformers/models/segformer/configuration_segformer.py +++ b/src/transformers/models/segformer/configuration_segformer.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class SegformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SegformerModel`]. It is used to instantiate an diff --git a/src/transformers/models/segformer/modeling_segformer.py b/src/transformers/models/segformer/modeling_segformer.py index d1205630dd1042..d47219a0bd0d23 100755 --- a/src/transformers/models/segformer/modeling_segformer.py +++ b/src/transformers/models/segformer/modeling_segformer.py @@ -52,9 +52,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class SegFormerImageClassifierOutput(ImageClassifierOutput): """ Base class for outputs of image classification models. 
diff --git a/src/transformers/models/segformer/modeling_tf_segformer.py b/src/transformers/models/segformer/modeling_tf_segformer.py index d215059ff611ab..0657f1b437e525 100644 --- a/src/transformers/models/segformer/modeling_tf_segformer.py +++ b/src/transformers/models/segformer/modeling_tf_segformer.py @@ -56,9 +56,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextDropPath with ConvNext->Segformer class TFSegformerDropPath(keras.layers.Layer): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). diff --git a/src/transformers/models/seggpt/__init__.py b/src/transformers/models/seggpt/__init__.py index 49649c92865da6..b6095b53277ae0 100644 --- a/src/transformers/models/seggpt/__init__.py +++ b/src/transformers/models/seggpt/__init__.py @@ -16,9 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available -_import_structure = { - "configuration_seggpt": ["SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegGptConfig", "SegGptOnnxConfig"] -} +_import_structure = {"configuration_seggpt": ["SegGptConfig", "SegGptOnnxConfig"]} try: if not is_torch_available(): @@ -27,7 +25,6 @@ pass else: _import_structure["modeling_seggpt"] = [ - "SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST", "SegGptModel", "SegGptPreTrainedModel", "SegGptForImageSegmentation", @@ -42,7 +39,7 @@ _import_structure["image_processing_seggpt"] = ["SegGptImageProcessor"] if TYPE_CHECKING: - from .configuration_seggpt import SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, SegGptConfig, SegGptOnnxConfig + from .configuration_seggpt import SegGptConfig, SegGptOnnxConfig try: if not is_torch_available(): @@ -51,7 +48,6 @@ pass else: from .modeling_seggpt import ( - SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST, SegGptForImageSegmentation, SegGptModel, 
SegGptPreTrainedModel, diff --git a/src/transformers/models/seggpt/configuration_seggpt.py b/src/transformers/models/seggpt/configuration_seggpt.py index 38607d775a6582..bac482e97f875d 100644 --- a/src/transformers/models/seggpt/configuration_seggpt.py +++ b/src/transformers/models/seggpt/configuration_seggpt.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class SegGptConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SegGptModel`]. It is used to instantiate a SegGPT diff --git a/src/transformers/models/seggpt/modeling_seggpt.py b/src/transformers/models/seggpt/modeling_seggpt.py index 64cd4296f7a554..65672e421361dd 100644 --- a/src/transformers/models/seggpt/modeling_seggpt.py +++ b/src/transformers/models/seggpt/modeling_seggpt.py @@ -47,9 +47,6 @@ _EXPECTED_OUTPUT_SHAPE = [3, 896, 448] -from ..deprecated._archive_maps import SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class SegGptEncoderOutput(ModelOutput): """ diff --git a/src/transformers/models/sew/__init__.py b/src/transformers/models/sew/__init__.py index bd43be68b7c053..aba88cc45133c2 100644 --- a/src/transformers/models/sew/__init__.py +++ b/src/transformers/models/sew/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available -_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]} +_import_structure = {"configuration_sew": ["SEWConfig"]} try: if not is_torch_available(): @@ -25,7 +25,6 @@ pass else: _import_structure["modeling_sew"] = [ - "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", @@ -33,7 +32,7 @@ ] if TYPE_CHECKING: - from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig + from .configuration_sew import SEWConfig try: if not is_torch_available(): @@ -42,7 
+41,6 @@ pass else: from .modeling_sew import ( - SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, diff --git a/src/transformers/models/sew/configuration_sew.py b/src/transformers/models/sew/configuration_sew.py index b14ce441d000cb..33ea6d374fa71b 100644 --- a/src/transformers/models/sew/configuration_sew.py +++ b/src/transformers/models/sew/configuration_sew.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class SEWConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SEWModel`]. It is used to instantiate a SEW model diff --git a/src/transformers/models/sew/modeling_sew.py b/src/transformers/models/sew/modeling_sew.py index 63768828ae4b62..60cbb777c795a3 100644 --- a/src/transformers/models/sew/modeling_sew.py +++ b/src/transformers/models/sew/modeling_sew.py @@ -69,9 +69,6 @@ _SEQ_CLASS_EXPECTED_LOSS = 9.52 -from ..deprecated._archive_maps import SEW_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/sew_d/__init__.py b/src/transformers/models/sew_d/__init__.py index ab1dd5284a32e4..c99be845d544b5 100644 --- a/src/transformers/models/sew_d/__init__.py +++ b/src/transformers/models/sew_d/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available -_import_structure = {"configuration_sew_d": ["SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWDConfig"]} +_import_structure = {"configuration_sew_d": ["SEWDConfig"]} try: if not is_torch_available(): @@ -25,7 +25,6 @@ pass else: _import_structure["modeling_sew_d"] = [ - "SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWDForCTC", "SEWDForSequenceClassification", "SEWDModel", @@ -33,7 
+32,7 @@ ] if TYPE_CHECKING: - from .configuration_sew_d import SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWDConfig + from .configuration_sew_d import SEWDConfig try: if not is_torch_available(): @@ -42,7 +41,6 @@ pass else: from .modeling_sew_d import ( - SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST, SEWDForCTC, SEWDForSequenceClassification, SEWDModel, diff --git a/src/transformers/models/sew_d/configuration_sew_d.py b/src/transformers/models/sew_d/configuration_sew_d.py index 9e96a1f22b30bf..aa4b60edc7e059 100644 --- a/src/transformers/models/sew_d/configuration_sew_d.py +++ b/src/transformers/models/sew_d/configuration_sew_d.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class SEWDConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SEWDModel`]. It is used to instantiate a SEW-D diff --git a/src/transformers/models/sew_d/modeling_sew_d.py b/src/transformers/models/sew_d/modeling_sew_d.py index 84bf303cd52481..352a86c9c2e884 100644 --- a/src/transformers/models/sew_d/modeling_sew_d.py +++ b/src/transformers/models/sew_d/modeling_sew_d.py @@ -56,9 +56,6 @@ _SEQ_CLASS_EXPECTED_LOSS = 3.16 -from ..deprecated._archive_maps import SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: Tuple[int, int], diff --git a/src/transformers/models/siglip/__init__.py b/src/transformers/models/siglip/__init__.py index ff44d5cbf14b3c..96ce20e7f230bf 100644 --- a/src/transformers/models/siglip/__init__.py +++ b/src/transformers/models/siglip/__init__.py @@ -24,7 +24,6 @@ _import_structure = { "configuration_siglip": [ - "SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "SiglipConfig", "SiglipTextConfig", "SiglipVisionConfig", @@ -56,7 +55,6 @@ pass else: _import_structure["modeling_siglip"] = [ - 
"SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "SiglipModel", "SiglipPreTrainedModel", "SiglipTextModel", @@ -67,7 +65,6 @@ if TYPE_CHECKING: from .configuration_siglip import ( - SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, SiglipConfig, SiglipTextConfig, SiglipVisionConfig, @@ -97,7 +94,6 @@ pass else: from .modeling_siglip import ( - SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST, SiglipForImageClassification, SiglipModel, SiglipPreTrainedModel, diff --git a/src/transformers/models/siglip/configuration_siglip.py b/src/transformers/models/siglip/configuration_siglip.py index 872e5c3b965ba9..7692f79abb333e 100644 --- a/src/transformers/models/siglip/configuration_siglip.py +++ b/src/transformers/models/siglip/configuration_siglip.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class SiglipTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SiglipTextModel`]. 
It is used to instantiate a diff --git a/src/transformers/models/siglip/modeling_siglip.py b/src/transformers/models/siglip/modeling_siglip.py index 744f4044d91d09..5399006227e784 100644 --- a/src/transformers/models/siglip/modeling_siglip.py +++ b/src/transformers/models/siglip/modeling_siglip.py @@ -48,9 +48,6 @@ _CHECKPOINT_FOR_DOC = "google/siglip-base-patch16-224" -from ..deprecated._archive_maps import SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def _trunc_normal_(tensor, mean, std, a, b): # Cut & paste from PyTorch official master until it's in a few official releases - RW # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf diff --git a/src/transformers/models/speech_to_text/__init__.py b/src/transformers/models/speech_to_text/__init__.py index 3194f99931a4d6..4ad05da69710ad 100644 --- a/src/transformers/models/speech_to_text/__init__.py +++ b/src/transformers/models/speech_to_text/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"], + "configuration_speech_to_text": ["Speech2TextConfig"], "feature_extraction_speech_to_text": ["Speech2TextFeatureExtractor"], "processing_speech_to_text": ["Speech2TextProcessor"], } @@ -43,7 +43,6 @@ pass else: _import_structure["modeling_tf_speech_to_text"] = [ - "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSpeech2TextForConditionalGeneration", "TFSpeech2TextModel", "TFSpeech2TextPreTrainedModel", @@ -56,7 +55,6 @@ pass else: _import_structure["modeling_speech_to_text"] = [ - "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Speech2TextForConditionalGeneration", "Speech2TextModel", "Speech2TextPreTrainedModel", @@ -64,7 +62,7 @@ if TYPE_CHECKING: - from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig + from .configuration_speech_to_text import Speech2TextConfig from .feature_extraction_speech_to_text import 
Speech2TextFeatureExtractor from .processing_speech_to_text import Speech2TextProcessor @@ -83,7 +81,6 @@ pass else: from .modeling_tf_speech_to_text import ( - TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeech2TextForConditionalGeneration, TFSpeech2TextModel, TFSpeech2TextPreTrainedModel, @@ -96,7 +93,6 @@ pass else: from .modeling_speech_to_text import ( - SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, Speech2TextForConditionalGeneration, Speech2TextModel, Speech2TextPreTrainedModel, diff --git a/src/transformers/models/speech_to_text/configuration_speech_to_text.py b/src/transformers/models/speech_to_text/configuration_speech_to_text.py index 67dee8dc0bc361..2b8e3bd22e2d7b 100644 --- a/src/transformers/models/speech_to_text/configuration_speech_to_text.py +++ b/src/transformers/models/speech_to_text/configuration_speech_to_text.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Speech2TextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Speech2TextModel`]. 
It is used to instantiate a diff --git a/src/transformers/models/speech_to_text/modeling_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_speech_to_text.py index 6898cc081fe91f..155d6a5dfe3e62 100755 --- a/src/transformers/models/speech_to_text/modeling_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_speech_to_text.py @@ -44,9 +44,6 @@ _CONFIG_FOR_DOC = "Speech2TextConfig" -from ..deprecated._archive_maps import SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ diff --git a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py index 8fd6bd21a593c9..91e6028332f0a9 100755 --- a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py @@ -56,9 +56,6 @@ _CHECKPOINT_FOR_DOC = "facebook/s2t-small-librispeech-asr" -from ..deprecated._archive_maps import TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - LARGE_NEGATIVE = -1e8 diff --git a/src/transformers/models/speech_to_text_2/__init__.py b/src/transformers/models/speech_to_text_2/__init__.py index bf842f6006b3ec..ab507bc19f85f9 100644 --- a/src/transformers/models/speech_to_text_2/__init__.py +++ b/src/transformers/models/speech_to_text_2/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_speech_to_text_2": ["SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2Text2Config"], + "configuration_speech_to_text_2": ["Speech2Text2Config"], "processing_speech_to_text_2": ["Speech2Text2Processor"], "tokenization_speech_to_text_2": ["Speech2Text2Tokenizer"], } @@ -36,14 +36,13 @@ pass else: _import_structure["modeling_speech_to_text_2"] = [ - 
"SPEECH_TO_TEXT_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Speech2Text2ForCausalLM", "Speech2Text2PreTrainedModel", ] if TYPE_CHECKING: - from .configuration_speech_to_text_2 import SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2Text2Config + from .configuration_speech_to_text_2 import Speech2Text2Config from .processing_speech_to_text_2 import Speech2Text2Processor from .tokenization_speech_to_text_2 import Speech2Text2Tokenizer @@ -54,7 +53,6 @@ pass else: from .modeling_speech_to_text_2 import ( - SPEECH_TO_TEXT_2_PRETRAINED_MODEL_ARCHIVE_LIST, Speech2Text2ForCausalLM, Speech2Text2PreTrainedModel, ) diff --git a/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py b/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py index cbb3be82552266..5c9ebbe00fb9cd 100644 --- a/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py +++ b/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Speech2Text2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Speech2Text2ForCausalLM`]. 
It is used to diff --git a/src/transformers/models/speecht5/__init__.py b/src/transformers/models/speecht5/__init__.py index 20606dda51ef87..f9afe52aa4b7ab 100644 --- a/src/transformers/models/speecht5/__init__.py +++ b/src/transformers/models/speecht5/__init__.py @@ -23,8 +23,6 @@ _import_structure = { "configuration_speecht5": [ - "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP", - "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP", "SpeechT5Config", "SpeechT5HifiGanConfig", ], @@ -47,7 +45,6 @@ pass else: _import_structure["modeling_speecht5"] = [ - "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST", "SpeechT5ForSpeechToText", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", @@ -58,8 +55,6 @@ if TYPE_CHECKING: from .configuration_speecht5 import ( - SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, - SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechT5Config, SpeechT5HifiGanConfig, ) @@ -81,7 +76,6 @@ pass else: from .modeling_speecht5 import ( - SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechT5ForSpeechToSpeech, SpeechT5ForSpeechToText, SpeechT5ForTextToSpeech, diff --git a/src/transformers/models/speecht5/configuration_speecht5.py b/src/transformers/models/speecht5/configuration_speecht5.py index 36cb4995a83f05..91883253032497 100644 --- a/src/transformers/models/speecht5/configuration_speecht5.py +++ b/src/transformers/models/speecht5/configuration_speecht5.py @@ -24,14 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - -SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP = { - "microsoft/speecht5_hifigan": "https://huggingface.co/microsoft/speecht5_hifigan/resolve/main/config.json", -} - - class SpeechT5Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SpeechT5Model`]. 
It is used to instantiate a diff --git a/src/transformers/models/speecht5/modeling_speecht5.py b/src/transformers/models/speecht5/modeling_speecht5.py index 5caac417027768..175b4b6e7962c1 100644 --- a/src/transformers/models/speecht5/modeling_speecht5.py +++ b/src/transformers/models/speecht5/modeling_speecht5.py @@ -47,9 +47,6 @@ _CONFIG_FOR_DOC = "SpeechT5Config" -from ..deprecated._archive_maps import SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ diff --git a/src/transformers/models/splinter/__init__.py b/src/transformers/models/splinter/__init__.py index 24355c01add73b..81896fb15a5b66 100644 --- a/src/transformers/models/splinter/__init__.py +++ b/src/transformers/models/splinter/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_splinter": ["SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SplinterConfig"], + "configuration_splinter": ["SplinterConfig"], "tokenization_splinter": ["SplinterTokenizer"], } @@ -36,7 +36,6 @@ pass else: _import_structure["modeling_splinter"] = [ - "SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST", "SplinterForQuestionAnswering", "SplinterForPreTraining", "SplinterLayer", @@ -46,7 +45,7 @@ if TYPE_CHECKING: - from .configuration_splinter import SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP, SplinterConfig + from .configuration_splinter import SplinterConfig from .tokenization_splinter import SplinterTokenizer try: @@ -64,7 +63,6 @@ pass else: from .modeling_splinter import ( - SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST, SplinterForPreTraining, SplinterForQuestionAnswering, SplinterLayer, diff --git a/src/transformers/models/splinter/configuration_splinter.py b/src/transformers/models/splinter/configuration_splinter.py index 5248c74c1a3efc..83e78e4e4a2638 100644 --- a/src/transformers/models/splinter/configuration_splinter.py +++ 
b/src/transformers/models/splinter/configuration_splinter.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class SplinterConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SplinterModel`]. It is used to instantiate an diff --git a/src/transformers/models/splinter/modeling_splinter.py b/src/transformers/models/splinter/modeling_splinter.py index fa546e1201346a..f3f470258dc3b6 100755 --- a/src/transformers/models/splinter/modeling_splinter.py +++ b/src/transformers/models/splinter/modeling_splinter.py @@ -38,9 +38,6 @@ _CONFIG_FOR_DOC = "SplinterConfig" -from ..deprecated._archive_maps import SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class SplinterEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" diff --git a/src/transformers/models/squeezebert/__init__.py b/src/transformers/models/squeezebert/__init__.py index b3af76dff7e1ac..45aff2f64c1610 100644 --- a/src/transformers/models/squeezebert/__init__.py +++ b/src/transformers/models/squeezebert/__init__.py @@ -19,7 +19,6 @@ _import_structure = { "configuration_squeezebert": [ - "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SqueezeBertConfig", "SqueezeBertOnnxConfig", ], @@ -41,7 +40,6 @@ pass else: _import_structure["modeling_squeezebert"] = [ - "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "SqueezeBertForMaskedLM", "SqueezeBertForMultipleChoice", "SqueezeBertForQuestionAnswering", @@ -55,7 +53,6 @@ if TYPE_CHECKING: from .configuration_squeezebert import ( - SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) @@ -76,7 +73,6 @@ pass else: from .modeling_squeezebert import ( - SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, diff --git 
a/src/transformers/models/squeezebert/configuration_squeezebert.py b/src/transformers/models/squeezebert/configuration_squeezebert.py index 2e8710bb5c5859..ec79988849e4cf 100644 --- a/src/transformers/models/squeezebert/configuration_squeezebert.py +++ b/src/transformers/models/squeezebert/configuration_squeezebert.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class SqueezeBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SqueezeBertModel`]. It is used to instantiate a diff --git a/src/transformers/models/squeezebert/modeling_squeezebert.py b/src/transformers/models/squeezebert/modeling_squeezebert.py index d95a58daaf6164..cd5dcb0842631e 100644 --- a/src/transformers/models/squeezebert/modeling_squeezebert.py +++ b/src/transformers/models/squeezebert/modeling_squeezebert.py @@ -43,9 +43,6 @@ _CONFIG_FOR_DOC = "SqueezeBertConfig" -from ..deprecated._archive_maps import SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class SqueezeBertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" diff --git a/src/transformers/models/stablelm/__init__.py b/src/transformers/models/stablelm/__init__.py index 5c846cad030978..7fc3a6857fa55a 100644 --- a/src/transformers/models/stablelm/__init__.py +++ b/src/transformers/models/stablelm/__init__.py @@ -21,7 +21,7 @@ _import_structure = { - "configuration_stablelm": ["STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP", "StableLmConfig"], + "configuration_stablelm": ["StableLmConfig"], } @@ -40,7 +40,7 @@ if TYPE_CHECKING: - from .configuration_stablelm import STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP, StableLmConfig + from .configuration_stablelm import StableLmConfig try: if not is_torch_available(): diff --git a/src/transformers/models/stablelm/configuration_stablelm.py 
b/src/transformers/models/stablelm/configuration_stablelm.py index beb4af4d8402b3..64b39fe20e518e 100644 --- a/src/transformers/models/stablelm/configuration_stablelm.py +++ b/src/transformers/models/stablelm/configuration_stablelm.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class StableLmConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`~StableLmModel`]. diff --git a/src/transformers/models/starcoder2/__init__.py b/src/transformers/models/starcoder2/__init__.py index a2b25f10090b36..1eb195fde16b03 100644 --- a/src/transformers/models/starcoder2/__init__.py +++ b/src/transformers/models/starcoder2/__init__.py @@ -21,7 +21,7 @@ _import_structure = { - "configuration_starcoder2": ["STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Starcoder2Config"], + "configuration_starcoder2": ["Starcoder2Config"], } @@ -40,7 +40,7 @@ if TYPE_CHECKING: - from .configuration_starcoder2 import STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP, Starcoder2Config + from .configuration_starcoder2 import Starcoder2Config try: if not is_torch_available(): diff --git a/src/transformers/models/starcoder2/configuration_starcoder2.py b/src/transformers/models/starcoder2/configuration_starcoder2.py index 8337135442c86f..3bb0d1b65519c7 100644 --- a/src/transformers/models/starcoder2/configuration_starcoder2.py +++ b/src/transformers/models/starcoder2/configuration_starcoder2.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Starcoder2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Starcoder2Model`]. 
It is used to instantiate a diff --git a/src/transformers/models/superpoint/__init__.py b/src/transformers/models/superpoint/__init__.py index 313767c02dda89..90cde651ea0ae0 100644 --- a/src/transformers/models/superpoint/__init__.py +++ b/src/transformers/models/superpoint/__init__.py @@ -17,12 +17,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available -_import_structure = { - "configuration_superpoint": [ - "SUPERPOINT_PRETRAINED_CONFIG_ARCHIVE_MAP", - "SuperPointConfig", - ] -} +_import_structure = {"configuration_superpoint": ["SuperPointConfig"]} try: if not is_vision_available(): @@ -39,7 +34,6 @@ pass else: _import_structure["modeling_superpoint"] = [ - "SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST", "SuperPointForKeypointDetection", "SuperPointPreTrainedModel", ] @@ -47,7 +41,6 @@ if TYPE_CHECKING: from .configuration_superpoint import ( - SUPERPOINT_PRETRAINED_CONFIG_ARCHIVE_MAP, SuperPointConfig, ) @@ -66,7 +59,6 @@ pass else: from .modeling_superpoint import ( - SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST, SuperPointForKeypointDetection, SuperPointPreTrainedModel, ) diff --git a/src/transformers/models/superpoint/configuration_superpoint.py b/src/transformers/models/superpoint/configuration_superpoint.py index 5970a6e1b4134d..ac97b0aa8f4231 100644 --- a/src/transformers/models/superpoint/configuration_superpoint.py +++ b/src/transformers/models/superpoint/configuration_superpoint.py @@ -19,10 +19,6 @@ logger = logging.get_logger(__name__) -SUPERPOINT_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "magic-leap-community/superpoint": "https://huggingface.co/magic-leap-community/superpoint/blob/main/config.json" -} - class SuperPointConfig(PretrainedConfig): r""" diff --git a/src/transformers/models/superpoint/modeling_superpoint.py b/src/transformers/models/superpoint/modeling_superpoint.py index 3e3fdbbf10cfb1..87473480d700f5 100644 --- a/src/transformers/models/superpoint/modeling_superpoint.py +++ 
b/src/transformers/models/superpoint/modeling_superpoint.py @@ -39,8 +39,6 @@ _CHECKPOINT_FOR_DOC = "magic-leap-community/superpoint" -SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST = ["magic-leap-community/superpoint"] - def remove_keypoints_from_borders( keypoints: torch.Tensor, scores: torch.Tensor, border: int, height: int, width: int diff --git a/src/transformers/models/swiftformer/__init__.py b/src/transformers/models/swiftformer/__init__.py index b324ea174d551b..2f5dcc811dde98 100644 --- a/src/transformers/models/swiftformer/__init__.py +++ b/src/transformers/models/swiftformer/__init__.py @@ -23,7 +23,6 @@ _import_structure = { "configuration_swiftformer": [ - "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwiftFormerConfig", "SwiftFormerOnnxConfig", ] @@ -36,7 +35,6 @@ pass else: _import_structure["modeling_swiftformer"] = [ - "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "SwiftFormerForImageClassification", "SwiftFormerModel", "SwiftFormerPreTrainedModel", @@ -49,7 +47,6 @@ pass else: _import_structure["modeling_tf_swiftformer"] = [ - "TF_SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSwiftFormerForImageClassification", "TFSwiftFormerModel", "TFSwiftFormerPreTrainedModel", @@ -57,7 +54,6 @@ if TYPE_CHECKING: from .configuration_swiftformer import ( - SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) @@ -69,7 +65,6 @@ pass else: from .modeling_swiftformer import ( - SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, @@ -81,7 +76,6 @@ pass else: from .modeling_tf_swiftformer import ( - TF_SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwiftFormerForImageClassification, TFSwiftFormerModel, TFSwiftFormerPreTrainedModel, diff --git a/src/transformers/models/swiftformer/configuration_swiftformer.py b/src/transformers/models/swiftformer/configuration_swiftformer.py index 3789c72d421fb3..be95094bac8bc3 100644 --- 
a/src/transformers/models/swiftformer/configuration_swiftformer.py +++ b/src/transformers/models/swiftformer/configuration_swiftformer.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class SwiftFormerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SwiftFormerModel`]. It is used to instantiate an diff --git a/src/transformers/models/swiftformer/modeling_swiftformer.py b/src/transformers/models/swiftformer/modeling_swiftformer.py index 970874423a3e3c..68a0ee53b041b0 100644 --- a/src/transformers/models/swiftformer/modeling_swiftformer.py +++ b/src/transformers/models/swiftformer/modeling_swiftformer.py @@ -52,9 +52,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class SwiftFormerPatchEmbedding(nn.Module): """ Patch Embedding Layer constructed of two 2D convolutional layers. diff --git a/src/transformers/models/swiftformer/modeling_tf_swiftformer.py b/src/transformers/models/swiftformer/modeling_tf_swiftformer.py index ce8bf2452559c9..271bd5e280dd4c 100644 --- a/src/transformers/models/swiftformer/modeling_tf_swiftformer.py +++ b/src/transformers/models/swiftformer/modeling_tf_swiftformer.py @@ -48,12 +48,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -TF_SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "MBZUAI/swiftformer-xs", - # See all SwiftFormer models at https://huggingface.co/models?filter=swiftformer -] - - class TFSwiftFormerPatchEmbeddingSequential(keras.layers.Layer): """ The sequential component of the patch embedding layer. 
diff --git a/src/transformers/models/swin/__init__.py b/src/transformers/models/swin/__init__.py index 39cace5d5e8875..a3458fe1efb848 100644 --- a/src/transformers/models/swin/__init__.py +++ b/src/transformers/models/swin/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available -_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]} +_import_structure = {"configuration_swin": ["SwinConfig", "SwinOnnxConfig"]} try: @@ -26,7 +26,6 @@ pass else: _import_structure["modeling_swin"] = [ - "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "SwinForImageClassification", "SwinForMaskedImageModeling", "SwinModel", @@ -41,7 +40,6 @@ pass else: _import_structure["modeling_tf_swin"] = [ - "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSwinForImageClassification", "TFSwinForMaskedImageModeling", "TFSwinModel", @@ -49,7 +47,7 @@ ] if TYPE_CHECKING: - from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig + from .configuration_swin import SwinConfig, SwinOnnxConfig try: if not is_torch_available(): @@ -58,7 +56,6 @@ pass else: from .modeling_swin import ( - SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, @@ -73,7 +70,6 @@ pass else: from .modeling_tf_swin import ( - TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, diff --git a/src/transformers/models/swin/configuration_swin.py b/src/transformers/models/swin/configuration_swin.py index 9bf460870f9ee0..281d0f047b2a68 100644 --- a/src/transformers/models/swin/configuration_swin.py +++ b/src/transformers/models/swin/configuration_swin.py @@ -28,9 +28,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class SwinConfig(BackboneConfigMixin, PretrainedConfig): r""" This is 
the configuration class to store the configuration of a [`SwinModel`]. It is used to instantiate a Swin diff --git a/src/transformers/models/swin/modeling_swin.py b/src/transformers/models/swin/modeling_swin.py index f21029dcbfa652..cb0eff88abc26f 100644 --- a/src/transformers/models/swin/modeling_swin.py +++ b/src/transformers/models/swin/modeling_swin.py @@ -56,9 +56,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import SWIN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # drop_path, SwinPatchEmbeddings, SwinPatchMerging and SwinDropPath are from the timm library. diff --git a/src/transformers/models/swin/modeling_tf_swin.py b/src/transformers/models/swin/modeling_tf_swin.py index b9a10793406916..99da3d7f1e2306 100644 --- a/src/transformers/models/swin/modeling_tf_swin.py +++ b/src/transformers/models/swin/modeling_tf_swin.py @@ -61,9 +61,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # drop_path, TFSwinPatchEmbeddings, TFSwinPatchMerging and TFSwinDropPath are tensorflow # implementations of PyTorch functionalities in the timm library. 
diff --git a/src/transformers/models/swin2sr/__init__.py b/src/transformers/models/swin2sr/__init__.py index 881a7673512ef2..16495f1dc9712d 100644 --- a/src/transformers/models/swin2sr/__init__.py +++ b/src/transformers/models/swin2sr/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_swin2sr": ["SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swin2SRConfig"], + "configuration_swin2sr": ["Swin2SRConfig"], } @@ -28,7 +28,6 @@ pass else: _import_structure["modeling_swin2sr"] = [ - "SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST", "Swin2SRForImageSuperResolution", "Swin2SRModel", "Swin2SRPreTrainedModel", @@ -45,7 +44,7 @@ if TYPE_CHECKING: - from .configuration_swin2sr import SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP, Swin2SRConfig + from .configuration_swin2sr import Swin2SRConfig try: if not is_torch_available(): @@ -54,7 +53,6 @@ pass else: from .modeling_swin2sr import ( - SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST, Swin2SRForImageSuperResolution, Swin2SRModel, Swin2SRPreTrainedModel, diff --git a/src/transformers/models/swin2sr/configuration_swin2sr.py b/src/transformers/models/swin2sr/configuration_swin2sr.py index 1858be52a5ab45..98177a804aee57 100644 --- a/src/transformers/models/swin2sr/configuration_swin2sr.py +++ b/src/transformers/models/swin2sr/configuration_swin2sr.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Swin2SRConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Swin2SRModel`]. 
It is used to instantiate a Swin diff --git a/src/transformers/models/swin2sr/modeling_swin2sr.py b/src/transformers/models/swin2sr/modeling_swin2sr.py index fb3c0a38f21f47..96b67da51e477a 100644 --- a/src/transformers/models/swin2sr/modeling_swin2sr.py +++ b/src/transformers/models/swin2sr/modeling_swin2sr.py @@ -49,9 +49,6 @@ _EXPECTED_OUTPUT_SHAPE = [1, 180, 488, 648] -from ..deprecated._archive_maps import SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class Swin2SREncoderOutput(ModelOutput): """ diff --git a/src/transformers/models/swinv2/__init__.py b/src/transformers/models/swinv2/__init__.py index b104662e088b31..e3a13b79651fcd 100644 --- a/src/transformers/models/swinv2/__init__.py +++ b/src/transformers/models/swinv2/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"], + "configuration_swinv2": ["Swinv2Config"], } @@ -28,7 +28,6 @@ pass else: _import_structure["modeling_swinv2"] = [ - "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Swinv2ForImageClassification", "Swinv2ForMaskedImageModeling", "Swinv2Model", @@ -38,7 +37,7 @@ if TYPE_CHECKING: - from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config + from .configuration_swinv2 import Swinv2Config try: if not is_torch_available(): @@ -47,7 +46,6 @@ pass else: from .modeling_swinv2 import ( - SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, Swinv2Backbone, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, diff --git a/src/transformers/models/swinv2/configuration_swinv2.py b/src/transformers/models/swinv2/configuration_swinv2.py index 41acd48f53259c..17e924804ca61a 100644 --- a/src/transformers/models/swinv2/configuration_swinv2.py +++ b/src/transformers/models/swinv2/configuration_swinv2.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class 
Swinv2Config(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Swinv2Model`]. It is used to instantiate a Swin diff --git a/src/transformers/models/swinv2/modeling_swinv2.py b/src/transformers/models/swinv2/modeling_swinv2.py index 83b8ed5ec381b2..213d60a386dcc8 100644 --- a/src/transformers/models/swinv2/modeling_swinv2.py +++ b/src/transformers/models/swinv2/modeling_swinv2.py @@ -56,9 +56,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "Egyptian cat" -from ..deprecated._archive_maps import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # drop_path, Swinv2PatchEmbeddings, Swinv2PatchMerging and Swinv2DropPath are from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/swin_transformer_v2.py. diff --git a/src/transformers/models/switch_transformers/__init__.py b/src/transformers/models/switch_transformers/__init__.py index 35816110111092..e6f9914fcbcc1e 100644 --- a/src/transformers/models/switch_transformers/__init__.py +++ b/src/transformers/models/switch_transformers/__init__.py @@ -27,7 +27,6 @@ _import_structure = { "configuration_switch_transformers": [ - "SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwitchTransformersConfig", "SwitchTransformersOnnxConfig", ] @@ -40,7 +39,6 @@ pass else: _import_structure["modeling_switch_transformers"] = [ - "SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST", "SwitchTransformersEncoderModel", "SwitchTransformersForConditionalGeneration", "SwitchTransformersModel", @@ -52,7 +50,6 @@ if TYPE_CHECKING: from .configuration_switch_transformers import ( - SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP, SwitchTransformersConfig, SwitchTransformersOnnxConfig, ) @@ -64,7 +61,6 @@ pass else: from .modeling_switch_transformers import ( - SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST, SwitchTransformersEncoderModel, SwitchTransformersForConditionalGeneration, SwitchTransformersModel, diff --git 
a/src/transformers/models/switch_transformers/configuration_switch_transformers.py b/src/transformers/models/switch_transformers/configuration_switch_transformers.py index fb531003178af0..fe96297777c98d 100644 --- a/src/transformers/models/switch_transformers/configuration_switch_transformers.py +++ b/src/transformers/models/switch_transformers/configuration_switch_transformers.py @@ -20,9 +20,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class SwitchTransformersConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SwitchTransformersModel`]. It is used to diff --git a/src/transformers/models/switch_transformers/modeling_switch_transformers.py b/src/transformers/models/switch_transformers/modeling_switch_transformers.py index 774f9bf1a2fcd5..395d055177fae8 100644 --- a/src/transformers/models/switch_transformers/modeling_switch_transformers.py +++ b/src/transformers/models/switch_transformers/modeling_switch_transformers.py @@ -55,8 +55,6 @@ # for the pretrained weights provided with the models #################################################### -from ..deprecated._archive_maps import SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - def router_z_loss_func(router_logits: torch.Tensor) -> float: r""" diff --git a/src/transformers/models/t5/__init__.py b/src/transformers/models/t5/__init__.py index dbdbe238ba3376..d6549e270abcb6 100644 --- a/src/transformers/models/t5/__init__.py +++ b/src/transformers/models/t5/__init__.py @@ -25,7 +25,7 @@ ) -_import_structure = {"configuration_t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5Config", "T5OnnxConfig"]} +_import_structure = {"configuration_t5": ["T5Config", "T5OnnxConfig"]} try: if not is_sentencepiece_available(): @@ -50,7 +50,6 @@ pass else: _import_structure["modeling_t5"] = [ - "T5_PRETRAINED_MODEL_ARCHIVE_LIST", "T5EncoderModel", 
"T5ForConditionalGeneration", "T5Model", @@ -68,7 +67,6 @@ pass else: _import_structure["modeling_tf_t5"] = [ - "TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST", "TFT5EncoderModel", "TFT5ForConditionalGeneration", "TFT5Model", @@ -90,7 +88,7 @@ if TYPE_CHECKING: - from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config, T5OnnxConfig + from .configuration_t5 import T5Config, T5OnnxConfig try: if not is_sentencepiece_available(): @@ -115,7 +113,6 @@ pass else: from .modeling_t5 import ( - T5_PRETRAINED_MODEL_ARCHIVE_LIST, T5EncoderModel, T5ForConditionalGeneration, T5ForQuestionAnswering, @@ -133,7 +130,6 @@ pass else: from .modeling_tf_t5 import ( - TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST, TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model, diff --git a/src/transformers/models/t5/configuration_t5.py b/src/transformers/models/t5/configuration_t5.py index 2633ee630dff90..eec47e5eb2aba6 100644 --- a/src/transformers/models/t5/configuration_t5.py +++ b/src/transformers/models/t5/configuration_t5.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import T5_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class T5Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`T5Model`] or a [`TFT5Model`]. 
It is used to diff --git a/src/transformers/models/t5/modeling_t5.py b/src/transformers/models/t5/modeling_t5.py index 930d098186a5a3..81dff54f99c4bf 100644 --- a/src/transformers/models/t5/modeling_t5.py +++ b/src/transformers/models/t5/modeling_t5.py @@ -60,8 +60,6 @@ # for the pretrained weights provided with the models #################################################### -from ..deprecated._archive_maps import T5_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - #################################################### # This is a conversion method from TF 1.0 to PyTorch diff --git a/src/transformers/models/t5/modeling_tf_t5.py b/src/transformers/models/t5/modeling_tf_t5.py index 834abbad8a2885..d7a9c0a17def24 100644 --- a/src/transformers/models/t5/modeling_tf_t5.py +++ b/src/transformers/models/t5/modeling_tf_t5.py @@ -59,9 +59,6 @@ _CONFIG_FOR_DOC = "T5Config" -from ..deprecated._archive_maps import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - #################################################### # TF 2.0 Models are constructed using Keras imperative API by sub-classing # - keras.layers.Layer for the layers and diff --git a/src/transformers/models/table_transformer/__init__.py b/src/transformers/models/table_transformer/__init__.py index 346bc9ef9caaa6..de993193b0c522 100644 --- a/src/transformers/models/table_transformer/__init__.py +++ b/src/transformers/models/table_transformer/__init__.py @@ -19,7 +19,6 @@ _import_structure = { "configuration_table_transformer": [ - "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TableTransformerConfig", "TableTransformerOnnxConfig", ] @@ -32,7 +31,6 @@ pass else: _import_structure["modeling_table_transformer"] = [ - "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TableTransformerForObjectDetection", "TableTransformerModel", "TableTransformerPreTrainedModel", @@ -41,7 +39,6 @@ if TYPE_CHECKING: from .configuration_table_transformer import ( - TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, 
TableTransformerConfig, TableTransformerOnnxConfig, ) @@ -53,7 +50,6 @@ pass else: from .modeling_table_transformer import ( - TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TableTransformerForObjectDetection, TableTransformerModel, TableTransformerPreTrainedModel, diff --git a/src/transformers/models/table_transformer/configuration_table_transformer.py b/src/transformers/models/table_transformer/configuration_table_transformer.py index 4963396024a57e..8553ad1a6597e7 100644 --- a/src/transformers/models/table_transformer/configuration_table_transformer.py +++ b/src/transformers/models/table_transformer/configuration_table_transformer.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class TableTransformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TableTransformerModel`]. It is used to diff --git a/src/transformers/models/table_transformer/modeling_table_transformer.py b/src/transformers/models/table_transformer/modeling_table_transformer.py index 9a684ee121ddca..05a548fd5ac92d 100644 --- a/src/transformers/models/table_transformer/modeling_table_transformer.py +++ b/src/transformers/models/table_transformer/modeling_table_transformer.py @@ -61,9 +61,6 @@ _CHECKPOINT_FOR_DOC = "microsoft/table-transformer-detection" -from ..deprecated._archive_maps import TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass # Copied from transformers.models.detr.modeling_detr.DetrDecoderOutput with DETR->TABLE_TRANSFORMER,Detr->TableTransformer class TableTransformerDecoderOutput(BaseModelOutputWithCrossAttentions): diff --git a/src/transformers/models/tapas/__init__.py b/src/transformers/models/tapas/__init__.py index e1afab325420f7..750bf7e00f5a8f 100644 --- a/src/transformers/models/tapas/__init__.py +++ b/src/transformers/models/tapas/__init__.py @@ -18,7 +18,7 @@ 
_import_structure = { - "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"], + "configuration_tapas": ["TapasConfig"], "tokenization_tapas": ["TapasTokenizer"], } @@ -29,7 +29,6 @@ pass else: _import_structure["modeling_tapas"] = [ - "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TapasForMaskedLM", "TapasForQuestionAnswering", "TapasForSequenceClassification", @@ -44,7 +43,6 @@ pass else: _import_structure["modeling_tf_tapas"] = [ - "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TFTapasForMaskedLM", "TFTapasForQuestionAnswering", "TFTapasForSequenceClassification", @@ -54,7 +52,7 @@ if TYPE_CHECKING: - from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig + from .configuration_tapas import TapasConfig from .tokenization_tapas import TapasTokenizer try: @@ -64,7 +62,6 @@ pass else: from .modeling_tapas import ( - TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, @@ -80,7 +77,6 @@ pass else: from .modeling_tf_tapas import ( - TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, diff --git a/src/transformers/models/tapas/configuration_tapas.py b/src/transformers/models/tapas/configuration_tapas.py index b448afd0022062..cbc5cebf4e1b76 100644 --- a/src/transformers/models/tapas/configuration_tapas.py +++ b/src/transformers/models/tapas/configuration_tapas.py @@ -24,7 +24,6 @@ from ...configuration_utils import PretrainedConfig -from ..deprecated._archive_maps import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 class TapasConfig(PretrainedConfig): diff --git a/src/transformers/models/tapas/modeling_tapas.py b/src/transformers/models/tapas/modeling_tapas.py index 97636d8b28e18e..903fa2a2254455 100644 --- a/src/transformers/models/tapas/modeling_tapas.py +++ b/src/transformers/models/tapas/modeling_tapas.py @@ -57,9 +57,6 @@ _CHECKPOINT_FOR_DOC = "google/tapas-base" -from 
..deprecated._archive_maps import TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - EPSILON_ZERO_DIVISION = 1e-10 CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0 diff --git a/src/transformers/models/tapas/modeling_tf_tapas.py b/src/transformers/models/tapas/modeling_tf_tapas.py index 7cb64a482f3845..b26803cecb0bc1 100644 --- a/src/transformers/models/tapas/modeling_tf_tapas.py +++ b/src/transformers/models/tapas/modeling_tf_tapas.py @@ -84,9 +84,6 @@ _CHECKPOINT_FOR_DOC = "google/tapas-base" -from ..deprecated._archive_maps import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - EPSILON_ZERO_DIVISION = 1e-10 CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0 diff --git a/src/transformers/models/time_series_transformer/__init__.py b/src/transformers/models/time_series_transformer/__init__.py index 1c09b683a34625..39879ed1bc00b7 100644 --- a/src/transformers/models/time_series_transformer/__init__.py +++ b/src/transformers/models/time_series_transformer/__init__.py @@ -17,10 +17,7 @@ _import_structure = { - "configuration_time_series_transformer": [ - "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "TimeSeriesTransformerConfig", - ], + "configuration_time_series_transformer": ["TimeSeriesTransformerConfig"], } try: @@ -30,7 +27,6 @@ pass else: _import_structure["modeling_time_series_transformer"] = [ - "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TimeSeriesTransformerForPrediction", "TimeSeriesTransformerModel", "TimeSeriesTransformerPreTrainedModel", @@ -39,7 +35,6 @@ if TYPE_CHECKING: from .configuration_time_series_transformer import ( - TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) @@ -50,7 +45,6 @@ pass else: from .modeling_time_series_transformer import ( - TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, diff --git 
a/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py b/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py index f53f3aad1ec947..8c74f151749f6b 100644 --- a/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py +++ b/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class TimeSeriesTransformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TimeSeriesTransformerModel`]. It is used to diff --git a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py index ab46d3a92a1853..dd7a2228a32790 100644 --- a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py +++ b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py @@ -46,9 +46,6 @@ _CONFIG_FOR_DOC = "TimeSeriesTransformerConfig" -from ..deprecated._archive_maps import TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TimeSeriesFeatureEmbedder(nn.Module): """ Embed a sequence of categorical features. 
diff --git a/src/transformers/models/timesformer/__init__.py b/src/transformers/models/timesformer/__init__.py index f777a11ad1bdcf..48a2aa9fa47464 100644 --- a/src/transformers/models/timesformer/__init__.py +++ b/src/transformers/models/timesformer/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"], + "configuration_timesformer": ["TimesformerConfig"], } try: @@ -27,14 +27,13 @@ pass else: _import_structure["modeling_timesformer"] = [ - "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TimesformerModel", "TimesformerForVideoClassification", "TimesformerPreTrainedModel", ] if TYPE_CHECKING: - from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig + from .configuration_timesformer import TimesformerConfig try: if not is_torch_available(): @@ -43,7 +42,6 @@ pass else: from .modeling_timesformer import ( - TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, diff --git a/src/transformers/models/timesformer/configuration_timesformer.py b/src/transformers/models/timesformer/configuration_timesformer.py index 79a86b7b5b370d..ebcfcc82482a6f 100644 --- a/src/transformers/models/timesformer/configuration_timesformer.py +++ b/src/transformers/models/timesformer/configuration_timesformer.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class TimesformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TimesformerModel`]. 
It is used to instantiate a diff --git a/src/transformers/models/timesformer/modeling_timesformer.py b/src/transformers/models/timesformer/modeling_timesformer.py index 17b80ee5a1d53f..f85e9452470df9 100644 --- a/src/transformers/models/timesformer/modeling_timesformer.py +++ b/src/transformers/models/timesformer/modeling_timesformer.py @@ -37,9 +37,6 @@ _CHECKPOINT_FOR_DOC = "facebook/timesformer" -from ..deprecated._archive_maps import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L155 class TimesformerPatchEmbeddings(nn.Module): """Image to Patch Embedding""" diff --git a/src/transformers/models/trocr/__init__.py b/src/transformers/models/trocr/__init__.py index 08400fc916ec21..14854857586d97 100644 --- a/src/transformers/models/trocr/__init__.py +++ b/src/transformers/models/trocr/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], + "configuration_trocr": ["TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } @@ -35,14 +35,13 @@ pass else: _import_structure["modeling_trocr"] = [ - "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: - from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig + from .configuration_trocr import TrOCRConfig from .processing_trocr import TrOCRProcessor try: @@ -51,7 +50,7 @@ except OptionalDependencyNotAvailable: pass else: - from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel + from .modeling_trocr import TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys diff --git a/src/transformers/models/trocr/configuration_trocr.py b/src/transformers/models/trocr/configuration_trocr.py index ab282db97bfc55..efa20d884e381d 100644 --- 
a/src/transformers/models/trocr/configuration_trocr.py +++ b/src/transformers/models/trocr/configuration_trocr.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class TrOCRConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TrOCRForCausalLM`]. It is used to instantiate an diff --git a/src/transformers/models/trocr/modeling_trocr.py b/src/transformers/models/trocr/modeling_trocr.py index a20c56e331ce6b..1a0766fc25d80a 100644 --- a/src/transformers/models/trocr/modeling_trocr.py +++ b/src/transformers/models/trocr/modeling_trocr.py @@ -37,9 +37,6 @@ _CHECKPOINT_FOR_DOC = "microsoft/trocr-base-handwritten" -from ..deprecated._archive_maps import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->TrOCR class TrOCRLearnedPositionalEmbedding(nn.Embedding): """ diff --git a/src/transformers/models/tvlt/__init__.py b/src/transformers/models/tvlt/__init__.py index 86c0f7c1c0b99d..d63bad0a7adc81 100644 --- a/src/transformers/models/tvlt/__init__.py +++ b/src/transformers/models/tvlt/__init__.py @@ -26,7 +26,7 @@ _import_structure = { - "configuration_tvlt": ["TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP", "TvltConfig"], + "configuration_tvlt": ["TvltConfig"], "feature_extraction_tvlt": ["TvltFeatureExtractor"], "processing_tvlt": ["TvltProcessor"], } @@ -38,7 +38,6 @@ pass else: _import_structure["modeling_tvlt"] = [ - "TVLT_PRETRAINED_MODEL_ARCHIVE_LIST", "TvltModel", "TvltForPreTraining", "TvltForAudioVisualClassification", @@ -55,7 +54,7 @@ if TYPE_CHECKING: - from .configuration_tvlt import TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP, TvltConfig + from .configuration_tvlt import TvltConfig from .processing_tvlt import TvltProcessor from .feature_extraction_tvlt import TvltFeatureExtractor @@ -66,7 +65,6 @@ pass else: from .modeling_tvlt 
import ( - TVLT_PRETRAINED_MODEL_ARCHIVE_LIST, TvltForAudioVisualClassification, TvltForPreTraining, TvltModel, diff --git a/src/transformers/models/tvlt/configuration_tvlt.py b/src/transformers/models/tvlt/configuration_tvlt.py index 063befc9d77f92..fbf65effd96ea3 100644 --- a/src/transformers/models/tvlt/configuration_tvlt.py +++ b/src/transformers/models/tvlt/configuration_tvlt.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class TvltConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TvltModel`]. It is used to instantiate a TVLT diff --git a/src/transformers/models/tvlt/modeling_tvlt.py b/src/transformers/models/tvlt/modeling_tvlt.py index f841c47ea4bc56..0376570fe5a3b8 100644 --- a/src/transformers/models/tvlt/modeling_tvlt.py +++ b/src/transformers/models/tvlt/modeling_tvlt.py @@ -46,9 +46,6 @@ _CHECKPOINT_FOR_DOC = "ZinengTang/tvlt-base" -from ..deprecated._archive_maps import TVLT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class TvltModelOutput(ModelOutput): """ diff --git a/src/transformers/models/tvp/__init__.py b/src/transformers/models/tvp/__init__.py index 63c0bd27174471..b8479dbdd331b8 100644 --- a/src/transformers/models/tvp/__init__.py +++ b/src/transformers/models/tvp/__init__.py @@ -18,10 +18,7 @@ _import_structure = { - "configuration_tvp": [ - "TVP_PRETRAINED_CONFIG_ARCHIVE_MAP", - "TvpConfig", - ], + "configuration_tvp": ["TvpConfig"], "processing_tvp": ["TvpProcessor"], } @@ -40,7 +37,6 @@ pass else: _import_structure["modeling_tvp"] = [ - "TVP_PRETRAINED_MODEL_ARCHIVE_LIST", "TvpModel", "TvpPreTrainedModel", "TvpForVideoGrounding", @@ -48,7 +44,6 @@ if TYPE_CHECKING: from .configuration_tvp import ( - TVP_PRETRAINED_CONFIG_ARCHIVE_MAP, TvpConfig, ) from .processing_tvp import TvpProcessor @@ -68,7 +63,6 @@ pass else: from .modeling_tvp import ( - 
TVP_PRETRAINED_MODEL_ARCHIVE_LIST, TvpForVideoGrounding, TvpModel, TvpPreTrainedModel, diff --git a/src/transformers/models/tvp/configuration_tvp.py b/src/transformers/models/tvp/configuration_tvp.py index 85b7ac6a41cbcc..65c4e3a5225b4a 100644 --- a/src/transformers/models/tvp/configuration_tvp.py +++ b/src/transformers/models/tvp/configuration_tvp.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import TVP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class TvpConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TvpModel`]. It is used to instantiate an Tvp diff --git a/src/transformers/models/tvp/modeling_tvp.py b/src/transformers/models/tvp/modeling_tvp.py index da8e85da74cfbd..121cd7b5f3f375 100644 --- a/src/transformers/models/tvp/modeling_tvp.py +++ b/src/transformers/models/tvp/modeling_tvp.py @@ -35,9 +35,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import TVP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class TvpVideoGroundingOutput(ModelOutput): """ diff --git a/src/transformers/models/udop/__init__.py b/src/transformers/models/udop/__init__.py index 5066fde6af1d15..732d97aa7a99c7 100644 --- a/src/transformers/models/udop/__init__.py +++ b/src/transformers/models/udop/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_udop": ["UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP", "UdopConfig"], + "configuration_udop": ["UdopConfig"], "processing_udop": ["UdopProcessor"], } @@ -51,7 +51,6 @@ pass else: _import_structure["modeling_udop"] = [ - "UDOP_PRETRAINED_MODEL_ARCHIVE_LIST", "UdopForConditionalGeneration", "UdopPreTrainedModel", "UdopModel", @@ -59,7 +58,7 @@ ] if TYPE_CHECKING: - from .configuration_udop import UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP, UdopConfig + from .configuration_udop import UdopConfig from .processing_udop import UdopProcessor try: @@ -85,7 +84,6 @@ pass else: from .modeling_udop import ( - 
UDOP_PRETRAINED_MODEL_ARCHIVE_LIST, UdopEncoderModel, UdopForConditionalGeneration, UdopModel, diff --git a/src/transformers/models/udop/configuration_udop.py b/src/transformers/models/udop/configuration_udop.py index ba124d0aa15e6d..3802b6de01a75b 100644 --- a/src/transformers/models/udop/configuration_udop.py +++ b/src/transformers/models/udop/configuration_udop.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class UdopConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`UdopForConditionalGeneration`]. It is used to diff --git a/src/transformers/models/udop/modeling_udop.py b/src/transformers/models/udop/modeling_udop.py index 9d12d9cc2e2173..0a2d003a568efd 100644 --- a/src/transformers/models/udop/modeling_udop.py +++ b/src/transformers/models/udop/modeling_udop.py @@ -47,9 +47,6 @@ logger = logging.getLogger(__name__) -from ..deprecated._archive_maps import UDOP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - _CONFIG_FOR_DOC = "UdopConfig" diff --git a/src/transformers/models/udop/tokenization_udop.py b/src/transformers/models/udop/tokenization_udop.py index c3b270bc55a8bf..3eaf1f5cadb9a4 100644 --- a/src/transformers/models/udop/tokenization_udop.py +++ b/src/transformers/models/udop/tokenization_udop.py @@ -147,15 +147,6 @@ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} -PRETRAINED_VOCAB_FILES_MAP = { - "vocab_file": { - "microsoft/udop-large": "https://huggingface.co/microsoft/udop-large/resolve/main/spiece.model", - }, - "tokenizer_file": { - "microsoft/udop-large": "https://huggingface.co/microsoft/udop-large/resolve/main/tokenizer.json", - }, -} - class UdopTokenizer(PreTrainedTokenizer): """ @@ -249,7 +240,6 @@ class UdopTokenizer(PreTrainedTokenizer): """ vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP 
model_input_names = ["input_ids", "attention_mask"] def __init__( diff --git a/src/transformers/models/udop/tokenization_udop_fast.py b/src/transformers/models/udop/tokenization_udop_fast.py index cce527a80537d9..db3b90e05c963a 100644 --- a/src/transformers/models/udop/tokenization_udop_fast.py +++ b/src/transformers/models/udop/tokenization_udop_fast.py @@ -39,14 +39,6 @@ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} -PRETRAINED_VOCAB_FILES_MAP = { - "vocab_file": { - "microsoft/udop-large": "https://huggingface.co/microsoft/udop-large/resolve/main/spiece.model", - }, - "tokenizer_file": { - "microsoft/udop-large": "https://huggingface.co/microsoft/udop-large/resolve/main/tokenizer.json", - }, -} logger = logging.get_logger(__name__) @@ -202,7 +194,6 @@ class UdopTokenizerFast(PreTrainedTokenizerFast): """ vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = UdopTokenizer diff --git a/src/transformers/models/unispeech/__init__.py b/src/transformers/models/unispeech/__init__.py index 2800fa17076e6e..91db9ada5ef297 100644 --- a/src/transformers/models/unispeech/__init__.py +++ b/src/transformers/models/unispeech/__init__.py @@ -22,7 +22,7 @@ ) -_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]} +_import_structure = {"configuration_unispeech": ["UniSpeechConfig"]} try: if not is_torch_available(): @@ -31,7 +31,6 @@ pass else: _import_structure["modeling_unispeech"] = [ - "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST", "UniSpeechForCTC", "UniSpeechForPreTraining", "UniSpeechForSequenceClassification", @@ -40,7 +39,7 @@ ] if TYPE_CHECKING: - from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig + from .configuration_unispeech import UniSpeechConfig try: if not is_torch_available(): @@ -49,7 +48,6 @@ pass else: from 
.modeling_unispeech import ( - UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, diff --git a/src/transformers/models/unispeech/configuration_unispeech.py b/src/transformers/models/unispeech/configuration_unispeech.py index 25a003ae9f5f9a..18502adcb0ec4b 100644 --- a/src/transformers/models/unispeech/configuration_unispeech.py +++ b/src/transformers/models/unispeech/configuration_unispeech.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class UniSpeechConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`UniSpeechModel`]. It is used to instantiate an diff --git a/src/transformers/models/unispeech/modeling_unispeech.py b/src/transformers/models/unispeech/modeling_unispeech.py index 8416258debe487..5c1557fb1f1834 100755 --- a/src/transformers/models/unispeech/modeling_unispeech.py +++ b/src/transformers/models/unispeech/modeling_unispeech.py @@ -65,9 +65,6 @@ _CTC_EXPECTED_LOSS = 17.17 -from ..deprecated._archive_maps import UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/unispeech_sat/__init__.py b/src/transformers/models/unispeech_sat/__init__.py index d1ac3ec2c43fb9..275f98ac222024 100644 --- a/src/transformers/models/unispeech_sat/__init__.py +++ b/src/transformers/models/unispeech_sat/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_unispeech_sat": ["UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechSatConfig"], + "configuration_unispeech_sat": ["UniSpeechSatConfig"], } try: @@ -33,7 +33,6 @@ pass else: _import_structure["modeling_unispeech_sat"] = [ - 
"UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST", "UniSpeechSatForAudioFrameClassification", "UniSpeechSatForCTC", "UniSpeechSatForPreTraining", @@ -44,7 +43,7 @@ ] if TYPE_CHECKING: - from .configuration_unispeech_sat import UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechSatConfig + from .configuration_unispeech_sat import UniSpeechSatConfig try: if not is_torch_available(): @@ -53,7 +52,6 @@ pass else: from .modeling_unispeech_sat import ( - UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechSatForAudioFrameClassification, UniSpeechSatForCTC, UniSpeechSatForPreTraining, diff --git a/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py b/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py index 1e6e40ad48515e..87b4bc8506dd73 100644 --- a/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py +++ b/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class UniSpeechSatConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`UniSpeechSatModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py index fab4670fe5149c..853c521e5e9cfa 100755 --- a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py +++ b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py @@ -81,9 +81,6 @@ _XVECTOR_EXPECTED_OUTPUT = 0.97 -from ..deprecated._archive_maps import UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/univnet/__init__.py b/src/transformers/models/univnet/__init__.py index afb03ee9894b0e..ea9babc3314f40 100644 --- a/src/transformers/models/univnet/__init__.py +++ b/src/transformers/models/univnet/__init__.py @@ -22,10 +22,7 @@ _import_structure = { - "configuration_univnet": [ - "UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP", - "UnivNetConfig", - ], + "configuration_univnet": ["UnivNetConfig"], "feature_extraction_univnet": ["UnivNetFeatureExtractor"], } @@ -36,14 +33,12 @@ pass else: _import_structure["modeling_univnet"] = [ - "UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST", "UnivNetModel", ] if TYPE_CHECKING: from .configuration_univnet import ( - UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP, UnivNetConfig, ) from .feature_extraction_univnet import UnivNetFeatureExtractor @@ -55,7 +50,6 @@ pass else: from .modeling_univnet import ( - UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST, UnivNetModel, ) diff --git a/src/transformers/models/univnet/configuration_univnet.py b/src/transformers/models/univnet/configuration_univnet.py index 933db21d5ae381..27850e114d3d2d 100644 --- a/src/transformers/models/univnet/configuration_univnet.py +++ b/src/transformers/models/univnet/configuration_univnet.py @@ -20,9 +20,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps 
import UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class UnivNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`UnivNetModel`]. It is used to instantiate a diff --git a/src/transformers/models/univnet/modeling_univnet.py b/src/transformers/models/univnet/modeling_univnet.py index c2551d72653196..e4fc1215c08bd6 100644 --- a/src/transformers/models/univnet/modeling_univnet.py +++ b/src/transformers/models/univnet/modeling_univnet.py @@ -33,9 +33,6 @@ _CHECKPOINT_FOR_DOC = "dg845/univnet-dev" -from ..deprecated._archive_maps import UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class UnivNetModelOutput(ModelOutput): """ diff --git a/src/transformers/models/videomae/__init__.py b/src/transformers/models/videomae/__init__.py index 663b6d41aba605..0e52081adbca5b 100644 --- a/src/transformers/models/videomae/__init__.py +++ b/src/transformers/models/videomae/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_videomae": ["VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VideoMAEConfig"], + "configuration_videomae": ["VideoMAEConfig"], } try: @@ -27,7 +27,6 @@ pass else: _import_structure["modeling_videomae"] = [ - "VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST", "VideoMAEForPreTraining", "VideoMAEModel", "VideoMAEPreTrainedModel", @@ -44,7 +43,7 @@ _import_structure["image_processing_videomae"] = ["VideoMAEImageProcessor"] if TYPE_CHECKING: - from .configuration_videomae import VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP, VideoMAEConfig + from .configuration_videomae import VideoMAEConfig try: if not is_torch_available(): @@ -53,7 +52,6 @@ pass else: from .modeling_videomae import ( - VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, diff --git a/src/transformers/models/videomae/configuration_videomae.py b/src/transformers/models/videomae/configuration_videomae.py index ba3d1d82736bc2..b1cfcaecfae2c6 100644 --- 
a/src/transformers/models/videomae/configuration_videomae.py +++ b/src/transformers/models/videomae/configuration_videomae.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class VideoMAEConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VideoMAEModel`]. It is used to instantiate a diff --git a/src/transformers/models/videomae/modeling_videomae.py b/src/transformers/models/videomae/modeling_videomae.py index 6beb18bb77ce0a..100bee54389569 100644 --- a/src/transformers/models/videomae/modeling_videomae.py +++ b/src/transformers/models/videomae/modeling_videomae.py @@ -48,9 +48,6 @@ _CHECKPOINT_FOR_DOC = "MCG-NJU/videomae-base" -from ..deprecated._archive_maps import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class VideoMAEDecoderOutput(ModelOutput): """ diff --git a/src/transformers/models/vilt/__init__.py b/src/transformers/models/vilt/__init__.py index 6d5afba10dacfc..6fcfd64c8beb68 100644 --- a/src/transformers/models/vilt/__init__.py +++ b/src/transformers/models/vilt/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available -_import_structure = {"configuration_vilt": ["VILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViltConfig"]} +_import_structure = {"configuration_vilt": ["ViltConfig"]} try: if not is_vision_available(): @@ -35,7 +35,6 @@ pass else: _import_structure["modeling_vilt"] = [ - "VILT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViltForImageAndTextRetrieval", "ViltForImagesAndTextClassification", "ViltForTokenClassification", @@ -48,7 +47,7 @@ if TYPE_CHECKING: - from .configuration_vilt import VILT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViltConfig + from .configuration_vilt import ViltConfig try: if not is_vision_available(): @@ -67,7 +66,6 @@ pass else: from .modeling_vilt import ( - 
VILT_PRETRAINED_MODEL_ARCHIVE_LIST, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, diff --git a/src/transformers/models/vilt/configuration_vilt.py b/src/transformers/models/vilt/configuration_vilt.py index 0ad4bde69494d7..ef0ce550d2a044 100644 --- a/src/transformers/models/vilt/configuration_vilt.py +++ b/src/transformers/models/vilt/configuration_vilt.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import VILT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ViltConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ViLTModel`]. It is used to instantiate an ViLT diff --git a/src/transformers/models/vilt/modeling_vilt.py b/src/transformers/models/vilt/modeling_vilt.py index e5f775cfc6f079..4395bb47b5eb3c 100755 --- a/src/transformers/models/vilt/modeling_vilt.py +++ b/src/transformers/models/vilt/modeling_vilt.py @@ -49,9 +49,6 @@ _CHECKPOINT_FOR_DOC = "dandelin/vilt-b32-mlm" -from ..deprecated._archive_maps import VILT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class ViltForImagesAndTextClassificationOutput(ModelOutput): """ diff --git a/src/transformers/models/vipllava/__init__.py b/src/transformers/models/vipllava/__init__.py index 2853605ba2d275..edc2a5106ba7cf 100644 --- a/src/transformers/models/vipllava/__init__.py +++ b/src/transformers/models/vipllava/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available -_import_structure = {"configuration_vipllava": ["VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", "VipLlavaConfig"]} +_import_structure = {"configuration_vipllava": ["VipLlavaConfig"]} try: @@ -26,14 +26,13 @@ pass else: _import_structure["modeling_vipllava"] = [ - "VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST", "VipLlavaForConditionalGeneration", "VipLlavaPreTrainedModel", ] if TYPE_CHECKING: - from .configuration_vipllava import 
VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, VipLlavaConfig + from .configuration_vipllava import VipLlavaConfig try: if not is_torch_available(): @@ -42,7 +41,6 @@ pass else: from .modeling_vipllava import ( - VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST, VipLlavaForConditionalGeneration, VipLlavaPreTrainedModel, ) diff --git a/src/transformers/models/vipllava/configuration_vipllava.py b/src/transformers/models/vipllava/configuration_vipllava.py index d57f4179492ea2..e94d4be6c1e0b6 100644 --- a/src/transformers/models/vipllava/configuration_vipllava.py +++ b/src/transformers/models/vipllava/configuration_vipllava.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class VipLlavaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VipLlavaForConditionalGeneration`]. It is used to instantiate an diff --git a/src/transformers/models/vipllava/modeling_vipllava.py b/src/transformers/models/vipllava/modeling_vipllava.py index fade9851c15830..02821b03a27975 100644 --- a/src/transformers/models/vipllava/modeling_vipllava.py +++ b/src/transformers/models/vipllava/modeling_vipllava.py @@ -39,9 +39,6 @@ _CONFIG_FOR_DOC = "VipLlavaConfig" -from ..deprecated._archive_maps import VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass # Copied from transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast with Idefics->VipLlava class VipLlavaCausalLMOutputWithPast(ModelOutput): diff --git a/src/transformers/models/visual_bert/__init__.py b/src/transformers/models/visual_bert/__init__.py index a752f1fa0c1476..db74a924a85cc7 100644 --- a/src/transformers/models/visual_bert/__init__.py +++ b/src/transformers/models/visual_bert/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available -_import_structure = {"configuration_visual_bert": 
["VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VisualBertConfig"]} +_import_structure = {"configuration_visual_bert": ["VisualBertConfig"]} try: if not is_torch_available(): @@ -25,7 +25,6 @@ pass else: _import_structure["modeling_visual_bert"] = [ - "VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "VisualBertForMultipleChoice", "VisualBertForPreTraining", "VisualBertForQuestionAnswering", @@ -38,7 +37,7 @@ if TYPE_CHECKING: - from .configuration_visual_bert import VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, VisualBertConfig + from .configuration_visual_bert import VisualBertConfig try: if not is_torch_available(): @@ -47,7 +46,6 @@ pass else: from .modeling_visual_bert import ( - VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, diff --git a/src/transformers/models/visual_bert/configuration_visual_bert.py b/src/transformers/models/visual_bert/configuration_visual_bert.py index 2edf5466e347b8..bb146a143aab9f 100644 --- a/src/transformers/models/visual_bert/configuration_visual_bert.py +++ b/src/transformers/models/visual_bert/configuration_visual_bert.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class VisualBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VisualBertModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/visual_bert/modeling_visual_bert.py b/src/transformers/models/visual_bert/modeling_visual_bert.py index 33df2ac13cf5b9..f482ed8cd7c4aa 100755 --- a/src/transformers/models/visual_bert/modeling_visual_bert.py +++ b/src/transformers/models/visual_bert/modeling_visual_bert.py @@ -49,9 +49,6 @@ _CHECKPOINT_FOR_DOC = "uclanlp/visualbert-vqa-coco-pre" -from ..deprecated._archive_maps import VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class VisualBertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings and visual embeddings.""" diff --git a/src/transformers/models/vit/__init__.py b/src/transformers/models/vit/__init__.py index d426ec93bf5859..db41e881faafa6 100644 --- a/src/transformers/models/vit/__init__.py +++ b/src/transformers/models/vit/__init__.py @@ -23,7 +23,7 @@ ) -_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} +_import_structure = {"configuration_vit": ["ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): @@ -41,7 +41,6 @@ pass else: _import_structure["modeling_vit"] = [ - "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", @@ -73,7 +72,7 @@ ] if TYPE_CHECKING: - from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig + from .configuration_vit import ViTConfig, ViTOnnxConfig try: if not is_vision_available(): @@ -91,7 +90,6 @@ pass else: from .modeling_vit import ( - VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, diff --git a/src/transformers/models/vit/configuration_vit.py b/src/transformers/models/vit/configuration_vit.py index 4b505b5d9cbb6d..286d302c7883d5 100644 --- a/src/transformers/models/vit/configuration_vit.py +++ b/src/transformers/models/vit/configuration_vit.py @@ -27,9 +27,6 @@ logger = 
logging.get_logger(__name__) -from ..deprecated._archive_maps import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ViTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ViTModel`]. It is used to instantiate an ViT diff --git a/src/transformers/models/vit/modeling_vit.py b/src/transformers/models/vit/modeling_vit.py index 4ccdd1deaf4ca1..8aa43c5c43c500 100644 --- a/src/transformers/models/vit/modeling_vit.py +++ b/src/transformers/models/vit/modeling_vit.py @@ -57,9 +57,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "Egyptian cat" -from ..deprecated._archive_maps import VIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class ViTEmbeddings(nn.Module): """ Construct the CLS token, position and patch embeddings. Optionally, also the mask token. diff --git a/src/transformers/models/vit_hybrid/__init__.py b/src/transformers/models/vit_hybrid/__init__.py index 47342d3a260438..f87e44449a978e 100644 --- a/src/transformers/models/vit_hybrid/__init__.py +++ b/src/transformers/models/vit_hybrid/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available -_import_structure = {"configuration_vit_hybrid": ["VIT_HYBRID_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTHybridConfig"]} +_import_structure = {"configuration_vit_hybrid": ["ViTHybridConfig"]} try: if not is_torch_available(): @@ -25,7 +25,6 @@ pass else: _import_structure["modeling_vit_hybrid"] = [ - "VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTHybridForImageClassification", "ViTHybridModel", "ViTHybridPreTrainedModel", @@ -41,7 +40,7 @@ if TYPE_CHECKING: - from .configuration_vit_hybrid import VIT_HYBRID_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTHybridConfig + from .configuration_vit_hybrid import ViTHybridConfig try: if not is_torch_available(): @@ -50,7 +49,6 @@ pass else: from .modeling_vit_hybrid import ( - VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST, ViTHybridForImageClassification, ViTHybridModel, 
ViTHybridPreTrainedModel, diff --git a/src/transformers/models/vit_hybrid/configuration_vit_hybrid.py b/src/transformers/models/vit_hybrid/configuration_vit_hybrid.py index 8a8a808ec60d05..2b9dcd0a81159f 100644 --- a/src/transformers/models/vit_hybrid/configuration_vit_hybrid.py +++ b/src/transformers/models/vit_hybrid/configuration_vit_hybrid.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import VIT_HYBRID_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ViTHybridConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ViTHybridModel`]. It is used to instantiate a ViT diff --git a/src/transformers/models/vit_hybrid/modeling_vit_hybrid.py b/src/transformers/models/vit_hybrid/modeling_vit_hybrid.py index 6fe9f8d2b6c9bd..20579e0d3db2cc 100644 --- a/src/transformers/models/vit_hybrid/modeling_vit_hybrid.py +++ b/src/transformers/models/vit_hybrid/modeling_vit_hybrid.py @@ -47,9 +47,6 @@ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" -from ..deprecated._archive_maps import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class ViTHybridEmbeddings(nn.Module): """ Construct the CLS token, position and patch embeddings. Optionally, also the mask token. 
diff --git a/src/transformers/models/vit_mae/__init__.py b/src/transformers/models/vit_mae/__init__.py index bfd200e9dcb913..f5360061762e6f 100644 --- a/src/transformers/models/vit_mae/__init__.py +++ b/src/transformers/models/vit_mae/__init__.py @@ -22,7 +22,7 @@ ) -_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]} +_import_structure = {"configuration_vit_mae": ["ViTMAEConfig"]} try: if not is_torch_available(): @@ -31,7 +31,6 @@ pass else: _import_structure["modeling_vit_mae"] = [ - "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTMAEForPreTraining", "ViTMAELayer", "ViTMAEModel", @@ -51,7 +50,7 @@ ] if TYPE_CHECKING: - from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig + from .configuration_vit_mae import ViTMAEConfig try: if not is_torch_available(): @@ -60,7 +59,6 @@ pass else: from .modeling_vit_mae import ( - VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, diff --git a/src/transformers/models/vit_mae/configuration_vit_mae.py b/src/transformers/models/vit_mae/configuration_vit_mae.py index c5866ef40b497c..e4e46e7e4202d0 100644 --- a/src/transformers/models/vit_mae/configuration_vit_mae.py +++ b/src/transformers/models/vit_mae/configuration_vit_mae.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ViTMAEConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ViTMAEModel`]. 
It is used to instantiate an ViT diff --git a/src/transformers/models/vit_mae/modeling_vit_mae.py b/src/transformers/models/vit_mae/modeling_vit_mae.py index bfbe59ea903a1a..b652c9e71f9106 100755 --- a/src/transformers/models/vit_mae/modeling_vit_mae.py +++ b/src/transformers/models/vit_mae/modeling_vit_mae.py @@ -46,9 +46,6 @@ _CHECKPOINT_FOR_DOC = "facebook/vit-mae-base" -from ..deprecated._archive_maps import VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class ViTMAEModelOutput(ModelOutput): """ diff --git a/src/transformers/models/vit_msn/__init__.py b/src/transformers/models/vit_msn/__init__.py index c36cb750cfa4e6..88f7ff73d29b69 100644 --- a/src/transformers/models/vit_msn/__init__.py +++ b/src/transformers/models/vit_msn/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available -_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]} +_import_structure = {"configuration_vit_msn": ["ViTMSNConfig"]} try: if not is_torch_available(): @@ -25,14 +25,13 @@ pass else: _import_structure["modeling_vit_msn"] = [ - "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTMSNModel", "ViTMSNForImageClassification", "ViTMSNPreTrainedModel", ] if TYPE_CHECKING: - from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig + from .configuration_vit_msn import ViTMSNConfig try: if not is_torch_available(): @@ -41,7 +40,6 @@ pass else: from .modeling_vit_msn import ( - VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, diff --git a/src/transformers/models/vit_msn/configuration_vit_msn.py b/src/transformers/models/vit_msn/configuration_vit_msn.py index 296434346625f5..14acb15d549c04 100644 --- a/src/transformers/models/vit_msn/configuration_vit_msn.py +++ b/src/transformers/models/vit_msn/configuration_vit_msn.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from 
..deprecated._archive_maps import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class ViTMSNConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ViTMSNModel`]. It is used to instantiate an ViT diff --git a/src/transformers/models/vit_msn/modeling_vit_msn.py b/src/transformers/models/vit_msn/modeling_vit_msn.py index 424d657dc87859..0632738455d1ab 100644 --- a/src/transformers/models/vit_msn/modeling_vit_msn.py +++ b/src/transformers/models/vit_msn/modeling_vit_msn.py @@ -38,8 +38,6 @@ _CONFIG_FOR_DOC = "ViTMSNConfig" _CHECKPOINT_FOR_DOC = "facebook/vit-msn-small" -from ..deprecated._archive_maps import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - class ViTMSNEmbeddings(nn.Module): """ diff --git a/src/transformers/models/vitdet/__init__.py b/src/transformers/models/vitdet/__init__.py index 8ccc1365820d69..a7ee9c755ff19b 100644 --- a/src/transformers/models/vitdet/__init__.py +++ b/src/transformers/models/vitdet/__init__.py @@ -20,7 +20,7 @@ ) -_import_structure = {"configuration_vitdet": ["VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP", "VitDetConfig"]} +_import_structure = {"configuration_vitdet": ["VitDetConfig"]} try: if not is_torch_available(): @@ -29,14 +29,13 @@ pass else: _import_structure["modeling_vitdet"] = [ - "VITDET_PRETRAINED_MODEL_ARCHIVE_LIST", "VitDetModel", "VitDetPreTrainedModel", "VitDetBackbone", ] if TYPE_CHECKING: - from .configuration_vitdet import VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP, VitDetConfig + from .configuration_vitdet import VitDetConfig try: if not is_torch_available(): @@ -45,7 +44,6 @@ pass else: from .modeling_vitdet import ( - VITDET_PRETRAINED_MODEL_ARCHIVE_LIST, VitDetBackbone, VitDetModel, VitDetPreTrainedModel, diff --git a/src/transformers/models/vitdet/configuration_vitdet.py b/src/transformers/models/vitdet/configuration_vitdet.py index 2a7973dde87979..f85558c254018d 100644 --- a/src/transformers/models/vitdet/configuration_vitdet.py +++ 
b/src/transformers/models/vitdet/configuration_vitdet.py @@ -23,9 +23,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class VitDetConfig(BackboneConfigMixin, PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VitDetModel`]. It is used to instantiate an diff --git a/src/transformers/models/vitdet/modeling_vitdet.py b/src/transformers/models/vitdet/modeling_vitdet.py index 985f00b7e54f56..9dc3e476f39889 100644 --- a/src/transformers/models/vitdet/modeling_vitdet.py +++ b/src/transformers/models/vitdet/modeling_vitdet.py @@ -42,9 +42,6 @@ _CONFIG_FOR_DOC = "VitDetConfig" -from ..deprecated._archive_maps import VITDET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class VitDetEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial diff --git a/src/transformers/models/vitmatte/__init__.py b/src/transformers/models/vitmatte/__init__.py index abbfae97c22030..7745a96cc6d545 100644 --- a/src/transformers/models/vitmatte/__init__.py +++ b/src/transformers/models/vitmatte/__init__.py @@ -21,7 +21,7 @@ ) -_import_structure = {"configuration_vitmatte": ["VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VitMatteConfig"]} +_import_structure = {"configuration_vitmatte": ["VitMatteConfig"]} try: if not is_vision_available(): @@ -38,13 +38,12 @@ pass else: _import_structure["modeling_vitmatte"] = [ - "VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST", "VitMattePreTrainedModel", "VitMatteForImageMatting", ] if TYPE_CHECKING: - from .configuration_vitmatte import VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP, VitMatteConfig + from .configuration_vitmatte import VitMatteConfig try: if not is_vision_available(): @@ -61,7 +60,6 @@ pass else: from .modeling_vitmatte import ( - VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST, VitMatteForImageMatting, VitMattePreTrainedModel, ) diff --git 
a/src/transformers/models/vitmatte/configuration_vitmatte.py b/src/transformers/models/vitmatte/configuration_vitmatte.py index 275640d1d079a1..67f562d2bd13d5 100644 --- a/src/transformers/models/vitmatte/configuration_vitmatte.py +++ b/src/transformers/models/vitmatte/configuration_vitmatte.py @@ -25,9 +25,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class VitMatteConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of [`VitMatteForImageMatting`]. It is used to diff --git a/src/transformers/models/vitmatte/modeling_vitmatte.py b/src/transformers/models/vitmatte/modeling_vitmatte.py index 4d204a8e563a8d..f7bfb06d9a6900 100644 --- a/src/transformers/models/vitmatte/modeling_vitmatte.py +++ b/src/transformers/models/vitmatte/modeling_vitmatte.py @@ -28,7 +28,6 @@ replace_return_docstrings, ) from ...utils.backbone_utils import load_backbone -from ..deprecated._archive_maps import VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 from .configuration_vitmatte import VitMatteConfig diff --git a/src/transformers/models/vits/__init__.py b/src/transformers/models/vits/__init__.py index 79c18048e7c776..14428463d28a50 100644 --- a/src/transformers/models/vits/__init__.py +++ b/src/transformers/models/vits/__init__.py @@ -23,10 +23,7 @@ _import_structure = { - "configuration_vits": [ - "VITS_PRETRAINED_CONFIG_ARCHIVE_MAP", - "VitsConfig", - ], + "configuration_vits": ["VitsConfig"], "tokenization_vits": ["VitsTokenizer"], } @@ -37,14 +34,12 @@ pass else: _import_structure["modeling_vits"] = [ - "VITS_PRETRAINED_MODEL_ARCHIVE_LIST", "VitsModel", "VitsPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vits import ( - VITS_PRETRAINED_CONFIG_ARCHIVE_MAP, VitsConfig, ) from .tokenization_vits import VitsTokenizer @@ -56,7 +51,6 @@ pass else: from .modeling_vits import ( - VITS_PRETRAINED_MODEL_ARCHIVE_LIST, VitsModel, 
VitsPreTrainedModel, ) diff --git a/src/transformers/models/vits/configuration_vits.py b/src/transformers/models/vits/configuration_vits.py index 5538e53d4be1b8..8d5ffca36f3674 100644 --- a/src/transformers/models/vits/configuration_vits.py +++ b/src/transformers/models/vits/configuration_vits.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import VITS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class VitsConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VitsModel`]. It is used to instantiate a VITS diff --git a/src/transformers/models/vits/modeling_vits.py b/src/transformers/models/vits/modeling_vits.py index df8cf9350b3128..905945e01ae1d8 100644 --- a/src/transformers/models/vits/modeling_vits.py +++ b/src/transformers/models/vits/modeling_vits.py @@ -42,9 +42,6 @@ _CONFIG_FOR_DOC = "VitsConfig" -from ..deprecated._archive_maps import VITS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class VitsModelOutput(ModelOutput): """ diff --git a/src/transformers/models/vivit/__init__.py b/src/transformers/models/vivit/__init__.py index ec446b79707255..261238edccbe75 100644 --- a/src/transformers/models/vivit/__init__.py +++ b/src/transformers/models/vivit/__init__.py @@ -22,7 +22,7 @@ _import_structure = { - "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"], + "configuration_vivit": ["VivitConfig"], } try: if not is_vision_available(): @@ -40,7 +40,6 @@ pass else: _import_structure["modeling_vivit"] = [ - "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "VivitModel", "VivitPreTrainedModel", "VivitForVideoClassification", @@ -48,7 +47,7 @@ if TYPE_CHECKING: - from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig + from .configuration_vivit import VivitConfig try: if not is_vision_available(): @@ -65,7 +64,6 @@ pass else: from .modeling_vivit import ( - VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, 
VivitForVideoClassification, VivitModel, VivitPreTrainedModel, diff --git a/src/transformers/models/vivit/configuration_vivit.py b/src/transformers/models/vivit/configuration_vivit.py index 28ac13496f82f8..4cbebc7692c804 100644 --- a/src/transformers/models/vivit/configuration_vivit.py +++ b/src/transformers/models/vivit/configuration_vivit.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class VivitConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VivitModel`]. It is used to instantiate a ViViT diff --git a/src/transformers/models/vivit/modeling_vivit.py b/src/transformers/models/vivit/modeling_vivit.py index ef94b836a48746..c8bac4f02e8b31 100755 --- a/src/transformers/models/vivit/modeling_vivit.py +++ b/src/transformers/models/vivit/modeling_vivit.py @@ -37,9 +37,6 @@ _CONFIG_FOR_DOC = "VivitConfig" -from ..deprecated._archive_maps import VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class VivitTubeletEmbeddings(nn.Module): """ Construct Vivit Tubelet embeddings. 
diff --git a/src/transformers/models/wav2vec2/__init__.py b/src/transformers/models/wav2vec2/__init__.py index b3abdb99ec722d..06e1c6628db9a8 100644 --- a/src/transformers/models/wav2vec2/__init__.py +++ b/src/transformers/models/wav2vec2/__init__.py @@ -23,7 +23,7 @@ _import_structure = { - "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"], + "configuration_wav2vec2": ["Wav2Vec2Config"], "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"], "processing_wav2vec2": ["Wav2Vec2Processor"], "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"], @@ -37,7 +37,6 @@ pass else: _import_structure["modeling_wav2vec2"] = [ - "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ForAudioFrameClassification", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", @@ -55,7 +54,6 @@ pass else: _import_structure["modeling_tf_wav2vec2"] = [ - "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", @@ -77,7 +75,7 @@ if TYPE_CHECKING: - from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config + from .configuration_wav2vec2 import Wav2Vec2Config from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor from .processing_wav2vec2 import Wav2Vec2Processor from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer @@ -89,7 +87,6 @@ pass else: from .modeling_wav2vec2 import ( - WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2ForAudioFrameClassification, Wav2Vec2ForCTC, Wav2Vec2ForMaskedLM, @@ -107,7 +104,6 @@ pass else: from .modeling_tf_wav2vec2 import ( - TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWav2Vec2ForCTC, TFWav2Vec2ForSequenceClassification, TFWav2Vec2Model, diff --git a/src/transformers/models/wav2vec2/configuration_wav2vec2.py b/src/transformers/models/wav2vec2/configuration_wav2vec2.py index 252674bb3da3fd..1d6777efcb74ee 100644 --- a/src/transformers/models/wav2vec2/configuration_wav2vec2.py +++ 
b/src/transformers/models/wav2vec2/configuration_wav2vec2.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Wav2Vec2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Wav2Vec2Model`]. It is used to instantiate an diff --git a/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py index a8e39b0754af75..efbae8f2324812 100644 --- a/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py @@ -53,9 +53,6 @@ _CONFIG_FOR_DOC = "Wav2Vec2Config" -from ..deprecated._archive_maps import TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - LARGE_NEGATIVE = -1e8 diff --git a/src/transformers/models/wav2vec2/modeling_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_wav2vec2.py index 8a4c3e0615b9c8..ec928762b58733 100755 --- a/src/transformers/models/wav2vec2/modeling_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_wav2vec2.py @@ -96,9 +96,6 @@ _XVECTOR_EXPECTED_OUTPUT = 0.98 -from ..deprecated._archive_maps import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/wav2vec2_bert/__init__.py b/src/transformers/models/wav2vec2_bert/__init__.py index 594f108bcaad96..be37038211a811 100644 --- a/src/transformers/models/wav2vec2_bert/__init__.py +++ b/src/transformers/models/wav2vec2_bert/__init__.py @@ -17,10 +17,7 @@ _import_structure = { - "configuration_wav2vec2_bert": [ - "WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", - "Wav2Vec2BertConfig", - ], + "configuration_wav2vec2_bert": ["Wav2Vec2BertConfig"], "processing_wav2vec2_bert": 
["Wav2Vec2BertProcessor"], } @@ -32,7 +29,6 @@ pass else: _import_structure["modeling_wav2vec2_bert"] = [ - "WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2BertForAudioFrameClassification", "Wav2Vec2BertForCTC", "Wav2Vec2BertForSequenceClassification", @@ -43,7 +39,6 @@ if TYPE_CHECKING: from .configuration_wav2vec2_bert import ( - WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2BertConfig, ) from .processing_wav2vec2_bert import Wav2Vec2BertProcessor @@ -55,7 +50,6 @@ pass else: from .modeling_wav2vec2_bert import ( - WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2BertForAudioFrameClassification, Wav2Vec2BertForCTC, Wav2Vec2BertForSequenceClassification, diff --git a/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py b/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py index 4183c1e4c06e7b..f6c364884bd2b4 100644 --- a/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py +++ b/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py @@ -22,9 +22,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Wav2Vec2BertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Wav2Vec2BertModel`]. 
It is used to diff --git a/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py b/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py index 1bff2956f41fe4..077546a65e93ff 100644 --- a/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py +++ b/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py @@ -64,9 +64,6 @@ _CTC_EXPECTED_LOSS = 17.04 -from ..deprecated._archive_maps import WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2._compute_new_attention_mask def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor): """ diff --git a/src/transformers/models/wav2vec2_conformer/__init__.py b/src/transformers/models/wav2vec2_conformer/__init__.py index 35081cfcdef97b..a780a50b6cce11 100644 --- a/src/transformers/models/wav2vec2_conformer/__init__.py +++ b/src/transformers/models/wav2vec2_conformer/__init__.py @@ -17,10 +17,7 @@ _import_structure = { - "configuration_wav2vec2_conformer": [ - "WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", - "Wav2Vec2ConformerConfig", - ], + "configuration_wav2vec2_conformer": ["Wav2Vec2ConformerConfig"], } @@ -31,7 +28,6 @@ pass else: _import_structure["modeling_wav2vec2_conformer"] = [ - "WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ConformerForAudioFrameClassification", "Wav2Vec2ConformerForCTC", "Wav2Vec2ConformerForPreTraining", @@ -43,7 +39,6 @@ if TYPE_CHECKING: from .configuration_wav2vec2_conformer import ( - WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2ConformerConfig, ) @@ -54,7 +49,6 @@ pass else: from .modeling_wav2vec2_conformer import ( - WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2ConformerForAudioFrameClassification, Wav2Vec2ConformerForCTC, Wav2Vec2ConformerForPreTraining, diff --git a/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py 
b/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py index 1b99edcece527b..5c931342c9cf31 100644 --- a/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py +++ b/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class Wav2Vec2ConformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Wav2Vec2ConformerModel`]. It is used to diff --git a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py index 49a50befe443dd..da8ed52faf32f8 100644 --- a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +++ b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py @@ -65,9 +65,6 @@ _CTC_EXPECTED_LOSS = 64.21 -from ..deprecated._archive_maps import WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForPreTrainingOutput with Wav2Vec2->Wav2Vec2Conformer class Wav2Vec2ConformerForPreTrainingOutput(ModelOutput): diff --git a/src/transformers/models/wavlm/__init__.py b/src/transformers/models/wavlm/__init__.py index 3d48a3615bb4a3..d615a3a5ae4062 100644 --- a/src/transformers/models/wavlm/__init__.py +++ b/src/transformers/models/wavlm/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available -_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]} +_import_structure = {"configuration_wavlm": ["WavLMConfig"]} try: if not is_torch_available(): @@ -25,7 +25,6 @@ pass else: _import_structure["modeling_wavlm"] = [ - "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST", 
"WavLMForAudioFrameClassification", "WavLMForCTC", "WavLMForSequenceClassification", @@ -35,7 +34,7 @@ ] if TYPE_CHECKING: - from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig + from .configuration_wavlm import WavLMConfig try: if not is_torch_available(): @@ -44,7 +43,6 @@ pass else: from .modeling_wavlm import ( - WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, diff --git a/src/transformers/models/wavlm/configuration_wavlm.py b/src/transformers/models/wavlm/configuration_wavlm.py index c0f5f90fe321af..a860475336f931 100644 --- a/src/transformers/models/wavlm/configuration_wavlm.py +++ b/src/transformers/models/wavlm/configuration_wavlm.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class WavLMConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`WavLMModel`]. 
It is used to instantiate an WavLM diff --git a/src/transformers/models/wavlm/modeling_wavlm.py b/src/transformers/models/wavlm/modeling_wavlm.py index 1db656da60a538..dd52277b9a6130 100755 --- a/src/transformers/models/wavlm/modeling_wavlm.py +++ b/src/transformers/models/wavlm/modeling_wavlm.py @@ -71,9 +71,6 @@ _XVECTOR_EXPECTED_OUTPUT = 0.97 -from ..deprecated._archive_maps import WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: Tuple[int, int], diff --git a/src/transformers/models/whisper/__init__.py b/src/transformers/models/whisper/__init__.py index d87828da69f5d1..5d37e72c02b5df 100644 --- a/src/transformers/models/whisper/__init__.py +++ b/src/transformers/models/whisper/__init__.py @@ -24,7 +24,7 @@ _import_structure = { - "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"], + "configuration_whisper": ["WhisperConfig", "WhisperOnnxConfig"], "feature_extraction_whisper": ["WhisperFeatureExtractor"], "processing_whisper": ["WhisperProcessor"], "tokenization_whisper": ["WhisperTokenizer"], @@ -45,7 +45,6 @@ pass else: _import_structure["modeling_whisper"] = [ - "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", "WhisperForCausalLM", "WhisperForConditionalGeneration", "WhisperModel", @@ -60,7 +59,6 @@ pass else: _import_structure["modeling_tf_whisper"] = [ - "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWhisperForConditionalGeneration", "TFWhisperModel", "TFWhisperPreTrainedModel", @@ -81,7 +79,7 @@ if TYPE_CHECKING: - from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig + from .configuration_whisper import WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer @@ -101,7 +99,6 @@ pass else: from 
.modeling_whisper import ( - WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForCausalLM, WhisperForConditionalGeneration, @@ -116,7 +113,6 @@ pass else: from .modeling_tf_whisper import ( - TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, diff --git a/src/transformers/models/whisper/configuration_whisper.py b/src/transformers/models/whisper/configuration_whisper.py index ec9c64df1bdb81..c924a21c2a571c 100644 --- a/src/transformers/models/whisper/configuration_whisper.py +++ b/src/transformers/models/whisper/configuration_whisper.py @@ -30,9 +30,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - # fmt: off NON_SPEECH_TOKENS = [ 1, 2, 7, 8, 9, 10, 14, 25, diff --git a/src/transformers/models/whisper/modeling_tf_whisper.py b/src/transformers/models/whisper/modeling_tf_whisper.py index 4d5dda71e8aaf3..8033bb584fd2aa 100644 --- a/src/transformers/models/whisper/modeling_tf_whisper.py +++ b/src/transformers/models/whisper/modeling_tf_whisper.py @@ -52,9 +52,6 @@ _CONFIG_FOR_DOC = "WhisperConfig" -from ..deprecated._archive_maps import TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - LARGE_NEGATIVE = -1e8 diff --git a/src/transformers/models/whisper/modeling_whisper.py b/src/transformers/models/whisper/modeling_whisper.py index b85375a4098d76..c0db404e5c88a5 100644 --- a/src/transformers/models/whisper/modeling_whisper.py +++ b/src/transformers/models/whisper/modeling_whisper.py @@ -59,9 +59,6 @@ _CHECKPOINT_FOR_DOC = "openai/whisper-tiny" -from ..deprecated._archive_maps import WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) diff --git a/src/transformers/models/x_clip/__init__.py 
b/src/transformers/models/x_clip/__init__.py index ed3d2ff5152830..2f60ad0ddee2d2 100644 --- a/src/transformers/models/x_clip/__init__.py +++ b/src/transformers/models/x_clip/__init__.py @@ -18,7 +18,6 @@ _import_structure = { "configuration_x_clip": [ - "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "XCLIPConfig", "XCLIPTextConfig", "XCLIPVisionConfig", @@ -33,7 +32,6 @@ pass else: _import_structure["modeling_x_clip"] = [ - "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "XCLIPModel", "XCLIPPreTrainedModel", "XCLIPTextModel", @@ -42,7 +40,6 @@ if TYPE_CHECKING: from .configuration_x_clip import ( - XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, @@ -56,7 +53,6 @@ pass else: from .modeling_x_clip import ( - XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, diff --git a/src/transformers/models/x_clip/configuration_x_clip.py b/src/transformers/models/x_clip/configuration_x_clip.py index 7795269b7e517a..757429d6dfe61a 100644 --- a/src/transformers/models/x_clip/configuration_x_clip.py +++ b/src/transformers/models/x_clip/configuration_x_clip.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class XCLIPTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`XCLIPModel`]. 
It is used to instantiate an X-CLIP diff --git a/src/transformers/models/x_clip/modeling_x_clip.py b/src/transformers/models/x_clip/modeling_x_clip.py index c9791fdfcc00df..092ea947617398 100644 --- a/src/transformers/models/x_clip/modeling_x_clip.py +++ b/src/transformers/models/x_clip/modeling_x_clip.py @@ -42,9 +42,6 @@ _CHECKPOINT_FOR_DOC = "microsoft/xclip-base-patch32" -from ..deprecated._archive_maps import XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: diff --git a/src/transformers/models/xglm/__init__.py b/src/transformers/models/xglm/__init__.py index 747a4ddb4ed9c7..59bba032f4ea2a 100644 --- a/src/transformers/models/xglm/__init__.py +++ b/src/transformers/models/xglm/__init__.py @@ -24,7 +24,7 @@ ) -_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]} +_import_structure = {"configuration_xglm": ["XGLMConfig"]} try: if not is_sentencepiece_available(): @@ -49,7 +49,6 @@ pass else: _import_structure["modeling_xglm"] = [ - "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XGLMForCausalLM", "XGLMModel", "XGLMPreTrainedModel", @@ -76,7 +75,6 @@ pass else: _import_structure["modeling_tf_xglm"] = [ - "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXGLMForCausalLM", "TFXGLMModel", "TFXGLMPreTrainedModel", @@ -84,7 +82,7 @@ if TYPE_CHECKING: - from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig + from .configuration_xglm import XGLMConfig try: if not is_sentencepiece_available(): @@ -108,7 +106,7 @@ except OptionalDependencyNotAvailable: pass else: - from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel + from .modeling_xglm import XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): @@ -125,7 +123,6 @@ pass 
else: from .modeling_tf_xglm import ( - TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, diff --git a/src/transformers/models/xglm/configuration_xglm.py b/src/transformers/models/xglm/configuration_xglm.py index c67c67a4b29073..8eebcfaee68bbb 100644 --- a/src/transformers/models/xglm/configuration_xglm.py +++ b/src/transformers/models/xglm/configuration_xglm.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class XGLMConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`XGLMModel`]. It is used to instantiate an XGLM diff --git a/src/transformers/models/xglm/modeling_tf_xglm.py b/src/transformers/models/xglm/modeling_tf_xglm.py index e3003fdbc53ab6..6b563b665766cc 100644 --- a/src/transformers/models/xglm/modeling_tf_xglm.py +++ b/src/transformers/models/xglm/modeling_tf_xglm.py @@ -55,9 +55,6 @@ _CONFIG_FOR_DOC = "XGLMConfig" -from ..deprecated._archive_maps import TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - LARGE_NEGATIVE = -1e8 diff --git a/src/transformers/models/xglm/modeling_xglm.py b/src/transformers/models/xglm/modeling_xglm.py index 538c852ae99a8d..27d5fa36d71210 100755 --- a/src/transformers/models/xglm/modeling_xglm.py +++ b/src/transformers/models/xglm/modeling_xglm.py @@ -37,9 +37,6 @@ _CONFIG_FOR_DOC = "XGLMConfig" -from ..deprecated._archive_maps import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - XGLM_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads diff --git a/src/transformers/models/xlm/__init__.py b/src/transformers/models/xlm/__init__.py index 1dd57a90b92744..97d0933b8b9a7d 100644 --- a/src/transformers/models/xlm/__init__.py +++ b/src/transformers/models/xlm/__init__.py @@ -18,7 +18,7 @@ _import_structure = { - "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"], + "configuration_xlm": ["XLMConfig", "XLMOnnxConfig"], "tokenization_xlm": ["XLMTokenizer"], } @@ -29,7 +29,6 @@ pass else: _import_structure["modeling_xlm"] = [ - "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", @@ -47,7 +46,6 @@ pass else: _import_structure["modeling_tf_xlm"] = [ - "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", @@ -60,7 +58,7 @@ if TYPE_CHECKING: - from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig + from .configuration_xlm import XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: @@ -70,7 +68,6 @@ pass else: from .modeling_xlm import ( - XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, @@ -88,7 +85,6 @@ pass else: from .modeling_tf_xlm import ( - TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, diff --git a/src/transformers/models/xlm/configuration_xlm.py b/src/transformers/models/xlm/configuration_xlm.py index 3b1dadd5657e20..2f8b5d6ef29d82 100644 --- a/src/transformers/models/xlm/configuration_xlm.py +++ b/src/transformers/models/xlm/configuration_xlm.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import 
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class XLMConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`XLMModel`] or a [`TFXLMModel`]. It is used to diff --git a/src/transformers/models/xlm/modeling_tf_xlm.py b/src/transformers/models/xlm/modeling_tf_xlm.py index 45447a4236e118..ff5211a27cf2b1 100644 --- a/src/transformers/models/xlm/modeling_tf_xlm.py +++ b/src/transformers/models/xlm/modeling_tf_xlm.py @@ -67,9 +67,6 @@ _CONFIG_FOR_DOC = "XLMConfig" -from ..deprecated._archive_maps import TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def create_sinusoidal_embeddings(n_pos, dim, out): position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out[:, 0::2] = tf.constant(np.sin(position_enc[:, 0::2])) diff --git a/src/transformers/models/xlm/modeling_xlm.py b/src/transformers/models/xlm/modeling_xlm.py index aca93ffb6a30b2..23f93bebb71f8e 100755 --- a/src/transformers/models/xlm/modeling_xlm.py +++ b/src/transformers/models/xlm/modeling_xlm.py @@ -54,9 +54,6 @@ _CONFIG_FOR_DOC = "XLMConfig" -from ..deprecated._archive_maps import XLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def create_sinusoidal_embeddings(n_pos, dim, out): position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out.requires_grad = False diff --git a/src/transformers/models/xlm_prophetnet/__init__.py b/src/transformers/models/xlm_prophetnet/__init__.py index ff14e5b987a789..d9c24d9b4d2513 100644 --- a/src/transformers/models/xlm_prophetnet/__init__.py +++ b/src/transformers/models/xlm_prophetnet/__init__.py @@ -17,7 +17,7 @@ _import_structure = { - "configuration_xlm_prophetnet": ["XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMProphetNetConfig"], + "configuration_xlm_prophetnet": ["XLMProphetNetConfig"], } try: @@ -35,7 +35,6 @@ pass else: _import_structure["modeling_xlm_prophetnet"] = [ - 
"XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMProphetNetDecoder", "XLMProphetNetEncoder", "XLMProphetNetForCausalLM", @@ -46,7 +45,7 @@ if TYPE_CHECKING: - from .configuration_xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig + from .configuration_xlm_prophetnet import XLMProphetNetConfig try: if not is_sentencepiece_available(): @@ -63,7 +62,6 @@ pass else: from .modeling_xlm_prophetnet import ( - XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLMProphetNetDecoder, XLMProphetNetEncoder, XLMProphetNetForCausalLM, diff --git a/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py b/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py index f1a903c227bf59..bfb2a898f06ae8 100644 --- a/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py +++ b/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class XLMProphetNetConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`XLMProphetNetModel`]. It is used to instantiate a diff --git a/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py b/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py index 53b8a1fc20cbb5..ccba8fde5db2e8 100644 --- a/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py +++ b/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py @@ -45,9 +45,6 @@ _CONFIG_FOR_DOC = "XLMProphetNetConfig" -from ..deprecated._archive_maps import XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from src.transformers.models.prophetnet.modeling_prophetnet.PROPHETNET_START_DOCSTRING with ProphetNetConfig->XLMProphetNetConfig XLM_PROPHETNET_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the diff --git a/src/transformers/models/xlm_roberta/__init__.py b/src/transformers/models/xlm_roberta/__init__.py index 813cba9fe17c1d..00658bb9ed9b8d 100644 --- a/src/transformers/models/xlm_roberta/__init__.py +++ b/src/transformers/models/xlm_roberta/__init__.py @@ -27,7 +27,6 @@ _import_structure = { "configuration_xlm_roberta": [ - "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaConfig", "XLMRobertaOnnxConfig", ], @@ -56,7 +55,6 @@ pass else: _import_structure["modeling_xlm_roberta"] = [ - "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMRobertaForCausalLM", "XLMRobertaForMaskedLM", "XLMRobertaForMultipleChoice", @@ -74,7 +72,6 @@ pass else: _import_structure["modeling_tf_xlm_roberta"] = [ - "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMRobertaForCausalLM", "TFXLMRobertaForMaskedLM", "TFXLMRobertaForMultipleChoice", @@ -92,7 +89,6 @@ pass else: _import_structure["modeling_flax_xlm_roberta"] = [ - "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaxXLMRobertaForMaskedLM", "FlaxXLMRobertaForCausalLM", "FlaxXLMRobertaForMultipleChoice", @@ -105,7 +101,6 @@ if TYPE_CHECKING: from .configuration_xlm_roberta import ( - XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) @@ -133,7 +128,6 @@ pass else: from .modeling_xlm_roberta import ( - XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, @@ -151,7 +145,6 @@ pass else: from .modeling_tf_xlm_roberta import ( - TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, @@ -169,7 +162,6 @@ pass else: from .modeling_flax_xlm_roberta import ( - FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, diff --git a/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py 
b/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py index 3da0fbecd609fa..b9e348e498e4c9 100644 --- a/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py @@ -25,9 +25,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class XLMRobertaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`XLMRobertaModel`] or a [`TFXLMRobertaModel`]. It diff --git a/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py index 2caffc0b905f7f..e700fcd0244ad5 100644 --- a/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py @@ -52,9 +52,6 @@ remat = nn_partitioning.remat -from ..deprecated._archive_maps import FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.roberta.modeling_flax_roberta.create_position_ids_from_input_ids def create_position_ids_from_input_ids(input_ids, padding_idx): """ diff --git a/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py index 3b0efe6bd700b7..c0ad2c7c7dfc38 100644 --- a/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py @@ -68,9 +68,6 @@ _CONFIG_FOR_DOC = "XLMRobertaConfig" -from ..deprecated._archive_maps import TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - XLM_ROBERTA_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. 
Check the superclass documentation for the generic methods the diff --git a/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py index 48c6898811d1e0..642e5dab7a2ca3 100644 --- a/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py @@ -52,9 +52,6 @@ _CONFIG_FOR_DOC = "XLMRobertaConfig" -from ..deprecated._archive_maps import XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->XLMRoberta class XLMRobertaEmbeddings(nn.Module): """ diff --git a/src/transformers/models/xlm_roberta_xl/__init__.py b/src/transformers/models/xlm_roberta_xl/__init__.py index 2df95dbc49200e..68ae26b06d6ca9 100644 --- a/src/transformers/models/xlm_roberta_xl/__init__.py +++ b/src/transformers/models/xlm_roberta_xl/__init__.py @@ -19,7 +19,6 @@ _import_structure = { "configuration_xlm_roberta_xl": [ - "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaXLConfig", "XLMRobertaXLOnnxConfig", ], @@ -32,7 +31,6 @@ pass else: _import_structure["modeling_xlm_roberta_xl"] = [ - "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMRobertaXLForCausalLM", "XLMRobertaXLForMaskedLM", "XLMRobertaXLForMultipleChoice", @@ -45,7 +43,6 @@ if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( - XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) @@ -57,7 +54,6 @@ pass else: from .modeling_xlm_roberta_xl import ( - XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, diff --git a/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py b/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py index 23deeea7435e7f..ac7b13755cfea0 100644 --- a/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py +++ 
b/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py @@ -25,9 +25,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class XLMRobertaXLConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`XLMRobertaXLModel`] or a [`TFXLMRobertaXLModel`]. diff --git a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py index da76ca29ae27ee..bd8ec70e89957b 100644 --- a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +++ b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py @@ -51,9 +51,6 @@ _CONFIG_FOR_DOC = "XLMRobertaXLConfig" -from ..deprecated._archive_maps import XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class XLMRobertaXLEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
diff --git a/src/transformers/models/xlnet/__init__.py b/src/transformers/models/xlnet/__init__.py index f5e1d4568a66a4..f50d4cc178d3b9 100644 --- a/src/transformers/models/xlnet/__init__.py +++ b/src/transformers/models/xlnet/__init__.py @@ -24,7 +24,7 @@ ) -_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]} +_import_structure = {"configuration_xlnet": ["XLNetConfig"]} try: if not is_sentencepiece_available(): @@ -49,7 +49,6 @@ pass else: _import_structure["modeling_xlnet"] = [ - "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "XLNetForMultipleChoice", "XLNetForQuestionAnswering", "XLNetForQuestionAnsweringSimple", @@ -68,7 +67,6 @@ pass else: _import_structure["modeling_tf_xlnet"] = [ - "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLNetForMultipleChoice", "TFXLNetForQuestionAnsweringSimple", "TFXLNetForSequenceClassification", @@ -81,7 +79,7 @@ if TYPE_CHECKING: - from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig + from .configuration_xlnet import XLNetConfig try: if not is_sentencepiece_available(): @@ -106,7 +104,6 @@ pass else: from .modeling_xlnet import ( - XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, @@ -125,7 +122,6 @@ pass else: from .modeling_tf_xlnet import ( - TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, diff --git a/src/transformers/models/xlnet/configuration_xlnet.py b/src/transformers/models/xlnet/configuration_xlnet.py index f81c456b61df69..957a09dbf61958 100644 --- a/src/transformers/models/xlnet/configuration_xlnet.py +++ b/src/transformers/models/xlnet/configuration_xlnet.py @@ -24,9 +24,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class XLNetConfig(PretrainedConfig): """ This is the configuration class to store the 
configuration of a [`XLNetModel`] or a [`TFXLNetModel`]. It is used to diff --git a/src/transformers/models/xlnet/modeling_tf_xlnet.py b/src/transformers/models/xlnet/modeling_tf_xlnet.py index 188f5e39a2fba1..f998b19bb6d3ac 100644 --- a/src/transformers/models/xlnet/modeling_tf_xlnet.py +++ b/src/transformers/models/xlnet/modeling_tf_xlnet.py @@ -61,9 +61,6 @@ _CONFIG_FOR_DOC = "XLNetConfig" -from ..deprecated._archive_maps import TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - class TFXLNetRelativeAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) diff --git a/src/transformers/models/xlnet/modeling_xlnet.py b/src/transformers/models/xlnet/modeling_xlnet.py index 78ca545751a4af..7d04fe8cfa1b2d 100755 --- a/src/transformers/models/xlnet/modeling_xlnet.py +++ b/src/transformers/models/xlnet/modeling_xlnet.py @@ -44,9 +44,6 @@ _CONFIG_FOR_DOC = "XLNetConfig" -from ..deprecated._archive_maps import XLNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None): """ A map of modules from TF to PyTorch. 
I use a map to keep the PyTorch model as identical to the original PyTorch diff --git a/src/transformers/models/xmod/__init__.py b/src/transformers/models/xmod/__init__.py index f3cb6f195bd458..9b9cb36e3b93e5 100644 --- a/src/transformers/models/xmod/__init__.py +++ b/src/transformers/models/xmod/__init__.py @@ -23,7 +23,6 @@ _import_structure = { "configuration_xmod": [ - "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP", "XmodConfig", "XmodOnnxConfig", ], @@ -36,7 +35,6 @@ pass else: _import_structure["modeling_xmod"] = [ - "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST", "XmodForCausalLM", "XmodForMaskedLM", "XmodForMultipleChoice", @@ -48,7 +46,7 @@ ] if TYPE_CHECKING: - from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig + from .configuration_xmod import XmodConfig, XmodOnnxConfig try: if not is_torch_available(): @@ -57,7 +55,6 @@ pass else: from .modeling_xmod import ( - XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, diff --git a/src/transformers/models/xmod/configuration_xmod.py b/src/transformers/models/xmod/configuration_xmod.py index 21eb9ba2ea2f7d..4ca52652513b3e 100644 --- a/src/transformers/models/xmod/configuration_xmod.py +++ b/src/transformers/models/xmod/configuration_xmod.py @@ -25,9 +25,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class XmodConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`XmodModel`]. 
It is used to instantiate an X-MOD diff --git a/src/transformers/models/xmod/modeling_xmod.py b/src/transformers/models/xmod/modeling_xmod.py index 32e34ef6683817..cf51eee40fbcae 100644 --- a/src/transformers/models/xmod/modeling_xmod.py +++ b/src/transformers/models/xmod/modeling_xmod.py @@ -42,9 +42,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import XMOD_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->Xmod class XmodEmbeddings(nn.Module): """ diff --git a/src/transformers/models/yolos/__init__.py b/src/transformers/models/yolos/__init__.py index 28d59763bb8550..fdf7c5db1cb220 100644 --- a/src/transformers/models/yolos/__init__.py +++ b/src/transformers/models/yolos/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available -_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]} +_import_structure = {"configuration_yolos": ["YolosConfig", "YolosOnnxConfig"]} try: if not is_vision_available(): @@ -34,7 +34,6 @@ pass else: _import_structure["modeling_yolos"] = [ - "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST", "YolosForObjectDetection", "YolosModel", "YolosPreTrainedModel", @@ -42,7 +41,7 @@ if TYPE_CHECKING: - from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig + from .configuration_yolos import YolosConfig, YolosOnnxConfig try: if not is_vision_available(): @@ -60,7 +59,6 @@ pass else: from .modeling_yolos import ( - YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, diff --git a/src/transformers/models/yolos/configuration_yolos.py b/src/transformers/models/yolos/configuration_yolos.py index 098210f1a732e2..2493403b4f375e 100644 --- a/src/transformers/models/yolos/configuration_yolos.py +++ 
b/src/transformers/models/yolos/configuration_yolos.py @@ -27,9 +27,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class YolosConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`YolosModel`]. It is used to instantiate a YOLOS diff --git a/src/transformers/models/yolos/modeling_yolos.py b/src/transformers/models/yolos/modeling_yolos.py index fe558b33a32520..9d6536b6c27258 100755 --- a/src/transformers/models/yolos/modeling_yolos.py +++ b/src/transformers/models/yolos/modeling_yolos.py @@ -63,9 +63,6 @@ _EXPECTED_OUTPUT_SHAPE = [1, 3401, 384] -from ..deprecated._archive_maps import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - @dataclass class YolosObjectDetectionOutput(ModelOutput): """ diff --git a/src/transformers/models/yoso/__init__.py b/src/transformers/models/yoso/__init__.py index e1f89d73ac47c5..c4c73385017eb7 100644 --- a/src/transformers/models/yoso/__init__.py +++ b/src/transformers/models/yoso/__init__.py @@ -16,7 +16,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available -_import_structure = {"configuration_yoso": ["YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP", "YosoConfig"]} +_import_structure = {"configuration_yoso": ["YosoConfig"]} try: if not is_torch_available(): @@ -25,7 +25,6 @@ pass else: _import_structure["modeling_yoso"] = [ - "YOSO_PRETRAINED_MODEL_ARCHIVE_LIST", "YosoForMaskedLM", "YosoForMultipleChoice", "YosoForQuestionAnswering", @@ -38,7 +37,7 @@ if TYPE_CHECKING: - from .configuration_yoso import YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP, YosoConfig + from .configuration_yoso import YosoConfig try: if not is_torch_available(): @@ -47,7 +46,6 @@ pass else: from .modeling_yoso import ( - YOSO_PRETRAINED_MODEL_ARCHIVE_LIST, YosoForMaskedLM, YosoForMultipleChoice, YosoForQuestionAnswering, diff --git 
a/src/transformers/models/yoso/configuration_yoso.py b/src/transformers/models/yoso/configuration_yoso.py index fe2d4d4403780a..906856fa5d711d 100644 --- a/src/transformers/models/yoso/configuration_yoso.py +++ b/src/transformers/models/yoso/configuration_yoso.py @@ -21,9 +21,6 @@ logger = logging.get_logger(__name__) -from ..deprecated._archive_maps import YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 - - class YosoConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`YosoModel`]. It is used to instantiate an YOSO diff --git a/src/transformers/models/yoso/modeling_yoso.py b/src/transformers/models/yoso/modeling_yoso.py index 3615ea80719be1..c2d327bf8468b7 100644 --- a/src/transformers/models/yoso/modeling_yoso.py +++ b/src/transformers/models/yoso/modeling_yoso.py @@ -52,9 +52,6 @@ _CONFIG_FOR_DOC = "YosoConfig" -from ..deprecated._archive_maps import YOSO_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 - - lsh_cumulation = None diff --git a/src/transformers/utils/dummy_flax_objects.py b/src/transformers/utils/dummy_flax_objects.py index 281ddf56a99450..627daa228c73d6 100644 --- a/src/transformers/utils/dummy_flax_objects.py +++ b/src/transformers/utils/dummy_flax_objects.py @@ -1353,9 +1353,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) -FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class FlaxXLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["flax"] diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index d5c64cc141d268..5d8c17dd436e8e 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -429,9 +429,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class AlbertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -492,9 +489,6 @@ def load_tf_weights_in_albert(*args, **kwargs): 
requires_backends(load_tf_weights_in_albert, ["torch"]) -ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class AlignModel(metaclass=DummyObject): _backends = ["torch"] @@ -523,9 +517,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class AltCLIPModel(metaclass=DummyObject): _backends = ["torch"] @@ -554,9 +545,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ASTForAudioClassification(metaclass=DummyObject): _backends = ["torch"] @@ -970,9 +958,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class AutoformerForPrediction(metaclass=DummyObject): _backends = ["torch"] @@ -994,9 +979,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -BARK_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BarkCausalModel(metaclass=DummyObject): _backends = ["torch"] @@ -1039,9 +1021,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -BART_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BartForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -1098,9 +1077,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -BEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BeitBackbone(metaclass=DummyObject): _backends = ["torch"] @@ -1143,9 +1119,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -1252,9 +1225,6 @@ def load_tf_weights_in_bert_generation(*args, **kwargs): requires_backends(load_tf_weights_in_bert_generation, ["torch"]) -BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BigBirdForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -1329,9 +1299,6 @@ def load_tf_weights_in_big_bird(*args, 
**kwargs): requires_backends(load_tf_weights_in_big_bird, ["torch"]) -BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BigBirdPegasusForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -1374,9 +1341,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BioGptForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -1412,9 +1376,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -BIT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BitBackbone(metaclass=DummyObject): _backends = ["torch"] @@ -1443,9 +1404,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BlenderbotForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -1474,9 +1432,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BlenderbotSmallForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -1505,9 +1460,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BlipForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -1557,9 +1509,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Blip2ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -1595,9 +1544,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BloomForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -1640,9 +1586,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BridgeTowerForContrastiveLearning(metaclass=DummyObject): _backends = ["torch"] @@ -1678,9 +1621,6 @@ def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -BROS_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class BrosForTokenClassification(metaclass=DummyObject): _backends = ["torch"] @@ -1723,9 +1663,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class CamembertForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -1782,9 +1719,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class CanineForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] @@ -1838,9 +1772,6 @@ def load_tf_weights_in_canine(*args, **kwargs): requires_backends(load_tf_weights_in_canine, ["torch"]) -CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ChineseCLIPModel(metaclass=DummyObject): _backends = ["torch"] @@ -1869,9 +1800,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -CLAP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ClapAudioModel(metaclass=DummyObject): _backends = ["torch"] @@ -1921,9 +1849,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class CLIPForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -1973,9 +1898,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class CLIPSegForImageSegmentation(metaclass=DummyObject): _backends = ["torch"] @@ -2011,9 +1933,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -CLVP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ClvpDecoder(metaclass=DummyObject): _backends = ["torch"] @@ -2056,9 +1975,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class CodeGenForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -2101,9 +2017,6 @@ def __init__(self, 
*args, **kwargs): requires_backends(self, ["torch"]) -CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ConditionalDetrForObjectDetection(metaclass=DummyObject): _backends = ["torch"] @@ -2132,9 +2045,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ConvBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -2195,9 +2105,6 @@ def load_tf_weights_in_convbert(*args, **kwargs): requires_backends(load_tf_weights_in_convbert, ["torch"]) -CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ConvNextBackbone(metaclass=DummyObject): _backends = ["torch"] @@ -2226,9 +2133,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ConvNextV2Backbone(metaclass=DummyObject): _backends = ["torch"] @@ -2257,9 +2161,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class CpmAntForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -2281,9 +2182,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class CTRLForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] @@ -2312,9 +2210,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -CVT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class CvtForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -2336,15 +2231,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST = None - - -DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - -DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Data2VecAudioForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] @@ -2492,9 +2378,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
-DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class DebertaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -2537,9 +2420,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class DebertaV2ForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -2589,9 +2469,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class DecisionTransformerGPT2Model(metaclass=DummyObject): _backends = ["torch"] @@ -2620,9 +2497,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class DeformableDetrForObjectDetection(metaclass=DummyObject): _backends = ["torch"] @@ -2644,9 +2518,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class DeiTForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -2682,9 +2553,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MCTCTForCTC(metaclass=DummyObject): _backends = ["torch"] @@ -2755,9 +2623,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class RetriBertModel(metaclass=DummyObject): _backends = ["torch"] @@ -2772,9 +2637,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TrajectoryTransformerModel(metaclass=DummyObject): _backends = ["torch"] @@ -2789,9 +2651,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class AdaptiveEmbedding(metaclass=DummyObject): _backends = ["torch"] @@ -2831,9 +2690,6 @@ def load_tf_weights_in_transfo_xl(*args, **kwargs): 
requires_backends(load_tf_weights_in_transfo_xl, ["torch"]) -VAN_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class VanForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -2855,9 +2711,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class DepthAnythingForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] @@ -2872,9 +2725,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DETA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class DetaForObjectDetection(metaclass=DummyObject): _backends = ["torch"] @@ -2896,9 +2746,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class DetrForObjectDetection(metaclass=DummyObject): _backends = ["torch"] @@ -2927,9 +2774,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DINAT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class DinatBackbone(metaclass=DummyObject): _backends = ["torch"] @@ -2958,9 +2802,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Dinov2Backbone(metaclass=DummyObject): _backends = ["torch"] @@ -2989,9 +2830,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class DistilBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -3041,9 +2879,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class DonutSwinModel(metaclass=DummyObject): _backends = ["torch"] @@ -3058,15 +2893,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - -DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - -DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class 
DPRContextEncoder(metaclass=DummyObject): _backends = ["torch"] @@ -3116,9 +2942,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -DPT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class DPTForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] @@ -3147,9 +2970,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class EfficientFormerForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -3178,9 +2998,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class EfficientNetForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -3202,9 +3019,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ElectraForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -3272,9 +3086,6 @@ def load_tf_weights_in_electra(*args, **kwargs): requires_backends(load_tf_weights_in_electra, ["torch"]) -ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class EncodecModel(metaclass=DummyObject): _backends = ["torch"] @@ -3296,9 +3107,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ErnieForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -3369,9 +3177,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ErnieMForInformationExtraction(metaclass=DummyObject): _backends = ["torch"] @@ -3421,9 +3226,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -ESM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class EsmFoldPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] @@ -3473,9 +3275,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -FALCON_PRETRAINED_MODEL_ARCHIVE_LIST 
= None - - class FalconForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -3518,9 +3317,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class FastSpeech2ConformerHifiGan(metaclass=DummyObject): _backends = ["torch"] @@ -3549,9 +3345,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class FlaubertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] @@ -3608,9 +3401,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class FlavaForPreTraining(metaclass=DummyObject): _backends = ["torch"] @@ -3660,9 +3450,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -FNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class FNetForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -3733,9 +3520,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class FocalNetBackbone(metaclass=DummyObject): _backends = ["torch"] @@ -3792,9 +3576,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class FunnelBaseModel(metaclass=DummyObject): _backends = ["torch"] @@ -3904,9 +3685,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -GIT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class GitForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -3935,9 +3713,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -GLPN_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class GLPNForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] @@ -3959,9 +3734,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class 
GPT2DoubleHeadsModel(metaclass=DummyObject): _backends = ["torch"] @@ -4015,9 +3787,6 @@ def load_tf_weights_in_gpt2(*args, **kwargs): requires_backends(load_tf_weights_in_gpt2, ["torch"]) -GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class GPTBigCodeForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -4053,9 +3822,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class GPTNeoForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -4102,9 +3868,6 @@ def load_tf_weights_in_gpt_neo(*args, **kwargs): requires_backends(load_tf_weights_in_gpt_neo, ["torch"]) -GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class GPTNeoXForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -4154,9 +3917,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class GPTNeoXJapaneseForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -4185,9 +3945,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class GPTJForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -4223,9 +3980,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class GPTSanJapaneseForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -4247,9 +4001,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class GraphormerForGraphClassification(metaclass=DummyObject): _backends = ["torch"] @@ -4271,9 +4022,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -GROUNDING_DINO_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class GroundingDinoForObjectDetection(metaclass=DummyObject): _backends = ["torch"] @@ -4295,9 +4043,6 @@ def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) -GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class GroupViTModel(metaclass=DummyObject): _backends = ["torch"] @@ -4326,9 +4071,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class HubertForCTC(metaclass=DummyObject): _backends = ["torch"] @@ -4357,9 +4099,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class IBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -4409,9 +4148,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class IdeficsForVisionText2Text(metaclass=DummyObject): _backends = ["torch"] @@ -4440,9 +4176,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Idefics2ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -4471,9 +4204,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ImageGPTForCausalImageModeling(metaclass=DummyObject): _backends = ["torch"] @@ -4506,9 +4236,6 @@ def load_tf_weights_in_imagegpt(*args, **kwargs): requires_backends(load_tf_weights_in_imagegpt, ["torch"]) -INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class InformerForPrediction(metaclass=DummyObject): _backends = ["torch"] @@ -4530,9 +4257,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class InstructBlipForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -4589,9 +4313,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class JukeboxModel(metaclass=DummyObject): _backends = ["torch"] @@ -4620,9 +4341,6 @@ def __init__(self, 
*args, **kwargs): requires_backends(self, ["torch"]) -KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Kosmos2ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -4644,9 +4362,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class LayoutLMForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -4689,9 +4404,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class LayoutLMv2ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] @@ -4727,9 +4439,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class LayoutLMv3ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] @@ -4765,9 +4474,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -LED_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class LEDForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -4803,9 +4509,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class LevitForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -4834,9 +4537,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -LILT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class LiltForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] @@ -4907,9 +4607,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class LlavaForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -4924,9 +4621,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -LLAVA_NEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class LlavaNextForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -4941,9 +4635,6 
@@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class LongformerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -5000,9 +4691,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class LongT5EncoderModel(metaclass=DummyObject): _backends = ["torch"] @@ -5031,9 +4719,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class LukeForEntityClassification(metaclass=DummyObject): _backends = ["torch"] @@ -5153,9 +4838,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class M2M100ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -5177,9 +4859,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MambaForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -5222,9 +4901,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MarkupLMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] @@ -5260,9 +4936,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Mask2FormerForUniversalSegmentation(metaclass=DummyObject): _backends = ["torch"] @@ -5284,9 +4957,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MaskFormerForInstanceSegmentation(metaclass=DummyObject): _backends = ["torch"] @@ -5357,9 +5027,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MEGA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MegaForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -5416,9 +5083,6 
@@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MegatronBertForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -5489,9 +5153,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MgpstrForSceneTextRecognition(metaclass=DummyObject): _backends = ["torch"] @@ -5569,9 +5230,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MobileBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -5646,9 +5304,6 @@ def load_tf_weights_in_mobilebert(*args, **kwargs): requires_backends(load_tf_weights_in_mobilebert, ["torch"]) -MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MobileNetV1ForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -5674,9 +5329,6 @@ def load_tf_weights_in_mobilenet_v1(*args, **kwargs): requires_backends(load_tf_weights_in_mobilenet_v1, ["torch"]) -MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MobileNetV2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -5709,9 +5361,6 @@ def load_tf_weights_in_mobilenet_v2(*args, **kwargs): requires_backends(load_tf_weights_in_mobilenet_v2, ["torch"]) -MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MobileViTForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -5740,9 +5389,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MobileViTV2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -5771,9 +5417,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MPNetForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -5830,9 +5473,6 @@ def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) -MPT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MptForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -5875,9 +5515,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MRA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MraForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -5976,9 +5613,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MusicgenForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -6014,9 +5648,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MusicgenMelodyForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -6045,9 +5676,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -MVP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class MvpForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -6090,9 +5718,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -NAT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class NatBackbone(metaclass=DummyObject): _backends = ["torch"] @@ -6121,9 +5746,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class NezhaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -6187,9 +5809,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class NllbMoeForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -6225,9 +5844,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class NystromformerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -6305,9 +5921,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
-ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class OneFormerForUniversalSegmentation(metaclass=DummyObject): _backends = ["torch"] @@ -6329,9 +5942,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class OpenAIGPTDoubleHeadsModel(metaclass=DummyObject): _backends = ["torch"] @@ -6371,9 +5981,6 @@ def load_tf_weights_in_openai_gpt(*args, **kwargs): requires_backends(load_tf_weights_in_openai_gpt, ["torch"]) -OPT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class OPTForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -6409,9 +6016,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Owlv2ForObjectDetection(metaclass=DummyObject): _backends = ["torch"] @@ -6447,9 +6051,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class OwlViTForObjectDetection(metaclass=DummyObject): _backends = ["torch"] @@ -6485,9 +6086,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class PatchTSMixerForPrediction(metaclass=DummyObject): _backends = ["torch"] @@ -6530,9 +6128,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class PatchTSTForClassification(metaclass=DummyObject): _backends = ["torch"] @@ -6603,9 +6198,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class PegasusXForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -6627,9 +6219,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class PerceiverForImageClassificationConvProcessing(metaclass=DummyObject): _backends = ["torch"] @@ -6728,9 +6317,6 @@ def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -PHI_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class PhiForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -6766,9 +6352,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -PHI3_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Phi3ForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -6804,9 +6387,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Pix2StructForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -6835,9 +6415,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -PLBART_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class PLBartForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -6873,9 +6450,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class PoolFormerForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -6897,9 +6471,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Pop2PianoForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -6914,9 +6485,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ProphetNetDecoder(metaclass=DummyObject): _backends = ["torch"] @@ -6959,9 +6527,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -PVT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class PvtForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -7011,9 +6576,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class QDQBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -7172,9 +6734,6 @@ def __init__(self, *args, 
**kwargs): requires_backends(self, ["torch"]) -REALM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class RealmEmbedder(metaclass=DummyObject): _backends = ["torch"] @@ -7249,9 +6808,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ReformerAttention(metaclass=DummyObject): _backends = ["torch"] @@ -7308,9 +6864,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class RegNetForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -7332,9 +6885,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class RemBertForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -7402,9 +6952,6 @@ def load_tf_weights_in_rembert(*args, **kwargs): requires_backends(load_tf_weights_in_rembert, ["torch"]) -RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ResNetBackbone(metaclass=DummyObject): _backends = ["torch"] @@ -7433,9 +6980,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class RobertaForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -7492,9 +7036,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class RobertaPreLayerNormForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -7551,9 +7092,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class RoCBertForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -7628,9 +7166,6 @@ def load_tf_weights_in_roc_bert(*args, **kwargs): requires_backends(load_tf_weights_in_roc_bert, ["torch"]) -ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class RoFormerForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -7698,9 
+7233,6 @@ def load_tf_weights_in_roformer(*args, **kwargs): requires_backends(load_tf_weights_in_roformer, ["torch"]) -RWKV_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class RwkvForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -7722,9 +7254,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SAM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SamModel(metaclass=DummyObject): _backends = ["torch"] @@ -7739,9 +7268,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SeamlessM4TCodeHifiGan(metaclass=DummyObject): _backends = ["torch"] @@ -7812,9 +7338,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SeamlessM4Tv2ForSpeechToSpeech(metaclass=DummyObject): _backends = ["torch"] @@ -7857,9 +7380,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SegformerDecodeHead(metaclass=DummyObject): _backends = ["torch"] @@ -7902,9 +7422,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SegGptForImageSegmentation(metaclass=DummyObject): _backends = ["torch"] @@ -7926,9 +7443,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SEW_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SEWForCTC(metaclass=DummyObject): _backends = ["torch"] @@ -7957,9 +7471,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SEWDForCTC(metaclass=DummyObject): _backends = ["torch"] @@ -7988,9 +7499,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SiglipForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -8033,9 +7541,6 @@ def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Speech2TextForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -8071,9 +7576,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SpeechT5ForSpeechToSpeech(metaclass=DummyObject): _backends = ["torch"] @@ -8116,9 +7618,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SplinterForPreTraining(metaclass=DummyObject): _backends = ["torch"] @@ -8154,9 +7653,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SqueezeBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -8269,9 +7765,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SuperPointForKeypointDetection(metaclass=DummyObject): _backends = ["torch"] @@ -8286,9 +7779,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SwiftFormerForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -8310,9 +7800,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SwinBackbone(metaclass=DummyObject): _backends = ["torch"] @@ -8348,9 +7835,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Swin2SRForImageSuperResolution(metaclass=DummyObject): _backends = ["torch"] @@ -8372,9 +7856,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Swinv2Backbone(metaclass=DummyObject): _backends = ["torch"] @@ -8410,9 
+7891,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class SwitchTransformersEncoderModel(metaclass=DummyObject): _backends = ["torch"] @@ -8455,9 +7933,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -T5_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class T5EncoderModel(metaclass=DummyObject): _backends = ["torch"] @@ -8511,9 +7986,6 @@ def load_tf_weights_in_t5(*args, **kwargs): requires_backends(load_tf_weights_in_t5, ["torch"]) -TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TableTransformerForObjectDetection(metaclass=DummyObject): _backends = ["torch"] @@ -8535,9 +8007,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TapasForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -8577,9 +8046,6 @@ def load_tf_weights_in_tapas(*args, **kwargs): requires_backends(load_tf_weights_in_tapas, ["torch"]) -TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TimeSeriesTransformerForPrediction(metaclass=DummyObject): _backends = ["torch"] @@ -8601,9 +8067,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TimesformerForVideoClassification(metaclass=DummyObject): _backends = ["torch"] @@ -8632,9 +8095,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TrOCRForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -8649,9 +8109,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -TVLT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TvltForAudioVisualClassification(metaclass=DummyObject): _backends = ["torch"] @@ -8680,9 +8137,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -TVP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - 
class TvpForVideoGrounding(metaclass=DummyObject): _backends = ["torch"] @@ -8704,9 +8158,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -UDOP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class UdopEncoderModel(metaclass=DummyObject): _backends = ["torch"] @@ -8784,9 +8235,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class UniSpeechForCTC(metaclass=DummyObject): _backends = ["torch"] @@ -8822,9 +8270,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class UniSpeechSatForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] @@ -8874,9 +8319,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class UnivNetModel(metaclass=DummyObject): _backends = ["torch"] @@ -8898,9 +8340,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class VideoMAEForPreTraining(metaclass=DummyObject): _backends = ["torch"] @@ -8929,9 +8368,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -VILT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ViltForImageAndTextRetrieval(metaclass=DummyObject): _backends = ["torch"] @@ -8988,9 +8424,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class VipLlavaForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -9019,9 +8452,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class VisualBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] @@ -9078,9 +8508,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -VIT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class 
ViTForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -9109,9 +8536,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ViTHybridForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -9133,9 +8557,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ViTMAEForPreTraining(metaclass=DummyObject): _backends = ["torch"] @@ -9164,9 +8585,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class ViTMSNForImageClassification(metaclass=DummyObject): _backends = ["torch"] @@ -9188,9 +8606,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -VITDET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class VitDetBackbone(metaclass=DummyObject): _backends = ["torch"] @@ -9212,9 +8627,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class VitMatteForImageMatting(metaclass=DummyObject): _backends = ["torch"] @@ -9229,9 +8641,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -VITS_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class VitsModel(metaclass=DummyObject): _backends = ["torch"] @@ -9246,9 +8655,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class VivitForVideoClassification(metaclass=DummyObject): _backends = ["torch"] @@ -9270,9 +8676,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Wav2Vec2ForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] @@ -9329,9 +8732,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class 
Wav2Vec2BertForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] @@ -9374,9 +8774,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class Wav2Vec2ConformerForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] @@ -9426,9 +8823,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class WavLMForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] @@ -9471,9 +8865,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class WhisperForAudioClassification(metaclass=DummyObject): _backends = ["torch"] @@ -9509,9 +8900,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class XCLIPModel(metaclass=DummyObject): _backends = ["torch"] @@ -9540,9 +8928,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class XGLMForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -9564,9 +8949,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class XLMForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] @@ -9623,9 +9005,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class XLMProphetNetDecoder(metaclass=DummyObject): _backends = ["torch"] @@ -9668,9 +9047,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class XLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -9727,9 +9103,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
-XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class XLMRobertaXLForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -9786,9 +9159,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class XLNetForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] @@ -9849,9 +9219,6 @@ def load_tf_weights_in_xlnet(*args, **kwargs): requires_backends(load_tf_weights_in_xlnet, ["torch"]) -XMOD_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class XmodForCausalLM(metaclass=DummyObject): _backends = ["torch"] @@ -9908,9 +9275,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class YolosForObjectDetection(metaclass=DummyObject): _backends = ["torch"] @@ -9932,9 +9296,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class YosoForMaskedLM(metaclass=DummyObject): _backends = ["torch"] diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index e6f75d1f8f0e72..5d4c28cbcc4595 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -167,9 +167,6 @@ def shape_list(*args, **kwargs): requires_backends(shape_list, ["tf"]) -TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFAlbertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -481,9 +478,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFBertEmbeddings(metaclass=DummyObject): _backends = ["tf"] @@ -610,9 +604,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFBlipForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] @@ -662,9 +653,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) 
-TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFCamembertForCausalLM(metaclass=DummyObject): _backends = ["tf"] @@ -721,9 +709,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFCLIPModel(metaclass=DummyObject): _backends = ["tf"] @@ -752,9 +737,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFConvBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -853,9 +835,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFCTRLForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] @@ -884,9 +863,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFCvtForImageClassification(metaclass=DummyObject): _backends = ["tf"] @@ -936,9 +912,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFDebertaForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -981,9 +954,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFDebertaV2ForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -1033,9 +1003,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFDeiTForImageClassification(metaclass=DummyObject): _backends = ["tf"] @@ -1071,9 +1038,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFAdaptiveEmbedding(metaclass=DummyObject): _backends = ["tf"] @@ -1116,9 +1080,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class 
TFDistilBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -1175,15 +1136,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - -TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - -TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFDPRContextEncoder(metaclass=DummyObject): _backends = ["tf"] @@ -1226,9 +1178,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFEfficientFormerForImageClassification(metaclass=DummyObject): _backends = ["tf"] @@ -1257,9 +1206,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFElectraForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -1323,9 +1269,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -ESM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFEsmForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -1361,9 +1304,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFFlaubertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] @@ -1413,9 +1353,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFFunnelBaseModel(metaclass=DummyObject): _backends = ["tf"] @@ -1479,9 +1416,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFGPT2DoubleHeadsModel(metaclass=DummyObject): _backends = ["tf"] @@ -1559,9 +1493,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFGroupViTModel(metaclass=DummyObject): _backends = ["tf"] @@ -1590,9 +1521,6 @@ def __init__(self, *args, **kwargs): 
requires_backends(self, ["tf"]) -TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFHubertForCTC(metaclass=DummyObject): _backends = ["tf"] @@ -1614,9 +1542,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFLayoutLMForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -1666,9 +1591,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFLayoutLMv3ForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] @@ -1725,9 +1647,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFLongformerForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -1784,9 +1703,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFLxmertForPreTraining(metaclass=DummyObject): _backends = ["tf"] @@ -1864,9 +1780,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFMobileBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -1937,9 +1850,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFMobileViTForImageClassification(metaclass=DummyObject): _backends = ["tf"] @@ -1968,9 +1878,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFMPNetForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -2048,9 +1955,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFOpenAIGPTDoubleHeadsModel(metaclass=DummyObject): _backends = ["tf"] @@ -2163,9 +2067,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, 
["tf"]) -TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFRegNetForImageClassification(metaclass=DummyObject): _backends = ["tf"] @@ -2187,9 +2088,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFRemBertForCausalLM(metaclass=DummyObject): _backends = ["tf"] @@ -2253,9 +2151,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFResNetForImageClassification(metaclass=DummyObject): _backends = ["tf"] @@ -2277,9 +2172,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFRobertaForCausalLM(metaclass=DummyObject): _backends = ["tf"] @@ -2343,9 +2235,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFRobertaPreLayerNormForCausalLM(metaclass=DummyObject): _backends = ["tf"] @@ -2409,9 +2298,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFRoFormerForCausalLM(metaclass=DummyObject): _backends = ["tf"] @@ -2475,9 +2361,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFSamModel(metaclass=DummyObject): _backends = ["tf"] @@ -2492,9 +2375,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFSegformerDecodeHead(metaclass=DummyObject): _backends = ["tf"] @@ -2530,9 +2410,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFSpeech2TextForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] @@ -2554,9 +2431,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) 
-TF_SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFSwiftFormerForImageClassification(metaclass=DummyObject): _backends = ["tf"] @@ -2578,9 +2452,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFSwinForImageClassification(metaclass=DummyObject): _backends = ["tf"] @@ -2609,9 +2480,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFT5EncoderModel(metaclass=DummyObject): _backends = ["tf"] @@ -2640,9 +2508,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFTapasForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -2734,9 +2599,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFWav2Vec2ForCTC(metaclass=DummyObject): _backends = ["tf"] @@ -2765,9 +2627,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFWhisperForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] @@ -2789,9 +2648,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFXGLMForCausalLM(metaclass=DummyObject): _backends = ["tf"] @@ -2813,9 +2669,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFXLMForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] @@ -2872,9 +2725,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None - - class TFXLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["tf"] @@ -2931,9 +2781,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None - - 
class TFXLNetForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] diff --git a/tests/models/donut/test_processing_donut.py b/tests/models/donut/test_processing_donut.py index c4ee25b25256d1..ace3a109dfbb23 100644 --- a/tests/models/donut/test_processing_donut.py +++ b/tests/models/donut/test_processing_donut.py @@ -19,12 +19,11 @@ from transformers import DonutProcessor -DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base" - - class DonutProcessorTest(unittest.TestCase): + from_pretrained_id = "naver-clova-ix/donut-base" + def setUp(self): - self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME) + self.processor = DonutProcessor.from_pretrained(self.from_pretrained_id) def test_token2json(self): expected_json = { diff --git a/tests/models/superpoint/test_modeling_superpoint.py b/tests/models/superpoint/test_modeling_superpoint.py index 080eda385b9e0d..6e10a8a21dd0d1 100644 --- a/tests/models/superpoint/test_modeling_superpoint.py +++ b/tests/models/superpoint/test_modeling_superpoint.py @@ -27,7 +27,6 @@ import torch from transformers import ( - SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST, SuperPointForKeypointDetection, ) @@ -121,6 +120,7 @@ class SuperPointModelTest(ModelTesterMixin, unittest.TestCase): test_resize_embeddings = False test_head_masking = False has_attentions = False + from_pretrained_id = "magic-leap-community/superpoint" def setUp(self): self.model_tester = SuperPointModelTester(self) @@ -222,9 +222,8 @@ def check_hidden_states_output(inputs_dict, config, model_class): @slow def test_model_from_pretrained(self): - for model_name in SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: - model = SuperPointForKeypointDetection.from_pretrained(model_name) - self.assertIsNotNone(model) + model = SuperPointForKeypointDetection.from_pretrained(self.from_pretrained_id) + self.assertIsNotNone(model) def test_forward_labels_should_be_none(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff 
--git a/tests/models/swiftformer/test_modeling_tf_swiftformer.py b/tests/models/swiftformer/test_modeling_tf_swiftformer.py index 1d30abed31fda4..e73d38605d603d 100644 --- a/tests/models/swiftformer/test_modeling_tf_swiftformer.py +++ b/tests/models/swiftformer/test_modeling_tf_swiftformer.py @@ -36,7 +36,6 @@ from transformers import TFSwiftFormerForImageClassification, TFSwiftFormerModel from transformers.modeling_tf_utils import keras - from transformers.models.swiftformer.modeling_tf_swiftformer import TF_SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): @@ -144,6 +143,7 @@ class TFSwiftFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.T test_head_masking = False has_attentions = False test_onnx = False + from_pretrained_id = "MBZUAI/swiftformer-xs" def setUp(self): self.model_tester = TFSwiftFormerModelTester(self) @@ -194,9 +194,8 @@ def test_for_image_classification(self): @slow def test_model_from_pretrained(self): - for model_name in TF_SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: - model = TFSwiftFormerModel.from_pretrained(model_name) - self.assertIsNotNone(model) + model = TFSwiftFormerModel.from_pretrained(self.from_pretrained_id) + self.assertIsNotNone(model) @unittest.skip(reason="TFSwiftFormer does not output attentions") def test_attention_outputs(self):