From 88304738944154c62cdbb58a5c3c8761f2234781 Mon Sep 17 00:00:00 2001
From: Arthur Zucker
Date: Tue, 22 Oct 2024 13:45:19 +0200
Subject: [PATCH] finish fixing

---
 .../models/gemma/configuration_gemma.py      |  1 +
 .../models/gemma/modeling_gemma.py           |  1 -
 .../models/gemma2/modeling_gemma2.py         | 24 +++++++++++--------
 src/transformers/models/glm/modeling_glm.py  |  3 +--
 .../configuration_instructblipvideo.py       |  1 -
 .../modeling_instructblipvideo.py            |  2 --
 .../modeling_llava_next_video.py             |  8 +------
 7 files changed, 17 insertions(+), 23 deletions(-)

diff --git a/src/transformers/models/gemma/configuration_gemma.py b/src/transformers/models/gemma/configuration_gemma.py
index 75d0096d4811ef..346f386ba698f2 100644
--- a/src/transformers/models/gemma/configuration_gemma.py
+++ b/src/transformers/models/gemma/configuration_gemma.py
@@ -19,6 +19,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 from ...configuration_utils import PretrainedConfig
 
 
diff --git a/src/transformers/models/gemma/modeling_gemma.py b/src/transformers/models/gemma/modeling_gemma.py
index 43882e7f8c0596..d4e6872ece41fb 100644
--- a/src/transformers/models/gemma/modeling_gemma.py
+++ b/src/transformers/models/gemma/modeling_gemma.py
@@ -23,7 +23,6 @@
 from typing import List, Optional, Tuple, Union
 
 import torch
-import torch.utils.checkpoint
 from torch import nn
 
 from ...activations import ACT2FN
diff --git a/src/transformers/models/gemma2/modeling_gemma2.py b/src/transformers/models/gemma2/modeling_gemma2.py
index afa5301a5968aa..9dfb2619587af0 100644
--- a/src/transformers/models/gemma2/modeling_gemma2.py
+++ b/src/transformers/models/gemma2/modeling_gemma2.py
@@ -19,21 +19,33 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Optional, Tuple, Union
+from typing import List, Optional, Tuple, Union
 
 import torch
 import torch.nn as nn
 
 from ...activations import ACT2FN
 from ...cache_utils import Cache, HybridCache
-from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+from ...generation import GenerationMixin
+from ...modeling_flash_attention_utils import _flash_attention_forward
+from ...modeling_outputs import (
+    BaseModelOutputWithPast,
+    CausalLMOutputWithPast,
+    SequenceClassifierOutputWithPast,
+    TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
 from ...utils import (
+    add_start_docstrings,
+    add_start_docstrings_to_model_forward,
     is_flash_attn_2_available,
     is_flash_attn_greater_or_equal,
     is_flash_attn_greater_or_equal_2_10,
     is_torch_greater_or_equal,
     logging,
+    replace_return_docstrings,
 )
+from .configuration_gemma2 import Gemma2Config
 
 
 if is_flash_attn_2_available():
@@ -41,14 +53,6 @@
 if is_torch_greater_or_equal("2.5"):
     from torch.nn.attention.flex_attention import flex_attention
 
-from typing import List
-
-from ...generation import GenerationMixin
-from ...modeling_flash_attention_utils import _flash_attention_forward
-from ...modeling_outputs import SequenceClassifierOutputWithPast, TokenClassifierOutput
-from ...modeling_utils import PreTrainedModel
-from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
-from .configuration_gemma2 import Gemma2Config
 
 
 class Gemma2RMSNorm(nn.Module):
diff --git a/src/transformers/models/glm/modeling_glm.py b/src/transformers/models/glm/modeling_glm.py
index c0d3767164c388..484b16d314d55d 100644
--- a/src/transformers/models/glm/modeling_glm.py
+++ b/src/transformers/models/glm/modeling_glm.py
@@ -23,8 +23,7 @@
 from typing import List, Optional, Tuple, Union
 
 import torch
-import torch.utils.checkpoint
-from torch import nn
+import torch.nn as nn
 
 from ...activations import ACT2FN
 from ...cache_utils import Cache, DynamicCache, StaticCache
diff --git a/src/transformers/models/instructblipvideo/configuration_instructblipvideo.py b/src/transformers/models/instructblipvideo/configuration_instructblipvideo.py
index 2a0f8d8a647ff5..e7c8eeccef98b4 100644
--- a/src/transformers/models/instructblipvideo/configuration_instructblipvideo.py
+++ b/src/transformers/models/instructblipvideo/configuration_instructblipvideo.py
@@ -19,7 +19,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 import os
 from typing import Union
 
diff --git a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py
index 9877a079b8d8e8..19e96c54230e61 100644
--- a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py
+++ b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py
@@ -19,13 +19,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 import math
 from dataclasses import dataclass
 from typing import Any, Optional, Tuple, Union
 
 import torch
-import torch.utils.checkpoint
 from torch import nn
 from torch.nn import CrossEntropyLoss
 
diff --git a/src/transformers/models/llava_next_video/modeling_llava_next_video.py b/src/transformers/models/llava_next_video/modeling_llava_next_video.py
index 3fd6bb47fc7661..fbfd37291c886f 100644
--- a/src/transformers/models/llava_next_video/modeling_llava_next_video.py
+++ b/src/transformers/models/llava_next_video/modeling_llava_next_video.py
@@ -25,7 +25,6 @@
 
 import numpy as np
 import torch
-import torch.utils.checkpoint
 from torch import nn
 
 from ...activations import ACT2FN
@@ -33,12 +32,7 @@
 from ...image_processing_utils import select_best_resolution
 from ...modeling_outputs import ModelOutput
 from ...modeling_utils import PreTrainedModel
-from ...utils import (
-    add_start_docstrings,
-    add_start_docstrings_to_model_forward,
-    logging,
-    replace_return_docstrings,
-)
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
 from ..auto import AutoModel, AutoModelForCausalLM
 from .configuration_llava_next_video import LlavaNextVideoConfig
 
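Note: the gemma2 hunk above consolidates the scattered imports into one block while keeping the optional attention imports behind runtime guards instead of importing them unconditionally. A minimal sketch of that guard pattern, assuming a transformers install that exposes the same helper names used in the hunk:

    # Guarded optional imports, mirroring the pattern in modeling_gemma2.py.
    # Helper names are taken from the hunk above; availability may vary by version.
    from transformers.utils import is_flash_attn_2_available, is_torch_greater_or_equal

    if is_flash_attn_2_available():
        # Only resolved when the flash-attn package is actually importable.
        from transformers.modeling_flash_attention_utils import _flash_attention_forward

    if is_torch_greater_or_equal("2.5"):
        # torch.nn.attention.flex_attention only exists in torch >= 2.5.
        from torch.nn.attention.flex_attention import flex_attention

Gating the imports this way lets the module load on machines without flash-attn or on older torch builds, with the guarded names simply left undefined there.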