diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py
index 3b280930c0cbdf..e9dca6df989472 100644
--- a/src/transformers/models/falcon/modeling_falcon.py
+++ b/src/transformers/models/falcon/modeling_falcon.py
@@ -614,7 +614,10 @@ def forward(
         input_dtype = query_layer.dtype
         if input_dtype == torch.float32:
             # Handle the case where the model is quantized
-            target_dtype = getattr(self.config, "_pre_quantization_dtype", self.query_key_value.weight.dtype)
+            if hasattr(self.config, "_pre_quantization_dtype"):
+                target_dtype = self.config._pre_quantization_dtype
+            else:
+                target_dtype = self.query_key_value.weight.dtype
 
             logger.warning_once(
                 f"The input hidden states seems to be silently casted in float32, this might be related to"
diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py
index b0767cf4967dca..b67719ac327162 100644
--- a/src/transformers/models/llama/modeling_llama.py
+++ b/src/transformers/models/llama/modeling_llama.py
@@ -476,7 +476,10 @@ def forward(
         input_dtype = query_states.dtype
         if input_dtype == torch.float32:
             # Handle the case where the model is quantized
-            target_dtype = getattr(self.config, "_pre_quantization_dtype", self.q_proj.weight.dtype)
+            if hasattr(self.config, "_pre_quantization_dtype"):
+                target_dtype = self.config._pre_quantization_dtype
+            else:
+                target_dtype = self.q_proj.weight.dtype
 
             logger.warning_once(
                 f"The input hidden states seems to be silently casted in float32, this might be related to"
diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py
index f636e514823e7d..cfef5a427118e9 100644
--- a/src/transformers/models/mistral/modeling_mistral.py
+++ b/src/transformers/models/mistral/modeling_mistral.py
@@ -409,7 +409,10 @@ def forward(
         input_dtype = query_states.dtype
         if input_dtype == torch.float32:
             # Handle the case where the model is quantized
-            target_dtype = getattr(self.config, "_pre_quantization_dtype", self.q_proj.weight.dtype)
+            if hasattr(self.config, "_pre_quantization_dtype"):
+                target_dtype = self.config._pre_quantization_dtype
+            else:
+                target_dtype = self.q_proj.weight.dtype
 
             logger.warning_once(
                 f"The input hidden states seems to be silently casted in float32, this might be related to"
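
For context (not part of the diff): the likely motivation for replacing `getattr` with an explicit `hasattr` branch is that `getattr(obj, name, default)` always evaluates the `default` expression, even when the attribute is present. For quantized checkpoints whose linear layers do not expose a plain `.weight`, the old fallback `self.q_proj.weight.dtype` would still be evaluated and could raise, even though `_pre_quantization_dtype` is set on the config. A minimal sketch of that failure mode follows; `QuantLinearStub` and `ConfigStub` are hypothetical stand-ins, not transformers classes.

```python
import torch


class QuantLinearStub:
    """Hypothetical stand-in for a quantized linear layer that packs its
    parameters under a different name and has no plain `.weight`."""
    qweight = torch.zeros(4, 4, dtype=torch.int32)


class ConfigStub:
    """Hypothetical stand-in for a model config produced by quantization."""
    _pre_quantization_dtype = torch.float16


layer, config = QuantLinearStub(), ConfigStub()

# getattr evaluates its default argument eagerly, so this would raise
# AttributeError on `layer.weight` even though the config attribute exists:
#     target_dtype = getattr(config, "_pre_quantization_dtype", layer.weight.dtype)

# The explicit branch never touches `layer.weight` when the config attribute
# is present, which is the shape of the change in the diff above:
if hasattr(config, "_pre_quantization_dtype"):
    target_dtype = config._pre_quantization_dtype
else:
    target_dtype = layer.weight.dtype

print(target_dtype)  # torch.float16
```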