Commit
[FA-2] Revert suggestion that broke FA2 fine-tuning with quantized models (#26916)

revert

younesbelkada authored Oct 18, 2023
1 parent caa0ff0 commit 574a538
Showing 3 changed files with 12 additions and 3 deletions.
5 changes: 4 additions & 1 deletion src/transformers/models/falcon/modeling_falcon.py
@@ -614,7 +614,10 @@ def forward(
         input_dtype = query_layer.dtype
         if input_dtype == torch.float32:
             # Handle the case where the model is quantized
-            target_dtype = getattr(self.config, "_pre_quantization_dtype", self.query_key_value.weight.dtype)
+            if hasattr(self.config, "_pre_quantization_dtype"):
+                target_dtype = self.config._pre_quantization_dtype
+            else:
+                target_dtype = self.query_key_value.weight.dtype

             logger.warning_once(
                 f"The input hidden states seems to be silently casted in float32, this might be related to"
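A note on what target_dtype is for: the dtype chosen here feeds a cast that sits just below this hunk and is not shown in the diff. Judging from the warning text, the silently upcast float32 hidden states are presumably converted back before the flash-attention kernel is called; a minimal sketch of that assumed downstream use:

    # Assumed follow-up (not part of the shown diff): cast the upcast fp32
    # activations back so the FA2 kernel receives a half-precision dtype.
    query_layer = query_layer.to(target_dtype)
    key_layer = key_layer.to(target_dtype)
    value_layer = value_layer.to(target_dtype)

The same pattern applies to the llama and mistral hunks below, with query_states/key_states/value_states in place of the falcon names.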
5 changes: 4 additions & 1 deletion src/transformers/models/llama/modeling_llama.py
@@ -476,7 +476,10 @@ def forward(
         input_dtype = query_states.dtype
         if input_dtype == torch.float32:
             # Handle the case where the model is quantized
-            target_dtype = getattr(self.config, "_pre_quantization_dtype", self.q_proj.weight.dtype)
+            if hasattr(self.config, "_pre_quantization_dtype"):
+                target_dtype = self.config._pre_quantization_dtype
+            else:
+                target_dtype = self.q_proj.weight.dtype

             logger.warning_once(
                 f"The input hidden states seems to be silently casted in float32, this might be related to"
5 changes: 4 additions & 1 deletion src/transformers/models/mistral/modeling_mistral.py
@@ -409,7 +409,10 @@ def forward(
         input_dtype = query_states.dtype
         if input_dtype == torch.float32:
             # Handle the case where the model is quantized
-            target_dtype = getattr(self.config, "_pre_quantization_dtype", self.q_proj.weight.dtype)
+            if hasattr(self.config, "_pre_quantization_dtype"):
+                target_dtype = self.config._pre_quantization_dtype
+            else:
+                target_dtype = self.q_proj.weight.dtype

             logger.warning_once(
                 f"The input hidden states seems to be silently casted in float32, this might be related to"
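All three hunks restore the same explicit branch in place of the getattr one-liner. The commit message does not spell out the failure mode, but a plausible reading (an assumption, not stated in the commit) is that Python evaluates call arguments eagerly: getattr's default expression self.q_proj.weight.dtype runs even when _pre_quantization_dtype exists, and some quantized layers do not expose an ordinary .weight to read a dtype from. A self-contained sketch of that difference, using made-up stand-in classes (FakeConfig and FakeQuantizedLinear are illustrative, not transformers code):

    # Sketch: why the getattr one-liner is not equivalent to the hasattr/else
    # branch when the default expression itself can fail.
    import torch

    class FakeQuantizedLinear:
        """Stand-in for a quantized layer with no ordinary .weight attribute."""
        qweight = b"\x00"  # packed quantized data, illustrative only

    class FakeConfig:
        _pre_quantization_dtype = torch.float16

    config, q_proj = FakeConfig(), FakeQuantizedLinear()

    # Restored form: the layer weight is never touched when the config
    # attribute exists, so quantized layers without .weight are fine.
    if hasattr(config, "_pre_quantization_dtype"):
        target_dtype = config._pre_quantization_dtype
    else:
        target_dtype = q_proj.weight.dtype
    print(target_dtype)  # torch.float16

    # Removed form: the default argument is evaluated before getattr runs,
    # so this raises even though _pre_quantization_dtype is present.
    try:
        target_dtype = getattr(config, "_pre_quantization_dtype", q_proj.weight.dtype)
    except AttributeError as err:
        print(f"one-liner fails: {err}")

The restored hasattr/else form only reads the layer weight when the config attribute is genuinely missing, which is why it is the safer choice for quantized fine-tuning.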
