Update src/transformers/models/falcon/modeling_falcon.py
Co-authored-by: Arthur <[email protected]>
younesbelkada and ArthurZucker authored Oct 18, 2023
1 parent dfe9ddd commit 21ffe37
Showing 1 changed file with 1 addition and 4 deletions.
5 changes: 1 addition & 4 deletions src/transformers/models/falcon/modeling_falcon.py
@@ -614,10 +614,7 @@ def forward(
         input_dtype = query_layer.dtype
         if input_dtype == torch.float32:
             # Handle the case where the model is quantized
-            if hasattr(self.config, "_pre_quantization_dtype"):
-                target_dtype = self.config._pre_quantization_dtype
-            else:
-                target_dtype = self.query_key_value.weight.dtype
+            target_dtype = getattr(self.config, "_pre_quantization_dtype", self.query_key_value.weight.dtype)
 
             logger.warning_once(
                 f"The input hidden states seems to be silently casted in float32, this might be related to"
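The diff collapses an explicit hasattr/else branch into a single getattr call with a default value. As a quick illustration that the two forms resolve the attribute identically, here is a minimal, self-contained sketch; DummyConfig and fallback_dtype are hypothetical stand-ins for illustration only and are not part of the Falcon code:

import torch

class DummyConfig:
    # Hypothetical stand-in for a transformers config object.
    pass

config = DummyConfig()
fallback_dtype = torch.float16  # stand-in for self.query_key_value.weight.dtype

# Old pattern: explicit hasattr/else branch.
if hasattr(config, "_pre_quantization_dtype"):
    target_dtype_old = config._pre_quantization_dtype
else:
    target_dtype_old = fallback_dtype

# New pattern: getattr with a default collapses the branch into one expression.
target_dtype_new = getattr(config, "_pre_quantization_dtype", fallback_dtype)

assert target_dtype_old == target_dtype_new  # both fall back to torch.float16 here

# When the attribute does exist (e.g. a quantizer stored the original dtype),
# both forms pick it up the same way.
config._pre_quantization_dtype = torch.bfloat16
assert getattr(config, "_pre_quantization_dtype", fallback_dtype) == torch.bfloat16

Behaviorally the two forms are equivalent for a plain attribute lookup; the getattr version is simply shorter and avoids looking the attribute up twice.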
