Commit 6a6c001

style
younesbelkada committed Apr 18, 2024
1 parent 784c377 commit 6a6c001
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions src/transformers/integrations/awq.py
@@ -254,9 +254,9 @@ def fuse_awq_modules(model, quantization_config):
     # `None` attention mask to the fused attention modules as now the attention mask is dropped by our models and dealt
     # by the `AttentionMaskConverter` module.
     if len(fused_attention_modules) > 0:
-        fused_attention_parent_modules = set(
+        fused_attention_parent_modules = {
             fused_attention_module.split(".")[0] for fused_attention_module in fused_attention_modules
-        )
+        }
         for module_name, module in model.named_modules():
             if any(
                 module_name in fused_attention_parent_module
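For readers skimming the diff: the change swaps a generator expression wrapped in the set() constructor for the equivalent set comprehension, the more idiomatic form (and the one suggested by linters such as flake8-comprehensions' C401 rule). A minimal standalone sketch of the same transformation; the module names below are made up for illustration:

# Hypothetical example module names, standing in for the entries of
# `fused_attention_modules` collected earlier in fuse_awq_modules.
fused_attention_modules = ["model.layers.0.self_attn", "model.layers.1.self_attn", "lm_head"]

# Before: a generator expression passed to the set() constructor.
fused_attention_parent_modules = set(
    fused_attention_module.split(".")[0] for fused_attention_module in fused_attention_modules
)

# After: the equivalent set comprehension; same result, one less call.
fused_attention_parent_modules = {
    fused_attention_module.split(".")[0] for fused_attention_module in fused_attention_modules
}

print(fused_attention_parent_modules)  # {'model', 'lm_head'} (set order may vary)

Both forms build the same set of top-level parent module names; the comprehension is simply the form Python style guides prefer.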
