From 6a6c0017c593923aaacf116fbcca32c6420b179b Mon Sep 17 00:00:00 2001
From: Younes Belkada
Date: Thu, 18 Apr 2024 14:49:21 +0200
Subject: [PATCH] style

---
 src/transformers/integrations/awq.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/transformers/integrations/awq.py b/src/transformers/integrations/awq.py
index d625502708b88a..72c72e3c94bec0 100644
--- a/src/transformers/integrations/awq.py
+++ b/src/transformers/integrations/awq.py
@@ -254,9 +254,9 @@ def fuse_awq_modules(model, quantization_config):
     # `None` attention mask to the fused attention modules as now the attention mask is dropped by our models and dealt
     # by the `AttentionMaskConverter` module.
     if len(fused_attention_modules) > 0:
-        fused_attention_parent_modules = set(
+        fused_attention_parent_modules = {
             fused_attention_module.split(".")[0] for fused_attention_module in fused_attention_modules
-        )
+        }
         for module_name, module in model.named_modules():
             if any(
                 module_name in fused_attention_parent_module
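
A minimal sketch of the style rule the patch applies, namely building the set with a set comprehension instead of passing a generator expression to set(). The `module_names` list below is a hypothetical stand-in used only for illustration; both forms produce the same result.

    # Hypothetical input, standing in for fused_attention_modules
    module_names = ["model.layers.0.self_attn", "model.layers.1.self_attn"]

    # Before: generator expression passed to set()
    parents = set(name.split(".")[0] for name in module_names)

    # After: set comprehension, same result with one less call
    parents = {name.split(".")[0] for name in module_names}

    assert parents == {"model"}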