
Commit 7238387
Fix typo in EETQ Tests (#35160)
fix
MekkCyber authored Dec 9, 2024
1 parent de8a0b7 commit 7238387
Showing 1 changed file with 2 additions and 2 deletions.
tests/quantization/eetq_integration/test_eetq.py (2 additions, 2 deletions)
@@ -119,7 +119,7 @@ def test_quantized_model_conversion(self):

         self.assertEqual(nb_linears - 1, nb_eetq_linear)
 
-        # Try with `linear_weights_not_to_quantize`
+        # Try with `modules_to_not_convert`
         with init_empty_weights():
             model = OPTForCausalLM(config)
         quantization_config = EetqConfig(modules_to_not_convert=["fc1"])
@@ -128,7 +128,7 @@ def test_quantized_model_conversion(self):
         for module in model.modules():
             if isinstance(module, EetqLinear):
                 nb_eetq_linear += 1
 
         # 25 corresponds to the lm_head along with 24 fc1 layers.
         self.assertEqual(nb_linears - 25, nb_eetq_linear)
 
     def test_quantized_model(self):
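For context, `modules_to_not_convert` is the `EetqConfig` argument that keeps the listed submodules unquantized. Below is a minimal sketch, not part of this commit, of how the renamed argument is used when loading a model with EETQ quantization; it assumes the eetq kernels and a CUDA device are available, and the checkpoint name is an illustrative choice:

    # Minimal sketch (assumes eetq is installed and CUDA is available);
    # the checkpoint name is illustrative, not taken from this commit.
    from transformers import AutoModelForCausalLM, EetqConfig

    # Quantize every linear layer to int8 except the `fc1` projections,
    # mirroring the configuration exercised by the test above.
    quantization_config = EetqConfig("int8", modules_to_not_convert=["fc1"])

    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-350m",
        device_map="auto",
        quantization_config=quantization_config,
    )

Per the comment in the diff, the test model has 24 decoder layers, so skipping `fc1` leaves 24 unconverted linears plus the lm_head (the one module already left unconverted in the first assertion), which is why the test expects nb_linears - 25 converted modules.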
