[GPTQ] Fix test (huggingface#28018)
* fix test

* reduce length

* smaller model
SunMarc authored and wgifford committed Jan 21, 2024
1 parent 5151ea2 commit 9837d61
Showing 1 changed file with 13 additions and 14 deletions.
tests/quantization/gptq/test_gptq.py
@@ -217,7 +217,9 @@ def test_serialization(self):
         with tempfile.TemporaryDirectory() as tmpdirname:
             self.quantized_model.save_pretrained(tmpdirname)
             if not self.use_exllama:
-                quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname).to(0)
+                quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
+                    tmpdirname, quantization_config=GPTQConfig(use_exllama=False, bits=4)
+                ).to(0)
                 self.check_quantized_layers_type(quantized_model_from_saved, "cuda-old")
             else:
                 # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel
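For reference, the reload path this hunk exercises can be written as a short standalone snippet, roughly as sketched below; the checkpoint path is illustrative and a CUDA device is assumed.

from transformers import AutoModelForCausalLM, GPTQConfig

# Reload an already-quantized GPTQ checkpoint (saved with save_pretrained) and keep
# the older "cuda" kernels by passing a config with use_exllama=False.
model = AutoModelForCausalLM.from_pretrained(
    "path/to/saved-gptq-checkpoint",  # illustrative path
    quantization_config=GPTQConfig(bits=4, use_exllama=False),
).to(0)  # move to the first CUDA device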
@@ -242,12 +244,11 @@ def test_change_loading_attributes(self):
         with tempfile.TemporaryDirectory() as tmpdirname:
             self.quantized_model.save_pretrained(tmpdirname)
             if not self.use_exllama:
-                self.assertEqual(self.quantized_model.config.quantization_config.use_exllama, False)
+                self.check_quantized_layers_type(self.quantized_model, "cuda-old")
                 # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel
                 quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
                     tmpdirname, quantization_config=GPTQConfig(use_exllama=True, bits=4), device_map={"": 0}
                 )
-                self.assertEqual(quantized_model_from_saved.config.quantization_config.use_exllama, True)
                 self.assertEqual(quantized_model_from_saved.config.quantization_config.bits, self.bits)
                 self.check_quantized_layers_type(quantized_model_from_saved, "exllama")
                 self.check_inference_correctness(quantized_model_from_saved)
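Likewise, a minimal sketch of the kernel switch this test now checks: reloading the same saved checkpoint with the exllama backend enabled (path again illustrative).

from transformers import AutoModelForCausalLM, GPTQConfig

# Reload the saved GPTQ checkpoint with exllama kernels enabled; device_map places
# the model on GPU 0 so the exllama kernels can be initialized.
model = AutoModelForCausalLM.from_pretrained(
    "path/to/saved-gptq-checkpoint",  # illustrative path
    quantization_config=GPTQConfig(bits=4, use_exllama=True),
    device_map={"": 0},
)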
@@ -279,10 +280,10 @@ class GPTQTestActOrderExllama(unittest.TestCase):
     """
 
     EXPECTED_OUTPUTS = set()
-    EXPECTED_OUTPUTS.add("Hello my name is Katie and I am a 20 year")
-    model_name = "hf-internal-testing/Llama-2-7B-GPTQ"
-    revision = "gptq-4bit-128g-actorder_True"
-    input_text = "Hello my name is"
+    EXPECTED_OUTPUTS.add("Hello, how are you ? I'm doing good, thanks for asking.")
+    # 4bit + act_order + 128g
+    model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ"
+    input_text = "Hello, how are you ?"
 
     @classmethod
     def setUpClass(cls):
@@ -292,7 +293,6 @@ def setUpClass(cls):
         cls.quantization_config = GPTQConfig(bits=4, max_input_length=4028)
         cls.quantized_model = AutoModelForCausalLM.from_pretrained(
             cls.model_name,
-            revision=cls.revision,
             torch_dtype=torch.float16,
             device_map={"": 0},
             quantization_config=cls.quantization_config,
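A rough standalone equivalent of this setUpClass, assuming a CUDA GPU and the GPTQ dependencies the test suite already requires:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# max_input_length caps the exllama temp buffers, so prompts longer than this raise an error.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map={"": 0},
    quantization_config=GPTQConfig(bits=4, max_input_length=4028),
)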
Expand Down Expand Up @@ -336,7 +336,7 @@ def test_max_input_length(self):
self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3)
self.assertTrue("temp_state buffer is too small" in str(cm.exception))

prompt = "I am in Paris and" * 500
prompt = "I am in Paris and"
inp = self.tokenizer(prompt, return_tensors="pt").to(0)
self.assertTrue(inp["input_ids"].shape[1] < 4028)
self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3)
@@ -355,10 +355,10 @@ class GPTQTestExllamaV2(unittest.TestCase):
     """
 
    EXPECTED_OUTPUTS = set()
-    EXPECTED_OUTPUTS.add("Hello my name is Katie and I am a 20 year")
-    model_name = "hf-internal-testing/Llama-2-7B-GPTQ"
-    revision = "gptq-4bit-128g-actorder_True"
-    input_text = "Hello my name is"
+    EXPECTED_OUTPUTS.add("Hello, how are you ? I'm doing good, thanks for asking.")
+    # 4bit + act_order + 128g
+    model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ"
+    input_text = "Hello, how are you ?"
 
     @classmethod
     def setUpClass(cls):
@@ -368,7 +368,6 @@ def setUpClass(cls):
         cls.quantization_config = GPTQConfig(bits=4, exllama_config={"version": 2})
         cls.quantized_model = AutoModelForCausalLM.from_pretrained(
             cls.model_name,
-            revision=cls.revision,
             torch_dtype=torch.float16,
             device_map={"": 0},
             quantization_config=cls.quantization_config,
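And a comparable sketch for the exllamav2 path configured here, under the same assumptions:

import torch
from transformers import AutoModelForCausalLM, GPTQConfig

model = AutoModelForCausalLM.from_pretrained(
    "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ",
    torch_dtype=torch.float16,
    device_map={"": 0},
    # exllama_config selects the kernel version; {"version": 2} enables exllamav2
    quantization_config=GPTQConfig(bits=4, exllama_config={"version": 2}),
)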