diff --git a/neural_speed/convert/convert_quantized_gptj.py b/neural_speed/convert/convert_quantized_gptj.py
index 0a060c9bb..aca52a517 100644
--- a/neural_speed/convert/convert_quantized_gptj.py
+++ b/neural_speed/convert/convert_quantized_gptj.py
@@ -66,6 +66,10 @@ def convert_q4_bestla_tensor(src_name, dst_name, model, fout, q_config):
         int_weight = (int_weight - 8) * 16
         gptq_scales = gptq_scales / 16
         gptq_zeros = (gptq_zeros - 8) * 16
+    elif q_config['bits'] == 3:
+        int_weight = (int_weight - 4) * 32
+        gptq_scales = gptq_scales / 32
+        gptq_zeros = (gptq_zeros - 4) * 32
     dst = np.zeros((int_weight.shape[0], int_weight.shape[1] * 4), dtype=np.int8)
     int_weight = np.ascontiguousarray(int_weight.numpy())
     gptq_scales = np.ascontiguousarray((gptq_scales.float()).numpy())
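
A minimal sketch, not part of the patch, of why the new 3-bit branch uses offset 4 and multiplier 32: the unsigned 3-bit codes (0..7) are shifted to a signed range and stretched by 32 so they span int8, and dividing the scales (and rescaling the zero points) by the same factor leaves the dequantized values unchanged, mirroring the existing 4-bit branch with offset 8 and multiplier 16. The variable names below (w_q, zero, scale) are illustrative, not from the repository.

import numpy as np

# Toy unsigned 3-bit codes, zero point, and scale (illustrative values only).
w_q = np.array([0, 1, 3, 5, 7], dtype=np.int32)   # raw 3-bit codes in [0, 7]
zero = np.int32(4)                                 # 3-bit zero point
scale = np.float32(0.05)

# Reference dequantization: (code - zero) * scale
ref = (w_q - zero) * scale

# Repacked layout from the patch: shift codes to [-4, 3] and stretch by 32 so
# they fill the int8 range, then compensate in the scale and zero point.
w_s8 = ((w_q - 4) * 32).astype(np.int8)            # values in [-128, 96]
zero_s8 = np.int8((zero - 4) * 32)
scale_s8 = scale / 32

# Dequantizing the repacked tensors reproduces the original values.
out = (w_s8.astype(np.float32) - np.float32(zero_s8)) * scale_s8
assert np.allclose(ref, out)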