This repository has been archived by the owner on Aug 30, 2024. It is now read-only.

Commit

fixed the dtype issue
Zhenzhong1 committed Jan 31, 2024
1 parent 8bb3be1 commit b812553
Showing 1 changed file with 4 additions and 1 deletion.
neural_speed/convert/convert_quantized_llama.py: 5 changes (4 additions & 1 deletion)
@@ -66,10 +66,13 @@ def convert_q4_bestla_tensor(src_name, dst_name, model, fout, q_config, n_head,
     shape = int_weight.shape
     write_header(fout, shape[::-1], dst_name, GGML_QJBLAS_TYPE)
 
+    weight_dtype = "int8"
     if q_config['bits'] == 4:
         int_weight = (int_weight - 8) * 16
         gptq_scales = gptq_scales / 16
         gptq_zeros = (gptq_zeros - 8) * 16
+        weight_dtype = "int4"
 
     dst = np.zeros((int_weight.shape[0], int_weight.shape[1] * 4), dtype=np.int8)
     int_weight = np.ascontiguousarray(int_weight.numpy())
     gptq_scales = np.ascontiguousarray((gptq_scales.float()).numpy())
@@ -84,7 +87,7 @@ def convert_q4_bestla_tensor(src_name, dst_name, model, fout, q_config, n_head,
 
     # pack int weight in bestla format
     byte_size = cpp_model.Model.np_bestla_qpack(int_weight, gptq_scales, gptq_zeros, g_idx, dst,
-                                                weight_dtype="int4" if q_config['bits'] == 4 else "int8",
+                                                weight_dtype=weight_dtype,
                                                 group_size=q_config['group_size'],
                                                 alg="sym" if q_config['sym'] else "asym",
                                                 compute_dtype="int8")
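For context, the bits == 4 branch above re-centers the unsigned 4-bit GPTQ weights and zero points (0..15) onto int8 steps of 16 and divides the scales by 16, so the usual asymmetric dequantization (weight - zero) * scale is left unchanged. The short numpy sketch below checks that invariance; the array names and sample data are illustrative only and not part of the repository.

    import numpy as np

    # Illustrative check (hypothetical names and data, not from the repository):
    # the 4-bit rescaling in the diff keeps (weight - zero) * scale intact.
    rng = np.random.default_rng(0)
    w4 = rng.integers(0, 16, size=(4, 8))        # unsigned 4-bit quantized weights, 0..15
    z4 = rng.integers(0, 16, size=(4, 1))        # per-group zero points, 0..15
    s = rng.random((4, 1)).astype(np.float32)    # per-group scales

    # Rescaling applied in the bits == 4 branch
    w8 = (w4 - 8) * 16      # re-centered and widened to int8 steps of 16
    z8 = (z4 - 8) * 16
    s8 = s / 16

    # Asymmetric dequantization before and after the rescaling
    before = (w4 - z4) * s
    after = (w8 - z8) * s8
    assert np.allclose(before, after)

With that rescaling in place, the change in this commit is to compute weight_dtype once ("int4" only when q_config['bits'] == 4, otherwise "int8") and pass the variable to np_bestla_qpack, instead of repeating the conditional at the call site.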

