
Commit

tts : fix tensor shapes
ggerganov committed Dec 16, 2024
1 parent 609f17d commit 1766241
Showing 5 changed files with 100 additions and 71 deletions.
4 changes: 4 additions & 0 deletions convert_hf_to_gguf.py
@@ -326,6 +326,8 @@ def prepare_tensors(self):
                             gguf.MODEL_TENSOR.TIME_MIX_W2,
                             gguf.MODEL_TENSOR.TIME_MIX_DECAY_W1,
                             gguf.MODEL_TENSOR.TIME_MIX_DECAY_W2,
+                            gguf.MODEL_TENSOR.POS_NET_NORM1,
+                            gguf.MODEL_TENSOR.POS_NET_NORM2,
                         )
                     )
                     or not new_name.endswith(".weight")
@@ -2060,6 +2062,8 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_posnet_length      (self.hparams["n_embd_posnet"])
         self.gguf_writer.add_convnext_length    (self.hparams["n_embd_convnext"])
         self.gguf_writer.add_feed_forward_length(self.hparams["n_ff"])
+        self.gguf_writer.add_group_norm_eps     (self.hparams["group_norm_epsilon"])
+        self.gguf_writer.add_group_norm_groups  (self.hparams["group_norm_groups"])
 
 
 @Model.register("Qwen2MoeForCausalLM")
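
Note: from the surrounding context (the condition ends by testing not new_name.endswith(".weight")), this tuple appears to be the converter's list of tensor types that are always stored in F32, so the posnet group-norm weights are excluded from lower-precision conversion. A rough, simplified sketch of that kind of membership check, using illustrative names rather than the exact converter code:

    import gguf

    # illustrative: tensor types that should stay in F32 regardless of the requested output type
    ALWAYS_F32 = (
        gguf.MODEL_TENSOR.POS_NET_NORM1,
        gguf.MODEL_TENSOR.POS_NET_NORM2,
    )

    def keep_in_f32(new_name: str, tensor_type: gguf.MODEL_TENSOR) -> bool:
        # posnet norm weights, and any tensor that is not a ".weight", remain F32
        return tensor_type in ALWAYS_F32 or not new_name.endswith(".weight")
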
9 changes: 9 additions & 0 deletions examples/tts/convert_pt_to_hf.py
@@ -98,6 +98,13 @@ def flatten_state_dict(state_dict, parent_key='', sep='.'):
     if new_key.endswith("gamma"):
         new_key = new_key.replace("gamma", "gamma.weight")
 
+    # convert from 1D [768] to 2D [768, 1] so that ggml_add can broadcast the bias
+    if (new_key.endswith("norm.weight") or new_key.endswith("norm1.weight") or new_key.endswith("norm2.weight") or new_key.endswith(".bias")) and (new_key.startswith("backbone.pos_net") or new_key.startswith("backbone.embed.bias")):
+        value = value.unsqueeze(1)
+
+    if new_key.endswith("dwconv.bias"):
+        value = value.unsqueeze(1)
+
     size_mb = value.element_size() * value.nelement() / (1024 * 1024)
     print(f"{size_mb:8.2f} MB - {new_key}: {value.shape}")

@@ -154,6 +161,8 @@ def flatten_state_dict(state_dict, parent_key='', sep='.'):
     "vocab_size": 4096,
     "n_head": 1,
     "layer_norm_epsilon": 1e-6,
+    "group_norm_epsilon": 1e-6,
+    "group_norm_groups": 32,
     "max_position_embeddings": 8192, # ?
     "num_hidden_layers": 12
 }
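
The key change in this script is the reshape in the first hunk above: biases and norm weights under backbone.pos_net (and backbone.embed.bias) are stored as 1D tensors in the PyTorch checkpoint, and the script adds a trailing singleton dimension so that, per the comment, ggml_add can broadcast them. A minimal torch illustration of what unsqueeze(1) does to such a tensor; the [768] size and the [768, 1024] activation shape are illustrative only:

    import torch

    bias = torch.zeros(768)     # 1D bias/norm weight as stored in the checkpoint
    bias2d = bias.unsqueeze(1)  # -> shape [768, 1]

    # torch analogue of a broadcast add: a [768, 1] column adds against a [768, T] tensor
    act = torch.randn(768, 1024)
    out = act + bias2d          # result shape: [768, 1024]
    print(bias.shape, bias2d.shape, out.shape)
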
2 changes: 2 additions & 0 deletions gguf-py/gguf/constants.py
@@ -125,6 +125,8 @@ class Attention:
         VALUE_LENGTH      = "{arch}.attention.value_length"
         LAYERNORM_EPS     = "{arch}.attention.layer_norm_epsilon"
         LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon"
+        GROUPNORM_EPS     = "{arch}.attention.group_norm_epsilon"
+        GROUPNORM_GROUPS  = "{arch}.attention.group_norm_groups"
         CAUSAL            = "{arch}.attention.causal"
         Q_LORA_RANK       = "{arch}.attention.q_lora_rank"
         KV_LORA_RANK      = "{arch}.attention.kv_lora_rank"
6 changes: 6 additions & 0 deletions gguf-py/gguf/gguf_writer.py
@@ -739,6 +739,12 @@ def add_layer_norm_eps(self, value: float) -> None:
     def add_layer_norm_rms_eps(self, value: float) -> None:
         self.add_float32(Keys.Attention.LAYERNORM_RMS_EPS.format(arch=self.arch), value)
 
+    def add_group_norm_eps(self, value: float) -> None:
+        self.add_float32(Keys.Attention.GROUPNORM_EPS.format(arch=self.arch), value)
+
+    def add_group_norm_groups(self, value: int) -> None:
+        self.add_uint32(Keys.Attention.GROUPNORM_GROUPS.format(arch=self.arch), value)
+
     def add_causal_attention(self, value: bool) -> None:
         self.add_bool(Keys.Attention.CAUSAL.format(arch=self.arch), value)
 
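
A small usage sketch for the two new writer methods, showing which metadata keys they produce (see the constants.py hunk above). The file name, arch string, and call site are illustrative and not part of the commit; the 1e-6 and 32 values mirror the config added in convert_pt_to_hf.py:

    from gguf import GGUFWriter

    # illustrative arch string; substitute the actual decoder arch name used by the TTS model
    w = GGUFWriter("model.gguf", arch="wavtokenizer-dec")

    w.add_group_norm_eps(1e-6)   # stored as "<arch>.attention.group_norm_epsilon" (float32)
    w.add_group_norm_groups(32)  # stored as "<arch>.attention.group_norm_groups" (uint32)

    # a real conversion would also add the remaining hparams and tensors before writing the file
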
