Commit 5f4aef1
Merge remote-tracking branch 'origin/master' into tool-call
ochafik committed Oct 23, 2024
2 parents 2b49440 + 873279b commit 5f4aef1
Showing 5 changed files with 185 additions and 179 deletions.
1 change: 1 addition & 0 deletions convert_hf_to_gguf.py
@@ -2865,6 +2865,7 @@ def set_vocab(self):
self.gguf_writer.add_token_types(toktypes)
special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
special_vocab.chat_template = "rwkv-world"
# hack: Add '\n\n' as the EOT token to make it chat normally
special_vocab._set_special_token("eot", 261)
special_vocab.add_to_gguf(self.gguf_writer)

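Editor's note: this hunk registers token id 261 (RWKV's "\n\n") as the end-of-turn token so converted models terminate chat turns normally. As a rough illustration only (not part of this commit), one way to confirm the id landed in the converted file's metadata, assuming the gguf Python package from llama.cpp's gguf-py and a hypothetical output path:

# Illustration only (assumptions): uses the gguf Python package from llama.cpp's
# gguf-py; "rwkv-world.gguf" is a hypothetical converted model path, and the
# ReaderField internals may differ between gguf versions.
from gguf import GGUFReader

reader = GGUFReader("rwkv-world.gguf")
field = reader.fields.get("tokenizer.ggml.eot_token_id")
if field is not None:
    # The key holds a single integer; for this converter it should be 261.
    print("eot token id:", int(field.parts[field.data[0]][0]))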
3 changes: 3 additions & 0 deletions convert_lora_to_gguf.py
@@ -348,6 +348,9 @@ def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
if ".base_layer.weight" in name:
continue
logger.error(f"Unexpected name '{name}': Not a lora_A or lora_B tensor")
if ".embed_tokens.weight" in name or ".lm_head.weight" in name:
logger.error("Embeddings is present in the adapter. This can be due to new tokens added during fine tuning")
logger.error("Hint: if you are using TRL, make sure not to call setup_chat_format()")
sys.exit(1)

if base_name in tensor_map:
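Editor's note: the new error path rejects adapters that carry embedding or output-head weights, which usually means new tokens were added during fine-tuning (e.g. by TRL's setup_chat_format()). As a rough illustration only (not part of this commit), a small pre-flight check for those tensor names, assuming the adapter is saved as a safetensors file at a hypothetical path:

# Illustration only (assumptions): adapter stored as "adapter_model.safetensors";
# requires the safetensors package. Flags the same tensor names the converter rejects.
from safetensors import safe_open

with safe_open("adapter_model.safetensors", framework="pt") as f:
    offending = [n for n in f.keys()
                 if ".embed_tokens.weight" in n or ".lm_head.weight" in n]

if offending:
    print("Adapter contains embedding/output tensors; conversion will abort:")
    for name in offending:
        print("  ", name)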
6 changes: 3 additions & 3 deletions flake.lock

(Diff not rendered: flake.lock is a generated file.)
