
convert : partially revert PR ggerganov#4818 (ggerganov#5041)
cebtenzzre authored and jordankanter committed Feb 3, 2024
1 parent e9a3308 commit b870974
Showing 6 changed files with 241 additions and 433 deletions.
9 changes: 5 additions & 4 deletions convert-hf-to-gguf.py
@@ -10,7 +10,7 @@
 import sys
 from enum import IntEnum
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, ContextManager, Iterator, cast, Optional
+from typing import TYPE_CHECKING, Any, ContextManager, Iterator, cast

 import numpy as np
 import torch
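
Reverting to the PEP 604 spelling is safe even on Python 3.8 here because these scripts enable postponed evaluation of annotations (see the `from __future__ import annotations` line in convert-llama-ggml-to-gguf.py below), so `int | None` is stored as a string and never evaluated. A minimal, self-contained sketch of the mechanism; the function is illustrative, not from the file:

from __future__ import annotations  # PEP 563: annotations become plain strings


def clamp_rank(rank: int | None = None) -> int | None:
    # the 3.10-only union syntax is never evaluated at runtime, so this
    # definition also parses and runs on Python 3.8/3.9
    return max(rank, 0) if rank is not None else None


print(clamp_rank(-5))                      # 0
print(clamp_rank.__annotations__["rank"])  # 'int | None'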
@@ -487,7 +487,8 @@ def write_tensors(self):
             # map tensor names
             if "scales" in name:
                 new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias", ".scales"))
-                new_name = new_name.replace("scales", "act.scales")
+                if new_name is not None:
+                    new_name = new_name.replace("scales", "act.scales")
             else:
                 new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
             if new_name is None:
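
The new guard matters because gguf's `TensorNameMap.get_name` returns None for tensor names it cannot map, and calling `.replace` on that None raised AttributeError before the unmapped name could be reported. A sketch of the failure mode, using a toy stand-in for the real mapping:

from __future__ import annotations


def get_name(name: str) -> str | None:
    # toy stand-in: returns None for unmapped names, like TensorNameMap.get_name
    known = {"model.scales": "blk.0.scales"}
    return known.get(name)


name = "mystery.scales"
new_name = get_name(name)
if new_name is not None:  # the added guard
    new_name = new_name.replace("scales", "act.scales")
if new_name is None:
    # unmapped tensors now reach the script's normal error path instead of
    # crashing earlier on None.replace
    print(f"Can not map tensor {name!r}")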
@@ -904,7 +905,7 @@ def token_bytes_to_string(b):
         return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])

     @staticmethod
-    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: Optional[int] = None) -> list[bytes]:
+    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
         parts = [bytes([b]) for b in token]
         while True:
             min_idx = None
@@ -1285,7 +1286,7 @@ def main() -> None:

     if args.awq_path:
         sys.path.insert(1, str(Path(__file__).parent / 'awq-py'))
-        from awq.apply_awq import add_scale_weights
+        from awq.apply_awq import add_scale_weights  # type: ignore[import-not-found]
         tmp_model_path = args.model / "weighted_model"
         dir_model = tmp_model_path
         if tmp_model_path.is_dir():
14 changes: 5 additions & 9 deletions convert-llama-ggml-to-gguf.py
@@ -2,14 +2,14 @@
 from __future__ import annotations

 import argparse
+import os
 import struct
 import sys
 from enum import IntEnum
 from pathlib import Path

 import numpy as np

-import os
 if 'NO_LOCAL_GGUF' not in os.environ:
     sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
 import gguf
@@ -371,15 +371,11 @@ def handle_metadata(cfg, hp):
         params = convert.Params.loadOriginalParamsJson(fakemodel, orig_config_path)
     else:
         raise ValueError('Unable to load metadata')
-    vocab = convert.load_vocab(
-        cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir,
-        cfg.vocabtype)
-    # FIXME: Respect cfg.vocab_dir?
-    svocab = gguf.SpecialVocab(cfg.model_metadata_dir,
-                               load_merges = cfg.vocabtype == 'bpe',
-                               n_vocab = vocab.vocab_size)
+    vocab_path = Path(cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir)
+    vocab_factory = convert.VocabFactory(vocab_path)
+    vocab, special_vocab = vocab_factory.load_vocab(cfg.vocabtype, cfg.model_metadata_dir)
     convert.check_vocab_size(params, vocab)
-    return (params, vocab, svocab)
+    return params, vocab, special_vocab


 def handle_args():
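The rewritten block replaces the separate `convert.load_vocab` and `gguf.SpecialVocab` calls with convert.py's `VocabFactory`, which resolves the vocab directory once and returns the vocabulary together with its special-token metadata, eliminating the old FIXME about `cfg.vocab_dir`. A hedged usage sketch mirroring only the call shape visible in this diff; paths and the "spm" vocab type are hypothetical:

from pathlib import Path

import convert  # the repo's convert.py, assumed importable

vocab_path = Path("models/my-model")             # hypothetical model directory
vocab_factory = convert.VocabFactory(vocab_path)
# one call yields both the vocab and the special-token metadata
vocab, special_vocab = vocab_factory.load_vocab("spm", vocab_path)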
3 changes: 1 addition & 2 deletions convert-lora-to-ggml.py
@@ -5,17 +5,16 @@
 import os
 import struct
 import sys
+from pathlib import Path
 from typing import Any, BinaryIO, Sequence

 import numpy as np
 import torch

-from pathlib import Path
 if 'NO_LOCAL_GGUF' not in os.environ:
     sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
 import gguf


 NUMPY_TYPE_TO_FTYPE: dict[str, int] = {"float32": 0, "float16": 1}


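Moving `from pathlib import Path` into the standard-library block is purely stylistic (PEP 8 import grouping); `Path` only needs to be bound before the module-level `sys.path.insert` that uses it, which both orderings satisfy. A small sketch of the vendored-package pattern these scripts share, assuming a sibling gguf-py directory:

import os
import sys
from pathlib import Path

# prefer the gguf package bundled with the repo unless the user opts out
if 'NO_LOCAL_GGUF' not in os.environ:
    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
import gguf  # noqa: E402

print(gguf.__file__)  # run with NO_LOCAL_GGUF=1 to compare against an installed copy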
10 changes: 6 additions & 4 deletions convert-persimmon-to-gguf.py
@@ -1,11 +1,13 @@
 #!/usr/bin/env python3
-import torch
+import argparse
 import os
-from pprint import pprint
 import sys
-import argparse
 from pathlib import Path
+from pprint import pprint
+
+import torch
 from sentencepiece import SentencePieceProcessor
+
 if 'NO_LOCAL_GGUF' not in os.environ:
     sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
 import gguf
@@ -69,7 +71,7 @@ def main():
     persimmon_model = torch.load(args.ckpt_path)
     hparams = persimmon_model['args']
     pprint(hparams)
-    tensors = {}
+    tensors: dict[str, torch.Tensor] = {}
     _flatten_dict(persimmon_model['model'], tensors, None)

     arch = gguf.MODEL_ARCH.PERSIMMON
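The annotation on `tensors` is needed because mypy cannot infer an element type for a bare `{}` that is only populated inside a helper (it reports "need type annotation"), and it documents what `_flatten_dict` produces. A self-contained sketch with a toy flattener standing in for the script's `_flatten_dict`:

from __future__ import annotations

import torch


def flatten(nested: dict, out: dict[str, torch.Tensor], prefix: str | None) -> None:
    # toy stand-in for _flatten_dict: joins nested keys with '.'
    for key, value in nested.items():
        name = key if prefix is None else f"{prefix}.{key}"
        if isinstance(value, dict):
            flatten(value, out, name)
        else:
            out[name] = value


# without the annotation, mypy cannot type the empty dict
tensors: dict[str, torch.Tensor] = {}
flatten({"model": {"wte": torch.zeros(2, 2)}}, tensors, None)
print(list(tensors))  # ['model.wte']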