From 57dee04e162532bcb0c8c2e7167a9f3bdff2f7be Mon Sep 17 00:00:00 2001
From: Gabe Goodhart
Date: Tue, 19 Nov 2024 16:14:03 -0700
Subject: [PATCH] Download fix (#1366)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* fix: allow multiple weight mapping files for mistral

Downloading a Mistral model fails because it includes multiple weight
mapping files. The regression was introduced in commit
`766bee9f4a1fcb187fae543a525495d3ff482097`. I'm unclear on the original
intent, but perhaps the exception was meant to apply only to Granite
models. This isn’t an ideal fix, but it does enable Mistral to be
downloaded and used for chat.

Signed-off-by: Sébastien Han

* fix(download): Fix safetensors/bin/pth download logic

The previous logic didn't handle .bin files, so if a model (like
mistral) has both .bin and .safetensors, it would download both.

Branch: download-fix

Signed-off-by: Gabe Goodhart

* fix(convert hf): Better logic to handle multiple weight mapping files

This will not actually be needed for mistral with the fix in download
to handle .bin files, but it may be needed for other models, so it's
worth having.

Branch: download-fix

Signed-off-by: Gabe Goodhart

---------

Signed-off-by: Sébastien Han
Signed-off-by: Gabe Goodhart
Co-authored-by: Sébastien Han
---
 torchchat/cli/convert_hf_checkpoint.py | 35 ++++++++++++++++++--------
 torchchat/cli/download.py              |  9 ++++---
 2 files changed, 29 insertions(+), 15 deletions(-)

diff --git a/torchchat/cli/convert_hf_checkpoint.py b/torchchat/cli/convert_hf_checkpoint.py
index f428e4cc6f..122ab0f282 100644
--- a/torchchat/cli/convert_hf_checkpoint.py
+++ b/torchchat/cli/convert_hf_checkpoint.py
@@ -39,19 +39,14 @@ def convert_hf_checkpoint(
     config = TransformerArgs.from_params(config_args)
     print(f"Model config {config.__dict__}")
 
-    # Load the json file containing weight mapping
+    # Find all candidate weight mapping index files
     model_map_json_matches = [Path(m) for m in glob.glob(str(model_dir / "*.index.json"))]
-    assert len(model_map_json_matches) <= 1, "Found multiple weight mapping files"
-    if len(model_map_json_matches):
-        model_map_json = model_map_json_matches[0]
-    else:
-        model_map_json = model_dir / "pytorch_model.bin.index.json"
 
     # If there is no weight mapping, check for a consolidated model and
     # tokenizer we can move. Llama 2 and Mistral have weight mappings, while
     # Llama 3 has a consolidated model and tokenizer.
     # Otherwise raise an error.
-    if not model_map_json.is_file():
+    if not model_map_json_matches:
         consolidated_pth = model_dir / "original" / "consolidated.00.pth"
         tokenizer_pth = model_dir / "original" / "tokenizer.model"
         if consolidated_pth.is_file() and tokenizer_pth.is_file():
@@ -68,11 +63,30 @@ def convert_hf_checkpoint(
             return
         else:
             raise RuntimeError(
-                f"Could not find {model_map_json} or {consolidated_pth} plus {tokenizer_pth}"
+                f"Could not find a valid model weight map or {consolidated_pth} plus {tokenizer_pth}"
             )
 
-    with open(model_map_json) as json_map:
-        bin_index = json.load(json_map)
+    # Load the json file(s) containing weight mapping
+    #
+    # NOTE: If there are multiple index files, there are two possibilities:
+    #   1. The files could be mapped to different weight format files (e.g. .bin
+    #      vs .safetensors)
+    #   2. The files could be split subsets of the mappings that need to be
+    #      merged
+    #
+    # In either case, we can simply keep the mappings where the target file is
+    # valid in the model dir.
+    bin_index = {}
+    for weight_map_file in model_map_json_matches:
+        with open(weight_map_file, "r") as handle:
+            weight_map = json.load(handle)
+            valid_mappings = {
+                k: model_dir / v
+                for (k, v) in weight_map.get("weight_map", {}).items()
+                if (model_dir / v).is_file()
+            }
+            bin_index.update(valid_mappings)
+    bin_files = set(bin_index.values())
 
     weight_map = {
         "model.embed_tokens.weight": "tok_embeddings.weight",
@@ -96,7 +110,6 @@ def convert_hf_checkpoint(
         "model.norm.weight": "norm.weight",
         "lm_head.weight": "output.weight",
     }
-    bin_files = {model_dir / bin for bin in bin_index["weight_map"].values()}
 
     def permute(w, n_heads):
         return (
diff --git a/torchchat/cli/download.py b/torchchat/cli/download.py
index f334eb5558..4da2bc3907 100644
--- a/torchchat/cli/download.py
+++ b/torchchat/cli/download.py
@@ -35,11 +35,12 @@ def _download_hf_snapshot(
     model_info = model_info(model_config.distribution_path, token=hf_token)
     model_fnames = [f.rfilename for f in model_info.siblings]
 
-    # Check the model config for preference between safetensors and pth
+    # Check the model config for preference between safetensors and pth/bin
     has_pth = any(f.endswith(".pth") for f in model_fnames)
+    has_bin = any(f.endswith(".bin") for f in model_fnames)
     has_safetensors = any(f.endswith(".safetensors") for f in model_fnames)
 
-    # If told to prefer safetensors, ignore pth files
+    # If told to prefer safetensors, ignore pth/bin files
     if model_config.prefer_safetensors:
         if not has_safetensors:
             print(
@@ -47,10 +48,10 @@ def _download_hf_snapshot(
                 file=sys.stderr,
             )
             exit(1)
-        ignore_patterns = "*.pth"
+        ignore_patterns = ["*.pth", "*.bin"]
 
     # If the model has both, prefer pth files over safetensors
-    elif has_pth and has_safetensors:
+    elif (has_pth or has_bin) and has_safetensors:
         ignore_patterns = "*safetensors*"
 
     # Otherwise, download everything
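
For reference, the merge strategy described in the NOTE in convert_hf_checkpoint.py
above can be read as a small standalone function. This is a minimal sketch, assuming
an already-downloaded model directory; the function name merge_weight_indices and
the example path are illustrative, not part of the patch:

    import json
    from pathlib import Path

    def merge_weight_indices(model_dir: Path) -> dict:
        # Merge every *.index.json under model_dir, keeping only the
        # entries whose target weight file actually exists on disk.
        merged = {}
        for index_file in sorted(model_dir.glob("*.index.json")):
            with open(index_file, "r") as handle:
                weight_map = json.load(handle).get("weight_map", {})
            merged.update({
                key: model_dir / fname
                for key, fname in weight_map.items()
                if (model_dir / fname).is_file()
            })
        return merged

    # Hypothetical usage:
    # bin_index = merge_weight_indices(Path("checkpoints/mistral"))
    # bin_files = set(bin_index.values())

Whether a model ships separate .bin and .safetensors indices or one mapping split
across files, filtering on file existence collapses both cases into a single usable
index without format-specific branching.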
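
Similarly, the format-preference rules in download.py above reduce to a pure function
over the snapshot's file names. A minimal sketch, relying on the huggingface_hub
convention that ignore_patterns may be a string, a list, or None; the name
choose_ignore_patterns is illustrative, not part of the patch:

    from typing import List, Optional, Union

    def choose_ignore_patterns(
        model_fnames: List[str], prefer_safetensors: bool
    ) -> Optional[Union[str, List[str]]]:
        has_pth = any(f.endswith(".pth") for f in model_fnames)
        has_bin = any(f.endswith(".bin") for f in model_fnames)
        has_safetensors = any(f.endswith(".safetensors") for f in model_fnames)

        if prefer_safetensors:
            if not has_safetensors:
                raise ValueError("Model has no safetensors files to download")
            # Skip both eager-checkpoint flavors, not just .pth
            return ["*.pth", "*.bin"]
        if (has_pth or has_bin) and has_safetensors:
            # Both formats present: keep pth/bin and skip safetensors,
            # so a model like Mistral is not downloaded twice
            return "*safetensors*"
        return None  # Only one format present: download everything

    # Hypothetical usage:
    # choose_ignore_patterns(["model.bin", "model.safetensors"], False)
    # -> "*safetensors*"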