
Commit 9853135

Merge branch 'main' into main

dakinggg authored Sep 21, 2023
2 parents 507b4df + 299e737

Showing 6 changed files with 14 additions and 8 deletions.
.github/workflows/pytest-cpu.yaml (1 addition, 1 deletion)

@@ -27,7 +27,7 @@ jobs:
           set -ex
           export PATH=/composer-python:$PATH
           python -m pip install --upgrade 'pip<23' wheel
-          python -m pip install --upgrade .[dev]
+          python -m pip install --upgrade .[all-cpu]
       - name: Run Tests
         id: tests
         run: |
scripts/eval/yamls/hf_8bit_eval.yaml (1 addition, 1 deletion)

@@ -10,7 +10,7 @@ models:
   model:
     name: hf_causal_lm
     pretrained_model_name_or_path: ${model_name_or_path}
-    init_device: cpu
+    init_device: mixed
     pretrained: true
     load_in_8bit: true
   tokenizer:
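
A note on the recurring init_device change (cpu to mixed) across these eval configs: in llm-foundry, `mixed` initialization is intended for multi-process (FSDP) runs, where a single rank materializes the pretrained weights and every other rank builds the model on PyTorch's meta device, receiving its shard afterwards. A rough Python sketch of that idea, not the library's actual implementation:

    import torch.distributed as dist

    def resolve_init_device(init_device: str) -> str:
        # Sketch only: 'mixed' is assumed to mean rank 0 loads real weights
        # on CPU while the other ranks use the meta device until FSDP shards
        # the parameters out to them.
        if init_device == 'mixed':
            rank = dist.get_rank() if dist.is_initialized() else 0
            return 'cpu' if rank == 0 else 'meta'
        return init_device
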
scripts/eval/yamls/hf_eval.yaml (1 addition, 1 deletion)

@@ -12,7 +12,7 @@ models:
   model:
     name: hf_causal_lm
     pretrained_model_name_or_path: ${model_name_or_path}
-    init_device: cpu
+    init_device: mixed
     pretrained: true
   tokenizer:
     name: ${model_name_or_path}
scripts/eval/yamls/hf_lora_eval.yml (4 additions, 2 deletions)

@@ -5,7 +5,9 @@ precision: amp_fp16
 # If you are using one model, put it here:
 model_name_or_path: EleutherAI/gpt-neo-125m
 # If you are using a seperated lora weight, put it here:
-lora_id_or_path: nathan0/lora-gpt-neo-125m-alpaca
+# lora weights must be compatible with the specified model
+lora_id_or_path: edbeeching/gpt-neo-125M-imdb-lora # Example lora weights for gpt-neo-125m
 
 # otherwise, write a block for each model you want to test in the `models` section
 
 models:
@@ -14,7 +16,7 @@ models:
   model:
     name: hf_causal_lm
     pretrained_model_name_or_path: ${model_name_or_path}
-    init_device: cpu
+    init_device: mixed
     pretrained: true
     pretrained_lora_id_or_path: ${lora_id_or_path}
   tokenizer:
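
The "lora weights must be compatible" comment added above is worth underlining: a LoRA adapter stores low-rank deltas for specific modules of a specific base architecture, so pairing it with a different base model fails at load time. A minimal sketch of how such an adapter is typically attached with the peft library (assumes peft and transformers are installed):

    from peft import PeftModel
    from transformers import AutoModelForCausalLM

    # The adapter and base model must match; the example adapter above was
    # trained against gpt-neo-125m.
    base = AutoModelForCausalLM.from_pretrained('EleutherAI/gpt-neo-125m')
    model = PeftModel.from_pretrained(base, 'edbeeching/gpt-neo-125M-imdb-lora')
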
scripts/eval/yamls/mpt_eval.yaml (2 additions, 2 deletions)

@@ -3,7 +3,6 @@ tokenizer_name: EleutherAI/gpt-neox-20b
 seed: 1
 precision: amp_fp16
 
-
 models:
 -
   model_name: mpt_test
@@ -14,7 +13,8 @@ models:
       model_max_length: ${max_seq_len}
   model:
     name: mpt_causal_lm
-    init_device: meta
+    init_device: mixed
+    # Set the below model parameters to match the checkpoint specified with load_path
     d_model: 768
     n_heads: 12
     n_layers: 12
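
The new comment in this file is load-bearing: d_model, n_heads, and n_layers determine the module shapes, and loading a checkpoint whose tensors disagree with them raises a size-mismatch error. A toy illustration of the failure mode in plain PyTorch (hypothetical shapes, not MPT's actual module layout):

    import torch
    import torch.nn as nn

    ckpt = {'weight': torch.zeros(768, 768)}   # checkpoint saved with d_model=768
    model = nn.Linear(1024, 1024, bias=False)  # config mistakenly says d_model=1024

    try:
        model.load_state_dict(ckpt)
    except RuntimeError as err:
        print(f'size mismatch, as expected: {err}')
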
setup.py (5 additions, 1 deletion)

@@ -47,7 +47,7 @@
 ]
 
 install_requires = [
-    'mosaicml[libcloud,wandb,mlflow]>=0.16.1,<0.17',
+    'mosaicml[libcloud,wandb,mlflow,oci,gcs]>=0.16.1,<0.17',
     'accelerate>=0.20,<0.21',  # for HF inference `device_map`
     'transformers>=4.33,<4.34',
     'mosaicml-streaming>=0.6,<0.7',
@@ -64,6 +64,8 @@
     'cmake>=3.25.0,<=3.26.3',  # required for triton-pre-mlir below
     # PyPI does not support direct dependencies, so we remove this line before uploading from PyPI
     'triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir_sm90#subdirectory=python',
+    'boto3>=1.21.45,<2',
+    'huggingface-hub>=0.17.0,<1.0',
 ]
 
 extra_deps = {}
@@ -103,6 +105,8 @@
     'openai==0.27.8',
     'tiktoken==0.4.0',
 ]
+extra_deps['all-cpu'] = set(
+    dep for key, deps in extra_deps.items() for dep in deps if 'gpu' not in key)
 extra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)
 
 setup(
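
The new all-cpu extra defined above is what the CI workflow change at the top of this commit consumes via `pip install .[all-cpu]`: it collects every dependency from the extras whose key does not contain 'gpu'. A standalone sketch of the comprehension's behavior, using hypothetical dependency groups rather than the real ones:

    # Hypothetical extras; the real groups are defined earlier in setup.py.
    extra_deps = {
        'dev': ['pytest>=7', 'pre-commit>=2'],
        'gpu': ['flash-attn==1.0.7'],
        'openai': ['openai==0.27.8', 'tiktoken==0.4.0'],
    }

    # The generator is fully consumed by set() before the assignment happens,
    # so adding the new key to the dict here is safe.
    extra_deps['all-cpu'] = set(
        dep for key, deps in extra_deps.items() for dep in deps if 'gpu' not in key)

    # flash-attn is excluded because its key contains 'gpu'.
    assert 'flash-attn==1.0.7' not in extra_deps['all-cpu']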
