Workflow for AutoTP #9

Workflow file for this run

name: autotp
on:
  pull_request:
    paths:
      - '.github/workflows/autotp.yml'
      - 'requirements/**'
      - 'deepspeed/__init__.py'
      - 'deepspeed/module_inject/**'
      - '!deepspeed/module_inject/containers/**' # exclude container dir
  workflow_dispatch:
  merge_group:
    branches: [ master ]
  schedule:
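    # weekly scheduled run: Sundays at 00:00 UTC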
    - cron: "0 0 * * 0"
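
# a newly triggered run cancels any in-progress run for the same branch/PR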
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  unit-tests:
    runs-on: [self-hosted, cpu]
    steps:
      - uses: actions/checkout@v3
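      # setup-venv is a local action defined in this repository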
      - id: setup-venv
        uses: ./.github/workflows/setup-venv
      - name: Install gcc-9
        run: |
          sudo add-apt-repository -u ppa:ubuntu-toolchain-r/test
          sudo apt install -y gcc-9 g++-9
          # register gcc-9 and g++-9 as the default compilers (priority 99 makes them the auto-selected alternatives)
          sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 99
          sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-9 99
      - name: Check gcc version
        run: |
          # Get gcc version
          gcc --version
          g++ --version
      - name: Detect instruction sets on instance
        run: |
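          # build and run IPEX's ISA detection tool to report which instruction
          # sets (AVX2, AVX-512, ...) this self-hosted runner supports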
          lscpu
          cat /proc/meminfo
          pip install cmake
          git clone https://github.com/intel/intel-extension-for-pytorch
          cd intel-extension-for-pytorch/tests/cpu/isa
          cmake .
          make
          ./cpu_features
      - name: Install numactl
        run: |
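          # numactl backs the deepspeed launcher's --bind_cores_to_rank flag used in the AutoTP tests below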
          sudo apt-get install -y numactl
      - name: Install oneCCL Bindings for PyTorch
        run: |
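          # torch, intel_extension_for_pytorch, and oneccl_bind_pt need to be
          # version-matched; Intel's wheel index below serves compatible CPU builds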
          pip install torch
          python -m pip install intel_extension_for_pytorch
          # the curl line is for troubleshooting
          curl -L https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
          python -m pip install oneccl_bind_pt --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
          pip install py-cpuinfo
          # check installed versions
          pip list | grep '\<torch\>'
          pip list | grep intel-extension-for-pytorch
          pip list | grep oneccl-bind-pt
      - name: Install oneCCL
        run: |
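          # build oneCCL from source; `make -j install` installs into build/_install,
          # whose env/setvars.sh is sourced by every later step that uses the CCL backend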
          git clone https://github.com/oneapi-src/oneCCL
          cd oneCCL
          mkdir build
          cd build
          cmake ..
          make -j install
      - name: Install transformers
        run: |
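          # install transformers from source; print the short commit hash so the
          # exact revision under test is recorded in the job log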
          git clone https://github.com/huggingface/transformers
          cd transformers
          git rev-parse --short HEAD
          pip install .
      - name: Install deepspeed
        run: |
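          # sanity-check that IPEX imports (and report whether an XPU device is present)
          # before building DeepSpeed with the dev/1bit/autotuning/inf extras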
python -c "import torch;import intel_extension_for_pytorch as ipex;print(ipex._C._has_xpu())"
# check why the host does not have AVX2 support
pip install .[dev,1bit,autotuning,inf]
ds_report
      - name: Python environment check
        run: |
          pip list
      - name: Download DeepSpeedExamples
        run: |
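          # the fork's dev branch carries the ds-hf-compare.py script run in the AutoTP tests below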
          #git clone https://github.com/delock/DeepSpeedExamples --branch gma/hf_compare
          git clone https://github.com/foin6/DeepSpeedExamples --branch dev
      - name: Sanity check minimal
        run: |
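          # preloading the system libstdc++ is a workaround: the prebuilt IPEX/oneCCL
          # binaries appear to need a newer C++ ABI than the libstdc++ that would
          # otherwise be loaded; setvars.sh puts the locally built oneCCL on the environment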
          export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6
          source oneCCL/build/_install/env/setvars.sh
          python -c "import torch;import intel_extension_for_pytorch as ipex;print(ipex._C._has_xpu())"
      - name: AutoTP test (facebook/opt-1.3b)
        run: |
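          # on this CPU runner --num_gpus 2 launches two ranks, i.e. tensor-parallel
          # degree 2; inference-test.py generates text under AutoTP, and ds-hf-compare.py
          # checks the DeepSpeed output against plain HuggingFace inference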
          export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6
          source oneCCL/build/_install/env/setvars.sh
          # change MODEL to test a different model; the remaining lines are identical across test steps
          export MODEL=facebook/opt-1.3b
          cd DeepSpeedExamples/inference/huggingface/text-generation
          deepspeed --num_gpus 2 --bind_cores_to_rank inference-test.py --model $MODEL --dtype bfloat16 --use_meta_tensor
          deepspeed --num_gpus 2 --bind_cores_to_rank ds-hf-compare.py --model $MODEL --dtype bfloat16 --num_inputs 1 --use_kernel False
      - name: AutoTP test (bigscience/bloom-3b)
        run: |
          export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6
          source oneCCL/build/_install/env/setvars.sh
          # change MODEL to test a different model; the remaining lines are identical across test steps
          export MODEL=bigscience/bloom-3b
          cd DeepSpeedExamples/inference/huggingface/text-generation
          deepspeed --num_gpus 2 --bind_cores_to_rank inference-test.py --model $MODEL --dtype bfloat16 --use_meta_tensor
          deepspeed --num_gpus 2 --bind_cores_to_rank ds-hf-compare.py --model $MODEL --dtype bfloat16 --num_inputs 1 --use_kernel False