# benchmark_nightly_lmi.yml
---
name: Benchmark LMI torchserve nightly

on:
  # Scheduled run is currently disabled; re-enable to run every day at 2:15am (UTC).
  # schedule:
  #   - cron: '15 02 * * *'
  push:
    branches:
      - benchmark_update

jobs:
  nightly:
    strategy:
      fail-fast: false
      matrix:
        hardware: [lmi]  # [cpu, gpu, inf2, lmi]
    runs-on:
      - self-hosted
      - ${{ matrix.hardware }}
    # 1320 minutes = 22 hours; LLM benchmark runs are long.
    timeout-minutes: 1320
    steps:
      - name: Clean up previous run
        run: |
          echo "Cleaning up previous run"
          cd $RUNNER_WORKSPACE
          pwd
          cd ..
          pwd
          rm -rf _tool
      - name: Setup Python 3.9
        uses: actions/setup-python@v5
        with:
          # Quoted to avoid YAML float coercion (e.g. an unquoted 3.10 parses as 3.1).
          python-version: "3.9"
          architecture: x64
      - name: Setup Java 17
        uses: actions/setup-java@v3
        with:
          distribution: 'zulu'
          java-version: '17'
      - name: Checkout TorchServe
        uses: actions/checkout@v3
        with:
          submodules: recursive
      - name: Download weights from s3
        run: |
          pip install awscli
          aws s3 cp s3://torchserve/mar_files/llama-2/Llama-2-7b-chat-hf/ /home/ubuntu/Llama-2-7b-chat-hf --recursive
      - name: Install dependencies
        run: |
          sudo apt-get update -y
          sudo apt-get install -y apache2-utils
          pip install sentencepiece
          pip install -r benchmarks/requirements-ab.txt
      - name: Benchmark lmi nightly
        run: python benchmarks/auto_benchmark.py --input benchmarks/benchmark_config_lmi.yaml --skip false
      - name: Clean up weights
        run: |
          rm -rf /home/ubuntu/Llama-2-7b-chat-hf
      # - name: Save benchmark artifacts
      #   uses: actions/upload-artifact@v2
      #   with:
      #     name: nightly ${{ matrix.hardware }} artifact
      #     path: /tmp/ts_benchmark
      # - name: Download benchmark artifacts for auto validation
      #   uses: dawidd6/action-download-artifact@v2
      #   with:
      #     workflow: ${{ github.event.workflow_run.workflow_id }}
      #     workflow_conclusion: success
      #     if_no_artifact_found: ignore
      #     path: /tmp/ts_artifacts
      #     name: ${{ matrix.hardware }}_benchmark_validation
      # - name: Validate Benchmark result
      #   run: python benchmarks/validate_report.py --input-artifacts-dir /tmp/ts_artifacts/${{ matrix.hardware }}_benchmark_validation
      # - name: Update benchmark artifacts for auto validation
      #   run: python benchmarks/utils/update_artifacts.py --output /tmp/ts_artifacts/${{ matrix.hardware }}_benchmark_validation
      # - name: Upload the updated benchmark artifacts for auto validation
      #   uses: actions/upload-artifact@v2
      #   with:
      #     name: ${{ matrix.hardware }}_benchmark_validation
      #     path: /tmp/ts_artifacts
      # - name: Open issue on failure
      #   if: ${{ failure() && github.event_name == 'schedule' && matrix.hardware == 'cpu' }}
      #   uses: dacbd/create-issue-action@v1
      #   with:
      #     token: ${{ secrets.GITHUB_TOKEN }}
      #     title: Nightly ${{ matrix.hardware }} benchmark failed
      #     body: Commit ${{ github.sha }} daily scheduled [CI run](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) failed, please check why
      #     assignees: ''