Merge pull request #16 from facebookexperimental/update-notify
Update workflows
mpage authored Jan 11, 2025
2 parents 5f53761 + e41e774 commit ab2cce4
Showing 4 changed files with 63 additions and 17 deletions.
39 changes: 30 additions & 9 deletions .github/workflows/_benchmark.yml
@@ -41,6 +41,10 @@ on:
description: free threading
type: boolean
default: false
clang:
description: build with latest clang
type: boolean
default: false
workflow_dispatch:
inputs:
fork:
@@ -62,9 +66,6 @@ on:
benchmarks:
description: Benchmarks to run (comma-separated; empty runs all benchmarks)
type: string
pgo:
description: Build with PGO
type: boolean
force:
description: Rerun and replace results if commit already exists
type: boolean
@@ -84,6 +85,10 @@ on:
description: free threading
type: boolean
default: false
clang:
description: build with latest clang
type: boolean
default: false
jobs:
benchmark-linux-x86_64-linux:
runs-on: [self-hosted, linux, bare-metal, linux-x86_64-linux]
@@ -122,7 +127,7 @@ jobs:
if: ${{ always() }}
id: should_run
run: |
venv/bin/python -m bench_runner should_run ${{ inputs.force }} ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.machine }} false ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} >> $GITHUB_OUTPUT
venv/bin/python -m bench_runner should_run ${{ inputs.force }} ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.machine }} false ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang == true && 'clang' || '' }} >> $GITHUB_OUTPUT
- name: Checkout python-macrobenchmarks
uses: actions/checkout@v4
if: ${{ steps.should_run.outputs.should_run != 'false' }}
@@ -139,12 +144,19 @@ jobs:
repository: mdboom/pyperformance
path: pyperformance
ref: ${{ env.PYPERFORMANCE_HASH }}
- name: Build with clang
if: ${{ inputs.clang }}
run: |
echo "CC=`which clang-19`" >> $GITHUB_ENV
echo "LLVM_AR=`which llvm-ar-19`" >> $GITHUB_ENV
echo "LLVM_PROFDATA=`which llvm-profdata-19`" >> $GITHUB_ENV
- name: Build Python
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
cd cpython
./configure ${{ inputs.pgo == true && '--enable-optimizations --with-lto=yes' || '' }} ${{ inputs.tier2 == true && '--enable-experimental-jit=interpreter' || '' }} ${{ inputs.jit == true && '--enable-experimental-jit=yes' || '' }} ${{ inputs.nogil == true && '--disable-gil' || '' }}
make ${{ runner.arch == 'ARM64' && '-j' || '-j4' }}
./python -VV
- name: Install pyperformance
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
@@ -161,7 +173,7 @@ jobs:
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
rm -rf ~/.debug/*
venv/bin/python -m bench_runner run_benchmarks ${{ inputs.perf && 'perf' || 'benchmark' }} cpython/python ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.benchmarks || 'all' }} ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} --run_id ${{ github.run_id }}
venv/bin/python -m bench_runner run_benchmarks ${{ inputs.perf && 'perf' || 'benchmark' }} cpython/python ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.benchmarks || 'all' }} ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang == true && 'clang' || '' }} --run_id ${{ github.run_id }}
# Pull again, since another job may have committed results in the meantime
- name: Pull benchmarking
if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
@@ -195,7 +207,8 @@ jobs:
if: ${{ (inputs.machine == 'linux-x86_64-linux' || inputs.machine == 'all') }}
env:
flags: ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true &&
'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }}
'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang
== true && 'clang' || '' }}
benchmark-linux-x86_64-vultr:
runs-on: [self-hosted, linux, bare-metal, linux-x86_64-vultr]
timeout-minutes: 1440
@@ -233,7 +246,7 @@ jobs:
if: ${{ always() }}
id: should_run
run: |
venv/bin/python -m bench_runner should_run ${{ inputs.force }} ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.machine }} false ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} >> $GITHUB_OUTPUT
venv/bin/python -m bench_runner should_run ${{ inputs.force }} ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.machine }} false ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang == true && 'clang' || '' }} >> $GITHUB_OUTPUT
- name: Checkout python-macrobenchmarks
uses: actions/checkout@v4
if: ${{ steps.should_run.outputs.should_run != 'false' }}
@@ -250,12 +263,19 @@ jobs:
repository: mdboom/pyperformance
path: pyperformance
ref: ${{ env.PYPERFORMANCE_HASH }}
- name: Build with clang
if: ${{ inputs.clang }}
run: |
echo "CC=`which clang-19`" >> $GITHUB_ENV
echo "LLVM_AR=`which llvm-ar-19`" >> $GITHUB_ENV
echo "LLVM_PROFDATA=`which llvm-profdata-19`" >> $GITHUB_ENV
- name: Build Python
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
cd cpython
./configure ${{ inputs.pgo == true && '--enable-optimizations --with-lto=yes' || '' }} ${{ inputs.tier2 == true && '--enable-experimental-jit=interpreter' || '' }} ${{ inputs.jit == true && '--enable-experimental-jit=yes' || '' }} ${{ inputs.nogil == true && '--disable-gil' || '' }}
make ${{ runner.arch == 'ARM64' && '-j' || '-j4' }}
./python -VV
- name: Install pyperformance
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
@@ -272,7 +292,7 @@ jobs:
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
rm -rf ~/.debug/*
venv/bin/python -m bench_runner run_benchmarks ${{ inputs.perf && 'perf' || 'benchmark' }} cpython/python ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.benchmarks || 'all' }} ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} --run_id ${{ github.run_id }}
venv/bin/python -m bench_runner run_benchmarks ${{ inputs.perf && 'perf' || 'benchmark' }} cpython/python ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.benchmarks || 'all' }} ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang == true && 'clang' || '' }} --run_id ${{ github.run_id }}
# Pull again, since another job may have committed results in the meantime
- name: Pull benchmarking
if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
@@ -306,4 +326,5 @@ jobs:
if: ${{ (inputs.machine == 'linux-x86_64-vultr' || inputs.machine == 'all') }}
env:
flags: ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true &&
'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }}
'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang
== true && 'clang' || '' }}
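
Note on the new "Build with clang" step above: values appended to the $GITHUB_ENV file become environment variables for every later step in the same job, so the existing "Build Python" step picks up CC, LLVM_AR, and LLVM_PROFDATA (and therefore clang-19) without further changes. A minimal illustrative sketch of the pattern in isolation, assuming clang-19/llvm-19 are installed on the runner and a cpython checkout already exists; the trigger, runner label, and configure options below are placeholders, not this repository's exact setup:

on:
  workflow_dispatch:
    inputs:
      clang:
        description: build with latest clang
        type: boolean
        default: false

jobs:
  build:
    # Illustrative runner; the real jobs run on self-hosted, bare-metal machines.
    runs-on: ubuntu-latest
    steps:
      - name: Build with clang
        if: ${{ inputs.clang }}
        run: |
          # Anything appended to $GITHUB_ENV is exported to all later steps in this job.
          echo "CC=$(which clang-19)" >> $GITHUB_ENV
          echo "LLVM_AR=$(which llvm-ar-19)" >> $GITHUB_ENV
          echo "LLVM_PROFDATA=$(which llvm-profdata-19)" >> $GITHUB_ENV
      - name: Build Python
        # Assumes ./cpython was checked out by an earlier step (omitted here).
        run: |
          cd cpython
          ./configure --enable-optimizations --with-lto=yes
          make -j4
          ./python -VV
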
10 changes: 8 additions & 2 deletions .github/workflows/_notify.yml
@@ -30,6 +30,10 @@ on:
description: free threading
type: boolean
default: false
clang:
description: build with latest clang
type: boolean
default: false
jobs:
notify:
runs-on: ubuntu-latest
@@ -52,9 +56,11 @@ jobs:
run: python -m bench_runner notify --fork ${{ inputs.fork }} --ref ${{ inputs.ref
}} --head ${{ inputs.head }} --date ${{ inputs.date }} --version ${{ inputs.version
}} --flags ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit ==
true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }}
true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang
== true && 'clang' || '' }}
env:
GH_TOKEN: ${{ github.token }}
env:
flags: ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true &&
'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }}
'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang
== true && 'clang' || '' }}
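
The multi-line flags values above are one long GitHub Actions expression wrapped across YAML lines: each `input == true && 'name' || ''` term acts as a ternary that yields the flag name or an empty string, and the terms are joined with literal commas. An illustrative, self-contained sketch of the same pattern (the workflow trigger and echo step are hypothetical):

on:
  workflow_dispatch:
    inputs:
      tier2:
        type: boolean
        default: false
      jit:
        type: boolean
        default: false
      nogil:
        type: boolean
        default: false
      clang:
        type: boolean
        default: false

jobs:
  show-flags:
    runs-on: ubuntu-latest
    env:
      # Each term contributes its flag name or an empty string.
      flags: ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang == true && 'clang' || '' }}
    steps:
      # With jit=true and clang=true this prints ",jit,,clang"; the empty slots
      # are left for the downstream tooling (bench_runner) to handle.
      - run: echo "flags=$flags"
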
15 changes: 12 additions & 3 deletions .github/workflows/_pystats.yml
@@ -37,6 +37,10 @@ on:
description: free threading
type: boolean
default: false
clang:
description: build with latest clang
type: boolean
default: false
workflow_call:
inputs:
fork:
@@ -70,6 +74,10 @@ on:
description: free threading
type: boolean
default: false
clang:
description: build with latest clang
type: boolean
default: false
jobs:
collect-stats:
runs-on: [self-hosted, linux, cloud]
@@ -100,7 +108,7 @@ jobs:
if: ${{ always() }}
id: should_run
run: |
venv/bin/python -m bench_runner should_run ${{ inputs.force }} ${{ inputs.fork }} ${{ inputs.ref }} all true ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} >> $GITHUB_OUTPUT
venv/bin/python -m bench_runner should_run ${{ inputs.force }} ${{ inputs.fork }} ${{ inputs.ref }} all true ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang == true && 'clang' || '' }} >> $GITHUB_OUTPUT
- name: Checkout python-macrobenchmarks
uses: actions/checkout@v4
if: ${{ steps.should_run.outputs.should_run != 'false' }}
@@ -137,7 +145,7 @@ jobs:
- name: Running pyperformance
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
venv/bin/python -m bench_runner run_benchmarks pystats cpython/python ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.benchmarks || 'all' }} ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} --run_id ${{ github.run_id }} ${{ inputs.individual == true && '--individual' || '' }}
venv/bin/python -m bench_runner run_benchmarks pystats cpython/python ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.benchmarks || 'all' }} ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang == true && 'clang' || '' }} --run_id ${{ github.run_id }} ${{ inputs.individual == true && '--individual' || '' }}
- name: Pull benchmarking
if: ${{ steps.should_run.outputs.should_run != 'false' }}
run: |
@@ -153,4 +161,5 @@ jobs:
add: results
env:
flags: ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true &&
'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }}
'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang
== true && 'clang' || '' }}
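
As in _benchmark.yml, the expensive steps here are gated on the `should_run` step: `bench_runner should_run` writes a `should_run=...` line to $GITHUB_OUTPUT, and later steps test `steps.should_run.outputs.should_run`. A stripped-down sketch of that gating mechanism, with an inline echo standing in for the real bench_runner call:

on: workflow_dispatch

jobs:
  gated:
    runs-on: ubuntu-latest
    steps:
      - name: Decide whether to run
        id: should_run
        # The real workflows run `venv/bin/python -m bench_runner should_run ...`,
        # which writes `should_run=<value>` to $GITHUB_OUTPUT; this placeholder
        # always answers true.
        run: echo "should_run=true" >> $GITHUB_OUTPUT
      - name: Run benchmarks
        # Skipped whenever the gate step reported 'false'.
        if: ${{ steps.should_run.outputs.should_run != 'false' }}
        run: echo "running benchmarks..."
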
16 changes: 13 additions & 3 deletions .github/workflows/benchmark.yml
@@ -6,7 +6,7 @@ env:
name: benchmark
run-name: benchmarking ${{ inputs.fork }}/${{ inputs.ref }} ${{ inputs.tier2 == true
&& 'T2' || '' }} ${{ inputs.jit == true && 'JIT' || '' }} ${{ inputs.nogil == true
&& 'NOGIL' || '' }}
&& 'NOGIL' || '' }} ${{ inputs.clang == true && 'CLANG' || '' }}
on:
workflow_dispatch:
inputs:
@@ -48,6 +48,10 @@ on:
description: free threading
type: boolean
default: false
clang:
description: build with latest clang
type: boolean
default: false
jobs:
# Determine the base commit of the selected commit. The output is passed to
# the `base` job below. If the data already exists for this commit, it will be
@@ -84,12 +88,13 @@ jobs:
- name: Determine base
id: base
run: |
python -m bench_runner get_merge_base ${{ inputs.benchmark_base }} ${{ inputs.machine }} ${{ inputs.pystats }} ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} >> $GITHUB_OUTPUT
python -m bench_runner get_merge_base ${{ inputs.benchmark_base }} ${{ inputs.machine }} ${{ inputs.pystats }} ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang == true && 'clang' || '' }} >> $GITHUB_OUTPUT
cat $GITHUB_OUTPUT
env:
flags: ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true &&
'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }}
'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }},${{ inputs.clang
== true && 'clang' || '' }}
head:
uses: ./.github/workflows/_benchmark.yml
with:
@@ -103,6 +108,7 @@ jobs:
tier2: ${{ inputs.tier2 }}
jit: ${{ inputs.jit }}
nogil: ${{ inputs.nogil }}
clang: ${{ inputs.clang }}
secrets: inherit

base:
@@ -120,6 +126,7 @@ jobs:
tier2: ${{ inputs.tier2 }}
jit: ${{ inputs.jit }}
nogil: ${{ inputs.nogil }}
clang: ${{ inputs.clang }}
secrets: inherit

pystats-head:
@@ -134,6 +141,7 @@ jobs:
tier2: ${{ inputs.tier2 }}
jit: ${{ inputs.jit }}
nogil: ${{ inputs.nogil }}
clang: ${{ inputs.clang }}
secrets: inherit

pystats-base:
@@ -150,6 +158,7 @@ jobs:
tier2: ${{ inputs.tier2 }}
jit: ${{ inputs.jit }}
nogil: ${{ inputs.nogil }}
clang: ${{ inputs.clang }}
secrets: inherit

generate:
@@ -175,4 +184,5 @@ jobs:
tier2: ${{ inputs.tier2 }}
jit: ${{ inputs.jit }}
nogil: ${{ inputs.nogil }}
clang: ${{ inputs.clang }}
secrets: inherit
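
The top-level benchmark.yml forwards the new clang input to the reusable workflows through `with:`, which is why the reusable workflows above also declare a matching `clang` input. A condensed sketch of the caller side, showing only the clang plumbing (the real calls also pass fork, ref, machine, and the other inputs):

on:
  workflow_dispatch:
    inputs:
      clang:
        type: boolean
        default: false

jobs:
  head:
    # Call the reusable workflow and forward the dispatch-level input to it.
    uses: ./.github/workflows/_benchmark.yml
    with:
      clang: ${{ inputs.clang }}
    secrets: inherit

Inside the called workflow, the forwarded value is read back as ${{ inputs.clang }}, which is what the "Build with clang" step and the flags expressions consume.
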
