Commit e0d5e53 (0 parents): 1,271 changed files with 6,385,754 additions and 0 deletions.
@@ -0,0 +1,303 @@
# Generated file: !!! DO NOT EDIT !!!
---
env:
  PYPERFORMANCE_HASH: ebb37f3583e26ea22cee34126b3b8a815112370b
  PYSTON_BENCHMARKS_HASH: 004743ccbd9e54598c543d7eb71fd3b8e10d5750
name: _benchmark
on:
  workflow_call:
    inputs:
      fork:
        description: Fork of cpython to benchmark
        type: string
      ref:
        description: Branch, tag or (full) SHA commit to benchmark
        type: string
      machine:
        description: Machine to run on
        type: string
      benchmarks:
        description: Benchmarks to run (comma-separated; empty runs all benchmarks)
        type: string
      pgo:
        description: Build with PGO
        type: boolean
      force:
        description: Rerun and replace results if commit already exists
        type: boolean
      perf:
        description: Collect Linux perf profiling data (Linux only)
        type: boolean

      tier2:
        description: tier 2 interpreter
        type: boolean
        default: false
      jit:
        description: JIT
        type: boolean
        default: false
      nogil:
        description: free threading
        type: boolean
        default: false
  workflow_dispatch:
    inputs:
      fork:
        description: Fork of cpython to benchmark
        type: string
        default: python
      ref:
        description: Branch, tag or (full) SHA commit to benchmark
        type: string
        default: main
      machine:
        description: Machine to run on
        default: linux-amd64
        type: choice
        options:
          - linux-x86_64-linux
          - linux-x86_64-vultr
          - all
      benchmarks:
        description: Benchmarks to run (comma-separated; empty runs all benchmarks)
        type: string
      pgo:
        description: Build with PGO
        type: boolean
      force:
        description: Rerun and replace results if commit already exists
        type: boolean
      perf:
        description: Collect Linux perf profiling data (Linux only)
        type: boolean

      tier2:
        description: tier 2 interpreter
        type: boolean
        default: false
      jit:
        description: JIT
        type: boolean
        default: false
      nogil:
        description: free threading
        type: boolean
        default: false
jobs:
  benchmark-linux-x86_64-linux:
    runs-on: [self-hosted, linux, bare-metal, linux-x86_64-linux]
    timeout-minutes: 1440

    steps:
      - name: Setup environment
        run: |-
          echo "BENCHMARK_MACHINE_NICKNAME=linux" >> $GITHUB_ENV
      - name: Checkout benchmarking
        uses: actions/checkout@v4
      - name: git gc
        run: |
          git gc
      - uses: fregante/setup-git-user@v2
      - name: Setup system Python
        if: ${{ runner.arch == 'X64' }}
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Checkout CPython
        uses: actions/checkout@v4
        with:
          repository: ${{ inputs.fork }}/cpython
          path: cpython
          ref: ${{ inputs.ref }}
          fetch-depth: 50
      - name: Install dependencies from PyPI
        run: |
          rm -rf venv
          python -m venv venv
          venv/bin/python -m pip install --upgrade pip
          venv/bin/python -m pip install -r requirements.txt
      - name: Should we run?
        if: ${{ always() }}
        id: should_run
        run: |
          venv/bin/python -m bench_runner should_run ${{ inputs.force }} ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.machine }} false ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} >> $GITHUB_OUTPUT
      - name: Checkout python-macrobenchmarks
        uses: actions/checkout@v4
        if: ${{ steps.should_run.outputs.should_run != 'false' }}
        with:
          repository: pyston/python-macrobenchmarks
          path: pyston-benchmarks
          ref: ${{ env.PYSTON_BENCHMARKS_HASH }}
      - name: Checkout pyperformance
        uses: actions/checkout@v4
        if: ${{ steps.should_run.outputs.should_run != 'false' }}
        with:
          repository: mdboom/pyperformance
          path: pyperformance
          ref: ${{ env.PYPERFORMANCE_HASH }}
      - name: Build Python
        if: ${{ steps.should_run.outputs.should_run != 'false' }}
        run: |
          cd cpython
          ./configure ${{ inputs.pgo == true && '--enable-optimizations --with-lto=yes' || '' }} ${{ inputs.tier2 == true && '--enable-experimental-jit=interpreter' || '' }} ${{ inputs.jit == true && '--enable-experimental-jit=yes' || '' }} ${{ inputs.nogil == true && '--disable-gil' || '' }}
          make ${{ runner.arch == 'ARM64' && '-j' || '-j4' }}
      - name: Install pyperformance
        if: ${{ steps.should_run.outputs.should_run != 'false' }}
        run: |
          venv/bin/python -m pip install ./pyperformance
      - name: Tune system
        if: ${{ steps.should_run.outputs.should_run != 'false' }}
        run: |
          sudo LD_LIBRARY_PATH=$LD_LIBRARY_PATH venv/bin/python -m pyperf system tune
      - name: Tune for (Linux) perf
        if: ${{ steps.should_run.outputs.should_run != 'false' && inputs.perf }}
        run: |
          sudo bash -c "echo 100000 > /proc/sys/kernel/perf_event_max_sample_rate"
      - name: Running pyperformance
        if: ${{ steps.should_run.outputs.should_run != 'false' }}
        run: |
          rm -rf ~/.debug/*
          venv/bin/python -m bench_runner run_benchmarks ${{ inputs.perf && 'perf' || 'benchmark' }} cpython/python ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.benchmarks || 'all' }} ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} --run_id ${{ github.run_id }}
      # Pull again, since another job may have committed results in the meantime
      - name: Pull benchmarking
        if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
        run: |
          # Another benchmarking task may have created results for the same
          # commit while the above was running. This "magic" incantation means
          # that any local results for this commit will override anything we
          # just pulled in in that case.
          git pull -s recursive -X ours --autostash --rebase
      - name: Adding data to repo
        if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
        uses: EndBug/add-and-commit@v9
        with:
          add: results
      - name: Upload benchmark artifacts
        if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
        uses: actions/upload-artifact@v4
        with:
          name: benchmark
          path: |
            benchmark.json
          overwrite: true
      - name: Upload perf artifacts
        if: ${{ steps.should_run.outputs.should_run != 'false' && inputs.perf }}
        uses: actions/upload-artifact@v4
        with:
          name: perf
          path: |
            profiling/results
    if: ${{ (inputs.machine == 'linux-x86_64-linux' || inputs.machine == 'all') }}
    env:
      flags: ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }}
  benchmark-linux-x86_64-vultr:
    runs-on: [self-hosted, linux, bare-metal, linux-x86_64-vultr]
    timeout-minutes: 1440

    steps:
      - name: Setup environment
        run: |-
          echo "BENCHMARK_MACHINE_NICKNAME=vultr" >> $GITHUB_ENV
      - name: Checkout benchmarking
        uses: actions/checkout@v4
      - name: git gc
        run: |
          git gc
      - uses: fregante/setup-git-user@v2
      - name: Setup system Python
        if: ${{ runner.arch == 'X64' }}
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Checkout CPython
        uses: actions/checkout@v4
        with:
          repository: ${{ inputs.fork }}/cpython
          path: cpython
          ref: ${{ inputs.ref }}
          fetch-depth: 50
      - name: Install dependencies from PyPI
        run: |
          rm -rf venv
          python -m venv venv
          venv/bin/python -m pip install --upgrade pip
          venv/bin/python -m pip install -r requirements.txt
      - name: Should we run?
        if: ${{ always() }}
        id: should_run
        run: |
          venv/bin/python -m bench_runner should_run ${{ inputs.force }} ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.machine }} false ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} >> $GITHUB_OUTPUT
      - name: Checkout python-macrobenchmarks
        uses: actions/checkout@v4
        if: ${{ steps.should_run.outputs.should_run != 'false' }}
        with:
          repository: pyston/python-macrobenchmarks
          path: pyston-benchmarks
          ref: ${{ env.PYSTON_BENCHMARKS_HASH }}
      - name: Checkout pyperformance
        uses: actions/checkout@v4
        if: ${{ steps.should_run.outputs.should_run != 'false' }}
        with:
          repository: mdboom/pyperformance
          path: pyperformance
          ref: ${{ env.PYPERFORMANCE_HASH }}
      - name: Build Python
        if: ${{ steps.should_run.outputs.should_run != 'false' }}
        run: |
          cd cpython
          ./configure ${{ inputs.pgo == true && '--enable-optimizations --with-lto=yes' || '' }} ${{ inputs.tier2 == true && '--enable-experimental-jit=interpreter' || '' }} ${{ inputs.jit == true && '--enable-experimental-jit=yes' || '' }} ${{ inputs.nogil == true && '--disable-gil' || '' }}
          make ${{ runner.arch == 'ARM64' && '-j' || '-j4' }}
      - name: Install pyperformance
        if: ${{ steps.should_run.outputs.should_run != 'false' }}
        run: |
          venv/bin/python -m pip install ./pyperformance
      - name: Tune system
        if: ${{ steps.should_run.outputs.should_run != 'false' }}
        run: |
          sudo LD_LIBRARY_PATH=$LD_LIBRARY_PATH venv/bin/python -m pyperf system tune
      - name: Tune for (Linux) perf
        if: ${{ steps.should_run.outputs.should_run != 'false' && inputs.perf }}
        run: |
          sudo bash -c "echo 100000 > /proc/sys/kernel/perf_event_max_sample_rate"
      - name: Running pyperformance
        if: ${{ steps.should_run.outputs.should_run != 'false' }}
        run: |
          rm -rf ~/.debug/*
          venv/bin/python -m bench_runner run_benchmarks ${{ inputs.perf && 'perf' || 'benchmark' }} cpython/python ${{ inputs.fork }} ${{ inputs.ref }} ${{ inputs.benchmarks || 'all' }} ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }} --run_id ${{ github.run_id }}
      # Pull again, since another job may have committed results in the meantime
      - name: Pull benchmarking
        if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
        run: |
          # Another benchmarking task may have created results for the same
          # commit while the above was running. This "magic" incantation means
          # that any local results for this commit will override anything we
          # just pulled in in that case.
          git pull -s recursive -X ours --autostash --rebase
      - name: Adding data to repo
        if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
        uses: EndBug/add-and-commit@v9
        with:
          add: results
      - name: Upload benchmark artifacts
        if: ${{ steps.should_run.outputs.should_run != 'false' && !inputs.perf }}
        uses: actions/upload-artifact@v4
        with:
          name: benchmark
          path: |
            benchmark.json
          overwrite: true
      - name: Upload perf artifacts
        if: ${{ steps.should_run.outputs.should_run != 'false' && inputs.perf }}
        uses: actions/upload-artifact@v4
        with:
          name: perf
          path: |
            profiling/results
    if: ${{ (inputs.machine == 'linux-x86_64-vultr' || inputs.machine == 'all') }}
    env:
      flags: ${{ inputs.tier2 == true && 'tier2' || '' }},${{ inputs.jit == true && 'jit' || '' }},${{ inputs.nogil == true && 'nogil' || '' }}
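The `_benchmark` workflow above is reusable: other workflows in the repository can invoke it through its `workflow_call` trigger. A minimal caller sketch, illustrative only and not part of this commit; the caller's name is hypothetical and the conventional `.github/workflows/` location is assumed:

```yaml
# Hypothetical caller: benchmark the JIT build of CPython main with PGO.
name: benchmark_jit_example
on:
  workflow_dispatch: {}
jobs:
  benchmark:
    uses: ./.github/workflows/_benchmark.yml  # assumed path for the generated file
    with:
      fork: python
      ref: main
      machine: linux-x86_64-vultr
      benchmarks: ''   # empty runs all benchmarks
      pgo: true        # adds --enable-optimizations --with-lto=yes to ./configure
      jit: true        # adds --enable-experimental-jit=yes
      tier2: false
      nogil: false
      force: false
      perf: false
```

With these inputs, the flag string handed to `should_run` and `run_benchmarks` renders as `,jit,` (the empty `tier2` and `nogil` branches leave only their separating commas), and the build step expands to `./configure --enable-optimizations --with-lto=yes --enable-experimental-jit=yes`.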
@@ -0,0 +1,32 @@
# Generated file: !!! DO NOT EDIT !!!
---
env:
  PYPERFORMANCE_HASH: ebb37f3583e26ea22cee34126b3b8a815112370b
  PYSTON_BENCHMARKS_HASH: 004743ccbd9e54598c543d7eb71fd3b8e10d5750
name: _find_failures
on:
  schedule:
    - cron: 0 8 * * 0
  workflow_dispatch: {}
jobs:
  find_failures:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout benchmarking
        uses: actions/checkout@v4
      - name: Setup system Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
          cache: pip
      - name: Install dependencies from PyPI
        run: python -m pip install -r requirements.txt
      - name: Regenerate derived data
        run: python -m bench_runner find_failures > failures.md
        env:
          GH_TOKEN: ${{ github.token }}
      - name: Add to repo
        uses: EndBug/add-and-commit@v9
        with:
          add: "['failures.md']"
          message: Benchmarking failures
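The `cron: 0 8 * * 0` entry above fires at 08:00 UTC every Sunday. Purely to illustrate the schedule syntax (this file is generated, so a real change would go through the bench_runner templates rather than this file), a second entry would add a mid-week run:

```yaml
on:
  schedule:
    - cron: 0 8 * * 0  # current setting: Sundays at 08:00 UTC
    - cron: 0 8 * * 3  # hypothetical addition: Wednesdays at 08:00 UTC
  workflow_dispatch: {}
```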
@@ -0,0 +1,58 @@
# Generated file: !!! DO NOT EDIT !!!
---
env:
  PYPERFORMANCE_HASH: ebb37f3583e26ea22cee34126b3b8a815112370b
  PYSTON_BENCHMARKS_HASH: 004743ccbd9e54598c543d7eb71fd3b8e10d5750
name: _generate
on:
  workflow_call:
    inputs:
      force:
        type: boolean
        default: false
      dry_run:
        type: boolean
        default: false

  workflow_dispatch:
    inputs:
      force:
        description: Regenerate all of the derived data, even if it already exists
        type: boolean
        default: false
      dry_run:
        description: 'Dry run: Do not commit to the repo'
        type: boolean
        default: false

jobs:
  generate-results:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout benchmarking
        uses: actions/checkout@v4
        with:
          ref: main
      - name: Checkout CPython
        uses: actions/checkout@v4
        with:
          repository: python/cpython
          path: cpython
      - name: Setup system Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
          cache: pip
      - name: Install dependencies from PyPI
        run: python -m pip install -r requirements.txt
      - name: Regenerate derived data
        run: python -m bench_runner generate_results ${{ inputs.force == true && '--force' || '' }}
      - name: Add to repo
        uses: EndBug/add-and-commit@v9
        if: ${{ !inputs.dry_run }}
        with:
          add: "['results', 'README.md', 'RESULTS.md', 'longitudinal.svg', 'longitudinal.json', 'configs.svg', 'configs.json', 'memory_long.svg', 'memory_long.json', 'memory_configs.svg', 'memory_configs.json']"
          message: Benchmarking results for @${{ github.actor }}
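Like `_benchmark`, the `_generate` workflow exposes `workflow_call`, so other workflows in the repository can regenerate the derived data (README.md, RESULTS.md, and the longitudinal, configs, and memory plots) after new results land. A minimal caller sketch, again hypothetical and assuming the conventional `.github/workflows/` location:

```yaml
# Hypothetical caller, not part of this commit: rebuild all derived data
# without committing, e.g. to validate a change on a branch.
name: generate_dry_run_example
on:
  workflow_dispatch: {}
jobs:
  generate:
    uses: ./.github/workflows/_generate.yml  # assumed path for the generated file
    with:
      force: true    # regenerate even data that already exists
      dry_run: true  # skip the EndBug/add-and-commit step
```

Setting `dry_run: true` exercises the full generation path while the `if: ${{ !inputs.dry_run }}` guard keeps the commit step from running.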