Bench: WIP #445

Closed
wants to merge 1 commit into from
25 changes: 17 additions & 8 deletions .github/actions/bench/action.yml
@@ -18,8 +18,8 @@ inputs:
description: ARCHFLAGS to pass to compilation
default: ""
opt:
description: opt flag to set for tests script
default: "true"
description: opt flag to set for tests script (opt, no_opt, all)
default: "opt"
bench_extra_args:
description: Further arguments to be appended to command line for `bench` script
default: ""
@@ -72,23 +72,32 @@ runs:
run: |
./scripts/tests bench -c ${{ inputs.perf }} --cross-prefix="${{ inputs.cross_prefix }}" \
--cflags="${{ inputs.cflags }}" --arch-flags="${{ inputs.archflags }}" \
--opt=$([[ ${{ inputs.opt }} == "false" ]] && echo "no_opt" || echo "opt") \
-v --output=output.json ${{ inputs.bench_extra_args }}
--opt=${{ inputs.opt }} \
-v --output-base=output ${{ inputs.bench_extra_args }}

./scripts/tests bench --components -c ${{ inputs.perf }} --cross-prefix="${{ inputs.cross_prefix }}" \
--cflags="${{ inputs.cflags }}" --arch-flags="${{ inputs.archflags }}" \
--opt=$([[ ${{ inputs.opt }} == "false" ]] && echo "no_opt" || echo "opt") \
-v --output=output.json ${{ inputs.bench_extra_args }}
-v ${{ inputs.bench_extra_args }}
- name: Check namespace
shell: ${{ env.SHELL }}
run: |
check-namespace
- name: Store benchmark result
if: ${{ inputs.store_results == 'true' }}
- name: Store optimized benchmark result
if: ${{ inputs.store_results == 'true' && (inputs.opt == "opt" || inputs.opt == "all") }}
uses: benchmark-action/github-action-benchmark@v1
with:
name: ${{ inputs.name }}
tool: "customSmallerIsBetter"
output-file-path: output.json
output-file-path: "output_opt.json"
github-token: ${{ inputs.gh_token }}
auto-push: true
- name: Store non-optimized benchmark result
if: ${{ inputs.store_results == 'true' && (inputs.opt == "no_opt" || inputs.opt == "all") }}
uses: benchmark-action/github-action-benchmark@v1
with:
name: "${{ inputs.name }} (no-opt)"
tool: "customSmallerIsBetter"
output-file-path: "output_no_opt.json"
github-token: ${{ inputs.gh_token }}
auto-push: true
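
With this change the action forwards the opt input (opt, no_opt, or all) straight to ./scripts/tests bench and then gates the two store steps on it. Below is a minimal Python sketch of that gating logic, for illustration only; the function name and the assert values are hypothetical and not part of the action.

def stored_result_files(opt: str, store_results: bool) -> list:
    """Mirror the `if:` conditions of the two store steps above."""
    files = []
    # "Store optimized benchmark result" runs for opt == "opt" or "all"
    if store_results and opt in ("opt", "all"):
        files.append("output_opt.json")
    # "Store non-optimized benchmark result" runs for opt == "no_opt" or "all"
    if store_results and opt in ("no_opt", "all"):
        files.append("output_no_opt.json")
    return files

assert stored_result_files("all", True) == ["output_opt.json", "output_no_opt.json"]
assert stored_result_files("opt", True) == ["output_opt.json"]
assert stored_result_files("no_opt", False) == []
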
13 changes: 4 additions & 9 deletions .github/workflows/bench.yml
@@ -50,7 +50,7 @@ jobs:
cflags: ${{ matrix.target.cflags }}
archflags: ${{ matrix.target.archflags }}
perf: ${{ matrix.target.bench_pmu }}
store_results: ${{ github.repository_owner == 'pq-code-package' && github.ref == 'refs/heads/main' }}
store_results: true # ${{ github.repository_owner == 'pq-code-package' && github.ref == 'refs/heads/main' }}
bench_extra_args: ${{ matrix.target.bench_extra_args }}
gh_token: ${{ secrets.AWS_GITHUB_TOKEN }}
ec2_all:
@@ -61,11 +61,6 @@
strategy:
fail-fast: false
matrix:
opt:
- name: opt
value: true
- name: no_opt
value: false
target:
- name: Graviton2
ec2_instance_type: t4g.small
@@ -116,8 +111,8 @@ jobs:
ec2_ami: ${{ matrix.target.ec2_ami }}
archflags: ${{ matrix.target.archflags }}
cflags: ${{ matrix.target.cflags }}
opt: ${{ matrix.opt.value }}
store_results: ${{ github.repository_owner == 'pq-code-package' && github.ref == 'refs/heads/main' }} # Only store optimized results
name: "${{ matrix.target.name }}${{ (!matrix.opt.value && ' (no-opt)') || ''}}"
opt: all
store_results: true # TEST ## ${{ github.repository_owner == 'pq-code-package' && github.ref == 'refs/heads/main' }} # Only store optimized results
name: ${{ matrix.target.name }}
perf: ${{ matrix.target.perf }}
secrets: inherit
1 change: 0 additions & 1 deletion mlkem/poly.c
@@ -1,5 +1,4 @@
// Copyright (c) 2024 The mlkem-native project authors
// SPDX-License-Identifier: Apache-2.0

Check failure on line 1 in mlkem/poly.c (GitHub Actions / Linting, ubuntu-latest): Missing copyright header error: mlkem/poly.c is missing copyright header
#include "poly.h"
#include <stdint.h>
#include <string.h>
53 changes: 25 additions & 28 deletions scripts/lib/mlkem_test.py
@@ -574,7 +574,7 @@ def _run_bench(
def bench(
self,
cycles: str,
output,
output_base,
run_as_root: bool,
exec_wrapper: str,
mac_taskpolicy,
@@ -586,7 +586,7 @@ def bench(
t = self._bench
else:
t = self._bench_components
output = False
output_base = False

if mac_taskpolicy:
if exec_wrapper:
@@ -595,38 +595,35 @@ def bench(
else:
exec_wrapper = f"taskpolicy -c {mac_taskpolicy}"

# NOTE: We haven't yet decided how to output both opt/no-opt benchmark results
if self.opt.lower() == "all":
if self.compile:
t.compile(False, extra_make_args=[f"CYCLES={cycles}"])
if self.run:
self._run_bench(t, False, run_as_root, exec_wrapper)
if self.compile:
t.compile(True, extra_make_args=[f"CYCLES={cycles}"])
if self.run:
resultss = self._run_bench(t, True, run_as_root, exec_wrapper)
else:
opts = []
if self.opt.lower() == "all" or self.opt.lower() == "opt":
opts.append(True)
if self.opt.lower() == "all" or self.opt.lower() != "opt":
opts.append(False)

resultss = {}
for opt in opts:
if self.compile:
t.compile(
True if self.opt.lower() == "opt" else False,
extra_make_args=[f"CYCLES={cycles}"],
)
t.compile(opt, extra_make_args=[f"CYCLES={cycles}"])
if self.run:
resultss = self._run_bench(
t,
True if self.opt.lower() == "opt" else False,
run_as_root,
exec_wrapper,
)

if resultss is None:
exit(0)
# _run_bench currently returns a single-element dictionary
# with the optimization value as the first key.
#
# Merge those manually here. Ultimately it would be cleaner
# for _run_bench to accept "all" as opt value, and then the
# opt-indexed dictionary would make sense.
this_result = self._run_bench(t, opt, run_as_root, exec_wrapper)
if this_result is None:
exit(0)
resultss = resultss | this_result

# NOTE: There will only be one item in resultss, as we haven't yet decided how to write both opt/no-opt benchmark results
for k, results in resultss.items():
if results is not None and output is not None and components is False:
if results is not None and output_base is not None and components is False:
import json

# Write foo_opt.json or foo_no_opt.json depending on whether
# optimization is enabled.
output = f"{output_base}_{k}.json"
with open(output, "w") as f:
v = []
for scheme in results:
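
The loop above compiles and runs the benchmarks once per selected optimization setting, merges the single-element dictionaries returned by _run_bench, and writes one JSON file per key. The following is a hedged sketch of that merge-and-write pattern; fake_run_bench and its payload are invented for illustration, only the dict merge and the f"{output_base}_{k}.json" naming follow the diff.

import json

def fake_run_bench(opt: bool) -> dict:
    # Stand-in for _run_bench: a single-element dict keyed by the optimization label.
    return {"opt" if opt else "no_opt": {"keygen_cycles": 21000 if opt else 35000}}

output_base = "output"
resultss = {}
for opt in (True, False):                       # what `opts` contains for --opt=all
    resultss = resultss | fake_run_bench(opt)   # merge the one-key dicts (Python 3.9+)

for k, results in resultss.items():
    # Produces output_opt.json and output_no_opt.json
    with open(f"{output_base}_{k}.json", "w") as f:
        json.dump(results, f, indent=2)
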
8 changes: 4 additions & 4 deletions scripts/tests
@@ -118,9 +118,9 @@ _bench_options = [
),
click.option(
"-o",
"--output",
"--output-base",
nargs=1,
help="Path to output file in json format",
help="Prefix of JSON output file. Optimized benchmarks will be written to {base}_opt.json, and non-optimized benchmarks will be written to {base}_no_opt.json",
),
click.option(
"-r",
@@ -225,15 +225,15 @@ def acvp(opts: Options, acvp_dir: str):
def bench(
opts: Options,
cycles: str,
output,
output_base,
run_as_root: bool,
exec_wrapper: str,
mac_taskpolicy,
components,
):
Tests(opts).bench(
cycles,
output,
output_base,
run_as_root,
exec_wrapper,
mac_taskpolicy,
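
For reference, a small example of the file names the new --output-base option implies; the base value here is hypothetical, only the {base}_opt.json / {base}_no_opt.json convention comes from the help text above.

base = "bench/graviton2"       # hypothetical value passed via --output-base
print(f"{base}_opt.json")      # optimized benchmark results
print(f"{base}_no_opt.json")   # non-optimized benchmark results
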