diff --git a/.github/actions/bench/action.yml b/.github/actions/bench/action.yml index c7f38eb9b..970f920b1 100644 --- a/.github/actions/bench/action.yml +++ b/.github/actions/bench/action.yml @@ -18,8 +18,8 @@ inputs: description: ARCHFLAGS to pass to compilation default: "" opt: - description: opt flag to set for tests script - default: "true" + description: opt flag to set for tests script (opt, no_opt, all) + default: "opt" bench_extra_args: description: Further arguments to be appended to command line for `bench` script default: "" @@ -72,23 +72,32 @@ runs: run: | ./scripts/tests bench -c ${{ inputs.perf }} --cross-prefix="${{ inputs.cross_prefix }}" \ --cflags="${{ inputs.cflags }}" --arch-flags="${{ inputs.archflags }}" \ - --opt=$([[ ${{ inputs.opt }} == "false" ]] && echo "no_opt" || echo "opt") \ - -v --output=output.json ${{ inputs.bench_extra_args }} + --opt=${{ inputs.opt }} \ + -v --output-base=output ${{ inputs.bench_extra_args }} ./scripts/tests bench --components -c ${{ inputs.perf }} --cross-prefix="${{ inputs.cross_prefix }}" \ --cflags="${{ inputs.cflags }}" --arch-flags="${{ inputs.archflags }}" \ - --opt=$([[ ${{ inputs.opt }} == "false" ]] && echo "no_opt" || echo "opt") \ - -v --output=output.json ${{ inputs.bench_extra_args }} + --opt=${{ inputs.opt }} \ + -v ${{ inputs.bench_extra_args }} - name: Check namespace shell: ${{ env.SHELL }} run: | check-namespace - - name: Store benchmark result - if: ${{ inputs.store_results == 'true' }} + - name: Store optimized benchmark result + if: ${{ inputs.store_results == 'true' && (inputs.opt == 'opt' || inputs.opt == 'all') }} uses: benchmark-action/github-action-benchmark@v1 with: name: ${{ inputs.name }} tool: "customSmallerIsBetter" - output-file-path: output.json + output-file-path: "output_opt.json" github-token: ${{ inputs.gh_token }} auto-push: true + - name: Store non-optimized benchmark result + if: ${{ inputs.store_results == 'true' && (inputs.opt == 'no_opt' || inputs.opt == 'all') }} + uses: 
benchmark-action/github-action-benchmark@v1 + with: + name: "${{ inputs.name }} (no-opt)" + tool: "customSmallerIsBetter" + output-file-path: "output_no_opt.json" github-token: ${{ inputs.gh_token }} auto-push: true diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index f926bbd00..1864b7588 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -50,7 +50,7 @@ jobs: cflags: ${{ matrix.target.cflags }} archflags: ${{ matrix.target.archflags }} perf: ${{ matrix.target.bench_pmu }} - store_results: ${{ github.repository_owner == 'pq-code-package' && github.ref == 'refs/heads/main' }} + store_results: true # ${{ github.repository_owner == 'pq-code-package' && github.ref == 'refs/heads/main' }} bench_extra_args: ${{ matrix.target.bench_extra_args }} gh_token: ${{ secrets.AWS_GITHUB_TOKEN }} ec2_all: @@ -61,11 +61,6 @@ strategy: fail-fast: false matrix: - opt: - - name: opt - value: true - - name: no_opt - value: false target: - name: Graviton2 ec2_instance_type: t4g.small @@ -116,8 +111,8 @@ ec2_ami: ${{ matrix.target.ec2_ami }} archflags: ${{ matrix.target.archflags }} cflags: ${{ matrix.target.cflags }} - opt: ${{ matrix.opt.value }} - store_results: ${{ github.repository_owner == 'pq-code-package' && github.ref == 'refs/heads/main' }} # Only store optimized results - name: "${{ matrix.target.name }}${{ (!matrix.opt.value && ' (no-opt)') || ''}}" + opt: all + store_results: true # TEST ## ${{ github.repository_owner == 'pq-code-package' && github.ref == 'refs/heads/main' }} # Only store optimized results + name: ${{ matrix.target.name }} perf: ${{ matrix.target.perf }} secrets: inherit diff --git a/mlkem/poly.c b/mlkem/poly.c index b1e5ecf09..4a8a97b95 100644 --- a/mlkem/poly.c +++ b/mlkem/poly.c @@ -1,4 +1,3 @@ -// Copyright (c) 2024 The mlkem-native project authors // SPDX-License-Identifier: Apache-2.0 #include "poly.h" #include diff --git a/scripts/lib/mlkem_test.py b/scripts/lib/mlkem_test.py index 
b1ef461bb..e60095f4f 100644 --- a/scripts/lib/mlkem_test.py +++ b/scripts/lib/mlkem_test.py @@ -574,7 +574,7 @@ def _run_bench( def bench( self, cycles: str, - output, + output_base, run_as_root: bool, exec_wrapper: str, mac_taskpolicy, @@ -586,7 +586,7 @@ def bench( t = self._bench else: t = self._bench_components - output = False + output_base = None if mac_taskpolicy: if exec_wrapper: @@ -595,38 +595,35 @@ def bench( else: exec_wrapper = f"taskpolicy -c {mac_taskpolicy}" - # NOTE: We haven't yet decided how to output both opt/no-opt benchmark results - if self.opt.lower() == "all": - if self.compile: - t.compile(False, extra_make_args=[f"CYCLES={cycles}"]) - if self.run: - self._run_bench(t, False, run_as_root, exec_wrapper) - if self.compile: - t.compile(True, extra_make_args=[f"CYCLES={cycles}"]) - if self.run: - resultss = self._run_bench(t, True, run_as_root, exec_wrapper) - else: + opts = [] + if self.opt.lower() == "all" or self.opt.lower() == "opt": + opts.append(True) + if self.opt.lower() != "opt": + opts.append(False) + + resultss = {} + for opt in opts: if self.compile: - t.compile( - True if self.opt.lower() == "opt" else False, - extra_make_args=[f"CYCLES={cycles}"], - ) + t.compile(opt, extra_make_args=[f"CYCLES={cycles}"]) if self.run: - resultss = self._run_bench( - t, - True if self.opt.lower() == "opt" else False, - run_as_root, - exec_wrapper, - ) - - if resultss is None: - exit(0) + # _run_bench currently returns a single-element dictionary + # with the optimization value as the first key. + # + # Merge those manually here. Ultimately it would be cleaner + # for _run_bench to accept "all" as opt value, and then the + # opt-indexed dictionary would make sense. 
+ this_result = self._run_bench(t, opt, run_as_root, exec_wrapper) + if this_result is None: + exit(0) + resultss.update(this_result) - # NOTE: There will only be one items in resultss, as we haven't yet decided how to write both opt/no-opt benchmark results for k, results in resultss.items(): - if results is not None and output is not None and components is False: + if results is not None and output_base is not None and components is False: import json + # Write foo_opt.json or foo_no_opt.json depending on whether + # optimization is enabled. + output = f"{output_base}_{k}.json" with open(output, "w") as f: v = [] for scheme in results: diff --git a/scripts/tests b/scripts/tests index a7882a1d3..64e6c660e 100755 --- a/scripts/tests +++ b/scripts/tests @@ -118,9 +118,9 @@ _bench_options = [ ), click.option( "-o", - "--output", + "--output-base", nargs=1, - help="Path to output file in json format", + help="Prefix of JSON output file. Optimized benchmarks will be written to {base}_opt.json, and non-optimized benchmarks will be written to {base}_no_opt.json", ), click.option( "-r", @@ -225,7 +225,7 @@ def acvp(opts: Options, acvp_dir: str): def bench( opts: Options, cycles: str, - output, + output_base, run_as_root: bool, exec_wrapper: str, mac_taskpolicy, @@ -233,7 +233,7 @@ ): Tests(opts).bench( cycles, - output, + output_base, run_as_root, exec_wrapper, mac_taskpolicy,