diff --git a/docs/rm/sim/Elf.md b/docs/rm/sim/Elf.md new file mode 100644 index 000000000..ebbc35549 --- /dev/null +++ b/docs/rm/sim/Elf.md @@ -0,0 +1 @@ +::: Elf diff --git a/docs/rm/sim/verif_utils.md b/docs/rm/sim/verif_utils.md new file mode 100644 index 000000000..cb0f61ee0 --- /dev/null +++ b/docs/rm/sim/verif_utils.md @@ -0,0 +1 @@ +::: verif_utils \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index cff2613e0..6ad869786 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -55,9 +55,11 @@ nav: - Software: - Simulation Utilities: - data_utils: rm/sim/data_utils.md + - verif_utils: rm/sim/verif_utils.md - sim_utils: rm/sim/sim_utils.md - rm/sim/Simulation.md - rm/sim/Simulator.md + - rm/sim/Elf.md - Snitch Runtime: - Pages: runtime/Pages/index.md - Files: runtime/Files/index.md diff --git a/sw/apps/atax/scripts/verify.py b/sw/apps/atax/scripts/verify.py index 26ae71185..cb5fe21f0 100755 --- a/sw/apps/atax/scripts/verify.py +++ b/sw/apps/atax/scripts/verify.py @@ -5,55 +5,33 @@ # # Luca Colagrande +import numpy as np import sys from pathlib import Path -import numpy as np from datagen import AtaxDataGen -sys.path.append(str(Path(__file__).parent / "../../../util/sim/")) -import verification # noqa: E402 -from elf import Elf # noqa: E402 -from data_utils import from_buffer # noqa: E402 - - -ERR_THRESHOLD = 1e-10 +sys.path.append(str(Path(__file__).parent / '../../../util/sim/')) +from verif_utils import Verifier # noqa: E402 -def main(): - # Run simulation and get outputs - args = verification.parse_args() - raw_results = verification.simulate( - sim_bin=args.sim_bin, - snitch_bin=args.snitch_bin, - symbols_bin=args.symbols_bin, - log=args.log, - output_uids=["y"], - ) +class AtaxVerifier(Verifier): - # Extract input operands from ELF file - if args.symbols_bin: - elf = Elf(args.symbols_bin) - else: - elf = Elf(args.snitch_bin) - A = elf.from_symbol('A', 'double') - x = elf.from_symbol('x', 'double') - M = elf.from_symbol('M', 'uint32_t')[0] - N = 
elf.from_symbol('N', 'uint32_t')[0] - A = np.reshape(A, (M, N)) + OUTPUT_UIDS = ['y'] - # Verify results - y_actual = from_buffer(raw_results['y'], 'double') - y_golden = AtaxDataGen().golden_model(A, x).flatten() + def get_actual_results(self): + return self.get_output_from_symbol('y', 'double') - relative_err = np.absolute((y_golden - y_actual) / y_golden) - fail = np.any(relative_err > ERR_THRESHOLD) - if (fail): - print('Simulation results are incorrect.') - verification.dump_results_to_csv([y_golden, y_actual, relative_err], - Path.cwd() / 'results.csv') + def get_expected_results(self): + A = self.get_input_from_symbol('A', 'double') + x = self.get_input_from_symbol('x', 'double') + M = self.get_input_from_symbol('M', 'uint32_t')[0] + N = self.get_input_from_symbol('N', 'uint32_t')[0] + A = np.reshape(A, (M, N)) + return AtaxDataGen().golden_model(A, x).flatten() - return int(fail) + def check_results(self, *args): + return super().check_results(*args, rtol=1e-10) if __name__ == "__main__": - sys.exit(main()) + sys.exit(AtaxVerifier().main()) diff --git a/sw/apps/correlation/scripts/verify.py b/sw/apps/correlation/scripts/verify.py index 19a8fcc36..67746af7f 100755 --- a/sw/apps/correlation/scripts/verify.py +++ b/sw/apps/correlation/scripts/verify.py @@ -5,54 +5,32 @@ # # Luca Colagrande +import numpy as np import sys from pathlib import Path -import numpy as np from datagen import CorrelationDataGen -sys.path.append(str(Path(__file__).parent / "../../../util/sim/")) -import verification # noqa: E402 -from elf import Elf # noqa: E402 -from data_utils import from_buffer # noqa: E402 - - -ERR_THRESHOLD = 1e-10 +sys.path.append(str(Path(__file__).parent / '../../../util/sim/')) +from verif_utils import Verifier # noqa: E402 -def main(): - # Run simulation and get outputs - args = verification.parse_args() - raw_results = verification.simulate( - sim_bin=args.sim_bin, - snitch_bin=args.snitch_bin, - symbols_bin=args.symbols_bin, - log=args.log, - 
output_uids=['corr'], - ) +class CorrelationVerifier(Verifier): - # Extract input operands from ELF file - if args.symbols_bin: - elf = Elf(args.symbols_bin) - else: - elf = Elf(args.snitch_bin) - M = elf.from_symbol('M', 'uint32_t')[0] - N = elf.from_symbol('N', 'uint32_t')[0] - data = elf.from_symbol('data', 'double') - data = np.reshape(data, (N, M)) + OUTPUT_UIDS = ['corr'] - # Verify results - corr_actual = from_buffer(raw_results['corr'], 'double') - corr_golden = CorrelationDataGen().golden_model(data).flatten() + def get_actual_results(self): + return self.get_output_from_symbol('corr', 'double') - relative_err = np.absolute((corr_golden - corr_actual) / corr_golden) - fail = np.any(relative_err > ERR_THRESHOLD) - if (fail): - print('Simulation results are incorrect.') - verification.dump_results_to_csv([corr_golden, corr_actual, relative_err], - Path.cwd() / 'results.csv') + def get_expected_results(self): + M = self.get_input_from_symbol('M', 'uint32_t')[0] + N = self.get_input_from_symbol('N', 'uint32_t')[0] + data = self.get_input_from_symbol('data', 'double') + data = np.reshape(data, (N, M)) + return CorrelationDataGen().golden_model(data).flatten() - return int(fail) + def check_results(self, *args): + return super().check_results(*args, rtol=1e-10) if __name__ == "__main__": - sys.exit(main()) + sys.exit(CorrelationVerifier().main()) diff --git a/sw/apps/covariance/scripts/verify.py b/sw/apps/covariance/scripts/verify.py index 9f0dca523..aacbccdb1 100755 --- a/sw/apps/covariance/scripts/verify.py +++ b/sw/apps/covariance/scripts/verify.py @@ -5,54 +5,32 @@ # # Luca Colagrande +import numpy as np import sys from pathlib import Path -import numpy as np from datagen import CovarianceDataGen -sys.path.append(str(Path(__file__).parent / "../../../util/sim/")) -import verification # noqa: E402 -from elf import Elf # noqa: E402 -from data_utils import from_buffer # noqa: E402 - - -ERR_THRESHOLD = 1e-10 +sys.path.append(str(Path(__file__).parent / 
'../../../util/sim/')) +from verif_utils import Verifier # noqa: E402 -def main(): - # Run simulation and get outputs - args = verification.parse_args() - raw_results = verification.simulate( - sim_bin=args.sim_bin, - snitch_bin=args.snitch_bin, - symbols_bin=args.symbols_bin, - log=args.log, - output_uids=['cov'], - ) +class CovarianceVerifier(Verifier): - # Extract input operands from ELF file - if args.symbols_bin: - elf = Elf(args.symbols_bin) - else: - elf = Elf(args.snitch_bin) - M = elf.from_symbol('M', 'uint32_t')[0] - N = elf.from_symbol('N', 'uint32_t')[0] - data = elf.from_symbol('data', 'double') - data = np.reshape(data, (N, M)) + OUTPUT_UIDS = ['cov'] - # Verify results - cov_actual = from_buffer(raw_results['cov'], 'double') - cov_golden = CovarianceDataGen().golden_model(data).flatten() + def get_actual_results(self): + return self.get_output_from_symbol('cov', 'double') - relative_err = np.absolute((cov_golden - cov_actual) / cov_golden) - fail = np.any(relative_err > ERR_THRESHOLD) - if (fail): - print('Simulation results are incorrect.') - verification.dump_results_to_csv([cov_golden, cov_actual, relative_err], - Path.cwd() / 'results.csv') + def get_expected_results(self): + M = self.get_input_from_symbol('M', 'uint32_t')[0] + N = self.get_input_from_symbol('N', 'uint32_t')[0] + data = self.get_input_from_symbol('data', 'double') + data = np.reshape(data, (N, M)) + return CovarianceDataGen().golden_model(data).flatten() - return int(fail) + def check_results(self, *args): + return super().check_results(*args, rtol=1e-10) if __name__ == "__main__": - sys.exit(main()) + sys.exit(CovarianceVerifier().main()) diff --git a/sw/blas/axpy/Makefile b/sw/blas/axpy/Makefile index bed4edaa8..25bd29a9b 100644 --- a/sw/blas/axpy/Makefile +++ b/sw/blas/axpy/Makefile @@ -9,18 +9,21 @@ MK_DIR := $(dir $(realpath $(lastword $(MAKEFILE_LIST)))) DATA_DIR := $(realpath $(MK_DIR)/data) SRC_DIR := $(realpath $(MK_DIR)/src) -LENGTH ?= 24 -SECTION ?= +DATA_CFG ?= 
$(DATA_DIR)/params.json +SECTION ?= APP ?= axpy -SRCS ?= $(SRC_DIR)/main.c -INCDIRS += $(DATA_DIR) $(SRC_DIR) +SRCS ?= $(realpath $(SRC_DIR)/main.c) +INCDIRS ?= $(dir $(DATA_H)) $(SRC_DIR) -DATAGEN_PY = $(DATA_DIR)/datagen.py -DATA_H = $(DATA_DIR)/data.h +DATAGEN_PY = $(MK_DIR)/scripts/datagen.py +DATA_H ?= $(DATA_DIR)/data.h -$(DATA_H): $(DATAGEN_PY) - $< $(LENGTH) --section="$(SECTION)" > $@ +$(dir $(DATA_H)): + mkdir -p $@ + +$(DATA_H): $(DATAGEN_PY) $(DATA_CFG) | $(dir $(DATA_H)) + $< -c $(DATA_CFG) --section="$(SECTION)" > $@ .PHONY: clean-data clean diff --git a/sw/blas/axpy/data/datagen.py b/sw/blas/axpy/data/datagen.py deleted file mode 100755 index 3f48e348d..000000000 --- a/sw/blas/axpy/data/datagen.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2023 ETH Zurich and University of Bologna. -# Licensed under the Apache License, Version 2.0, see LICENSE for details. -# SPDX-License-Identifier: Apache-2.0 -# -# Author: Luca Colagrande - -import sys -import argparse -import numpy as np -import os - -sys.path.append(os.path.join(os.path.dirname(__file__), "../../../../util/sim/")) -from data_utils import format_scalar_definition, format_array_definition, \ - format_array_declaration, format_ifdef_wrapper # noqa: E402 - -MIN = -1000 -MAX = +1000 - -# Aligns data to the size of a beat to avoid misaligned transfers -BEAT_ALIGNMENT = 64 -# AXI splits bursts crossing 4KB address boundaries. 
To minimize -# the occurrence of these splits the data should be aligned to 4KB -BURST_ALIGNMENT = 4096 - - -def golden_model(a, x, y): - return a*x + y - - -def main(): - # Argument parsing - parser = argparse.ArgumentParser() - parser.add_argument( - 'length', - type=int, - help='Vector length') - parser.add_argument( - '--section', - type=str, - help='Section to store vectors in') - args = parser.parse_args() - length = args.length - section = args.section - - # Randomly generate inputs - a = np.random.uniform(MIN, MAX, 1) - x = np.random.uniform(MIN, MAX, length) - y = np.random.uniform(MIN, MAX, length) - g = golden_model(a, x, y) - - # Format header file - l_str = format_scalar_definition('const uint32_t', 'l', length) - a_str = format_scalar_definition('const double', 'a', a[0]) - x_str = format_array_definition('double', 'x', x, alignment=BURST_ALIGNMENT, section=section) - y_str = format_array_definition('double', 'y', y, alignment=BURST_ALIGNMENT, section=section) - z_str = format_array_declaration('double', 'z', [length], - alignment=BURST_ALIGNMENT, section=section) - g_str = format_array_definition('double', 'g', g) - g_str = format_ifdef_wrapper('BIST', g_str) - f_str = '\n\n'.join([l_str, a_str, x_str, y_str, z_str, g_str]) - f_str += '\n' - - # Write to stdout - print(f_str) - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/sw/blas/axpy/data/params.json b/sw/blas/axpy/data/params.json new file mode 100644 index 000000000..038dc1914 --- /dev/null +++ b/sw/blas/axpy/data/params.json @@ -0,0 +1,7 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +{ + n: 24 +} diff --git a/sw/blas/axpy/scripts/datagen.py b/sw/blas/axpy/scripts/datagen.py new file mode 100755 index 000000000..96b950194 --- /dev/null +++ b/sw/blas/axpy/scripts/datagen.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +# Copyright 2023 ETH Zurich and University of Bologna. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +# +# Author: Luca Colagrande + +import numpy as np +import os +import sys + +sys.path.append(os.path.join(os.path.dirname(__file__), "../../../../util/sim/")) +from data_utils import format_scalar_definition, format_array_definition, \ + format_array_declaration, format_ifdef_wrapper, DataGen # noqa: E402 + + +class AxpyDataGen(DataGen): + + MIN = -1000 + MAX = +1000 + # AXI splits bursts crossing 4KB address boundaries. To minimize + # the occurrence of these splits the data should be aligned to 4KB + BURST_ALIGNMENT = 4096 + + def golden_model(self, a, x, y): + return a*x + y + + def emit_header(self, **kwargs): + header = [super().emit_header()] + + n = kwargs['n'] + a = np.random.uniform(self.MIN, self.MAX, 1) + x = np.random.uniform(self.MIN, self.MAX, n) + y = np.random.uniform(self.MIN, self.MAX, n) + g = self.golden_model(a, x, y) + + assert (n % 8) == 0, "n must be an integer multiple of the number of cores" + + header += [format_scalar_definition('const uint32_t', 'n', n)] + header += [format_scalar_definition('const double', 'a', a[0])] + header += [format_array_definition('double', 'x', x, alignment=self.BURST_ALIGNMENT, + section=kwargs['section'])] + header += [format_array_definition('double', 'y', y, alignment=self.BURST_ALIGNMENT, + section=kwargs['section'])] + header += [format_array_declaration('double', 'z', [n], alignment=self.BURST_ALIGNMENT, + section=kwargs['section'])] + result_def = format_array_definition('double', 'g', g) + header += [format_ifdef_wrapper('BIST', result_def)] + header = 
'\n\n'.join(header) + + return header + + +if __name__ == '__main__': + sys.exit(AxpyDataGen().main()) diff --git a/sw/blas/axpy/scripts/verify.py b/sw/blas/axpy/scripts/verify.py new file mode 100755 index 000000000..4568f9397 --- /dev/null +++ b/sw/blas/axpy/scripts/verify.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# Copyright 2023 ETH Zurich and University of Bologna. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +# +# Luca Colagrande + +import sys +from pathlib import Path +from datagen import AxpyDataGen + +sys.path.append(str(Path(__file__).parent / '../../../../util/sim/')) +from verif_utils import Verifier # noqa: E402 + + +class AxpyVerifier(Verifier): + + OUTPUT_UIDS = ['z'] + + def get_actual_results(self): + return self.get_output_from_symbol('z', 'double') + + def get_expected_results(self): + a = self.get_input_from_symbol('a', 'double') + x = self.get_input_from_symbol('x', 'double') + y = self.get_input_from_symbol('y', 'double') + return AxpyDataGen().golden_model(a, x, y) + + def check_results(self, *args): + return super().check_results(*args, rtol=1e-10) + + +if __name__ == "__main__": + sys.exit(AxpyVerifier().main()) diff --git a/sw/blas/axpy/src/axpy.h b/sw/blas/axpy/src/axpy.h index 476d64cb9..e8f5ae6c0 100644 --- a/sw/blas/axpy/src/axpy.h +++ b/sw/blas/axpy/src/axpy.h @@ -4,9 +4,9 @@ #include "snrt.h" -inline void axpy(uint32_t l, double a, double* x, double* y, double* z) { +inline void axpy(uint32_t n, double a, double* x, double* y, double* z) { int core_idx = snrt_cluster_core_idx(); - int frac = l / snrt_cluster_compute_core_num(); + int frac = n / snrt_cluster_compute_core_num(); int offset = core_idx * frac; #ifndef XSSR diff --git a/sw/blas/axpy/src/main.c b/sw/blas/axpy/src/main.c index 1b379c811..22f3dd129 100644 --- a/sw/blas/axpy/src/main.c +++ b/sw/blas/axpy/src/main.c @@ -13,7 +13,7 @@ int main() { double *remote_x, *remote_y, *remote_z; // Calculate size 
and pointers for each cluster - uint32_t frac = l / snrt_cluster_num(); + uint32_t frac = n / snrt_cluster_num(); uint32_t offset = frac * snrt_cluster_idx(); remote_x = x + offset; remote_y = y + offset; @@ -55,11 +55,11 @@ int main() { // TODO: currently only works for single cluster otherwise need to // synchronize all cores here #ifdef BIST - uint32_t nerr = l; + uint32_t nerr = n; // Check computation is correct if (snrt_global_core_idx() == 0) { - for (int i = 0; i < l; i++) { + for (int i = 0; i < n; i++) { if (local_z[i] == g[i]) nerr--; printf("%d %d\n", local_z[i], g[i]); } diff --git a/sw/blas/axpy/verify.py b/sw/blas/axpy/verify.py deleted file mode 100755 index 9a8eb8b7a..000000000 --- a/sw/blas/axpy/verify.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2023 ETH Zurich and University of Bologna. -# Licensed under the Apache License, Version 2.0, see LICENSE for details. -# SPDX-License-Identifier: Apache-2.0 -# -# Luca Colagrande - -import sys -from pathlib import Path -import numpy as np -from data.datagen import golden_model - -sys.path.append(str(Path(__file__).parent / '../../../util/sim/')) -import verification # noqa: E402 -from elf import Elf # noqa: E402 -from data_utils import from_buffer # noqa: E402 - - -ERR_THRESHOLD = 1E-10 - - -def main(): - # Run simulation and get outputs - args = verification.parse_args() - raw_results = verification.simulate(sim_bin=args.sim_bin, - snitch_bin=args.snitch_bin, - symbols_bin=args.symbols_bin, - log=args.log, - output_uids=['z']) - z_actual = from_buffer(raw_results['z'], 'double') - - # Extract input operands from ELF file - if args.symbols_bin: - elf = Elf(args.symbols_bin) - else: - elf = Elf(args.snitch_bin) - a = elf.from_symbol('a', 'double') - x = elf.from_symbol('x', 'double') - y = elf.from_symbol('y', 'double') - - # Verify results - z_golden = golden_model(a, x, y) - relative_err = np.absolute((z_golden - z_actual) / z_golden) - fail = np.any(relative_err > 
ERR_THRESHOLD) - if (fail): - verification.dump_results_to_csv([z_golden, z_actual, relative_err], - Path.cwd() / 'axpy_results.csv') - - return int(fail) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/sw/blas/gemm/Makefile b/sw/blas/gemm/Makefile index 363166f25..7dc0f264e 100644 --- a/sw/blas/gemm/Makefile +++ b/sw/blas/gemm/Makefile @@ -16,7 +16,7 @@ APP ?= gemm SRCS ?= $(realpath $(SRC_DIR)/main.c) INCDIRS ?= $(dir $(DATA_H)) $(SRC_DIR) -DATAGEN_PY = $(DATA_DIR)/datagen.py +DATAGEN_PY = $(MK_DIR)/scripts/datagen.py DATA_H ?= $(DATA_DIR)/data.h $(dir $(DATA_H)): diff --git a/sw/blas/gemm/data/datagen.py b/sw/blas/gemm/scripts/datagen.py similarity index 100% rename from sw/blas/gemm/data/datagen.py rename to sw/blas/gemm/scripts/datagen.py diff --git a/sw/blas/gemm/scripts/verify.py b/sw/blas/gemm/scripts/verify.py new file mode 100755 index 000000000..d5a15d018 --- /dev/null +++ b/sw/blas/gemm/scripts/verify.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# Copyright 2023 ETH Zurich and University of Bologna. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +# +# Luca Colagrande + +import numpy as np +import sys +from pathlib import Path +from datagen import golden_model + +sys.path.append(str(Path(__file__).parent / '../../../../util/sim/')) +from verif_utils import Verifier # noqa: E402 +from data_utils import ctype_from_precision_t # noqa: E402 + + +class GemmVerifier(Verifier): + + OUTPUT_UIDS = ['c'] + ERR_THRESHOLD = {8: 1e-6, 4: 1e-6, 2: 1e-2, 1: 1e-1} + + def __init__(self): + super().__init__() + self.prec = self.get_input_from_symbol('dtype_size', 'uint32_t')[0] + + def get_actual_results(self): + return self.get_output_from_symbol('c', ctype_from_precision_t(self.prec)) + + def get_expected_results(self): + a = self.get_input_from_symbol('a', ctype_from_precision_t(self.prec)) + b = self.get_input_from_symbol('b', ctype_from_precision_t(self.prec)) + c = self.get_input_from_symbol('c', ctype_from_precision_t(self.prec)) + beta = self.get_input_from_symbol('BETA', 'uint32_t')[0] + m = self.get_input_from_symbol('M', 'uint32_t')[0] + n = self.get_input_from_symbol('N', 'uint32_t')[0] + k = self.get_input_from_symbol('K', 'uint32_t')[0] + tb = self.get_input_from_symbol('TB', 'uint32_t')[0] + a = np.reshape(a, (m, k)) + if tb: + b = np.reshape(b, (n, k)) + b = b.transpose() + else: + b = np.reshape(b, (k, n)) + c = np.reshape(c, (m, n)) + return golden_model(1, a, b, beta, c).flatten() + + def check_results(self, *args): + return super().check_results(*args, atol=self.ERR_THRESHOLD[self.prec]) + + +if __name__ == "__main__": + sys.exit(GemmVerifier().main()) diff --git a/sw/blas/gemm/verify.py b/sw/blas/gemm/verify.py deleted file mode 100755 index 61964efb9..000000000 --- a/sw/blas/gemm/verify.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2023 ETH Zurich and University of Bologna. -# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
-# SPDX-License-Identifier: Apache-2.0 -# -# Luca Colagrande - -import sys -from pathlib import Path -import numpy as np -from data.datagen import golden_model - -sys.path.append(str(Path(__file__).parent / "../../../util/sim/")) -import verification # noqa: E402 -from elf import Elf # noqa: E402 -from data_utils import from_buffer, ctype_from_precision_t # noqa: E402 - - -ERR_THRESHOLD = {8: 1e-6, 4: 1e-6, 2: 1e-2, 1: 1e-1} - - -def main(): - # Run simulation and get outputs - args = verification.parse_args() - raw_results = verification.simulate( - sim_bin=args.sim_bin, - snitch_bin=args.snitch_bin, - symbols_bin=args.symbols_bin, - log=args.log, - output_uids=["c"], - ) - - # Extract input operands from ELF file - if args.symbols_bin: - elf = Elf(args.symbols_bin) - else: - elf = Elf(args.snitch_bin) - prec = elf.from_symbol('dtype_size', 'uint32_t')[0] - a = elf.from_symbol('a', ctype_from_precision_t(prec)) - b = elf.from_symbol('b', ctype_from_precision_t(prec)) - c = elf.from_symbol('c', ctype_from_precision_t(prec)) - beta = elf.from_symbol('BETA', 'uint32_t')[0] - m = elf.from_symbol('M', 'uint32_t')[0] - n = elf.from_symbol('N', 'uint32_t')[0] - k = elf.from_symbol('K', 'uint32_t')[0] - tb = elf.from_symbol('TB', 'uint32_t')[0] - a = np.reshape(a, (m, k)) - if tb: - b = np.reshape(b, (n, k)) - b = b.transpose() - else: - b = np.reshape(b, (k, n)) - c = np.reshape(c, (m, n)) - - # Verify results - c_actual = from_buffer(raw_results['c'], ctype_from_precision_t(prec)) - c_golden = golden_model(1, a, b, beta, c).flatten() - - absolute_err = np.absolute(c_golden - c_actual) - fail = np.any(absolute_err > ERR_THRESHOLD[prec]) - if (fail or args.dump_results): - print('Simulation results are incorrect.') - verification.dump_results_to_csv([c_golden, c_actual, absolute_err], - Path.cwd() / 'results.csv') - - return int(fail) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/sw/dnn/batchnorm/data/datagen.py b/sw/dnn/batchnorm/scripts/datagen.py 
similarity index 100% rename from sw/dnn/batchnorm/data/datagen.py rename to sw/dnn/batchnorm/scripts/datagen.py diff --git a/sw/dnn/common.mk b/sw/dnn/common.mk index c2f874bb6..3e1242f8b 100644 --- a/sw/dnn/common.mk +++ b/sw/dnn/common.mk @@ -8,6 +8,7 @@ MK_DIR := $(dir $(realpath $(lastword $(MAKEFILE_LIST)))) DATA_DIR := $(realpath $(MK_DIR)/$(APP)/data) +SCRIPTS_DIR := $(realpath $(MK_DIR)/$(APP)/scripts) SRC_DIR := $(realpath $(MK_DIR)/$(APP)/src) COMMON_SRC_DIR := $(realpath $(MK_DIR)/src) @@ -17,7 +18,7 @@ SECTION ?= SRCS ?= $(realpath $(SRC_DIR)/main.c) INCDIRS ?= $(DATA_DIR) $(SRC_DIR) $(COMMON_SRC_DIR) -DATAGEN_PY := $(DATA_DIR)/datagen.py +DATAGEN_PY := $(SCRIPTS_DIR)/datagen.py DATA_H := $(DATA_DIR)/data.h $(DATA_H): $(DATAGEN_PY) $(DATA_CFG) diff --git a/sw/dnn/concat/data/datagen.py b/sw/dnn/concat/scripts/datagen.py similarity index 100% rename from sw/dnn/concat/data/datagen.py rename to sw/dnn/concat/scripts/datagen.py diff --git a/sw/dnn/concat/scripts/verify.py b/sw/dnn/concat/scripts/verify.py new file mode 100755 index 000000000..b5fdc9e83 --- /dev/null +++ b/sw/dnn/concat/scripts/verify.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +# Copyright 2023 ETH Zurich and University of Bologna. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +# +# Luca Colagrande + +import sys +import torch +from pathlib import Path +from datagen import golden_model + +sys.path.append(str(Path(__file__).parent / '../../../../util/sim/')) +from verif_utils import Verifier # noqa: E402 +from data_utils import ctype_from_precision_t # noqa: E402 + + +class ConcatVerifier(Verifier): + + OUTPUT_UIDS = ['output'] + + def __init__(self): + super().__init__() + self.layer_struct = { + 'num_inputs': 'I', + 'height': 'I', + 'width': 'I', + 'inputs': 'I', + 'output': 'I', + 'dtype': 'I' + } + self.layer = self.get_input_from_symbol('layer', self.layer_struct) + self.num_inputs = self.layer['num_inputs'] + self.input_shape = [self.layer['height'], self.layer['width']] + self.prec = self.layer['dtype'] + + def get_actual_results(self): + return self.get_output_from_symbol('output', ctype_from_precision_t(self.prec)) + + def get_expected_results(self): + inputs = [self.get_input_from_symbol(f'input_{i}', ctype_from_precision_t(self.prec)) + for i in range(self.num_inputs)] + inputs = [torch.from_numpy(tensor.reshape(self.input_shape)) for tensor in inputs] + return golden_model(inputs).detach().numpy().flatten() + + def check_results(self, *args): + return super().check_results(*args, rtol=1E-6) + + +if __name__ == "__main__": + sys.exit(ConcatVerifier().main()) diff --git a/sw/dnn/concat/verify.py b/sw/dnn/concat/verify.py deleted file mode 100755 index e14877e34..000000000 --- a/sw/dnn/concat/verify.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2023 ETH Zurich and University of Bologna. -# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
-# SPDX-License-Identifier: Apache-2.0 -# -# Luca Colagrande - -import sys -from pathlib import Path -import numpy as np -import torch -from data.datagen import golden_model - -sys.path.append(str(Path(__file__).parent / '../../../util/sim/')) -import verification # noqa: E402 -from elf import Elf # noqa: E402 -from data_utils import from_buffer, ctype_from_precision_t # noqa: E402 - - -ERR_THRESHOLD = 1E-6 - - -def main(): - # Run simulation and get outputs - args = verification.parse_args() - raw_results = verification.simulate(sim_bin=args.sim_bin, - snitch_bin=args.snitch_bin, - symbols_bin=args.symbols_bin, - log=args.log, - output_uids=['output']) - - # Extract input operands from ELF file - if args.symbols_bin: - elf = Elf(args.symbols_bin) - else: - elf = Elf(args.snitch_bin) - - layer_struct = { - 'num_inputs': 'I', - 'height': 'I', - 'width': 'I', - 'inputs': 'I', - 'output': 'I', - 'dtype': 'I' - } - layer = elf.from_symbol('layer', layer_struct) - num_inputs = layer['num_inputs'] - input_shape = [layer['height'], layer['width']] - inputs = layer['inputs'] - prec = layer['dtype'] - - inputs = [elf.from_symbol(f'input_{i}', ctype_from_precision_t(prec)) - for i in range(num_inputs)] - inputs = [torch.from_numpy(tensor.reshape(input_shape)) for tensor in inputs] - - # Verify results - output_actual = from_buffer(raw_results['output'], ctype_from_precision_t(prec)) - output_golden = golden_model(inputs).detach().numpy().flatten() - - relative_err = np.absolute((output_golden - output_actual) / output_golden) - fail = np.any(relative_err > ERR_THRESHOLD) - if (fail): - verification.dump_results_to_csv([output_golden, output_actual, relative_err], - Path.cwd() / 'concat_results.csv') - print('Maximum relative error:', np.max(relative_err)) - - return int(fail) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/sw/dnn/conv2d/data/datagen.py b/sw/dnn/conv2d/scripts/datagen.py similarity index 100% rename from sw/dnn/conv2d/data/datagen.py rename to 
sw/dnn/conv2d/scripts/datagen.py diff --git a/sw/dnn/conv2d/verify.py b/sw/dnn/conv2d/scripts/verify.py similarity index 84% rename from sw/dnn/conv2d/verify.py rename to sw/dnn/conv2d/scripts/verify.py index 7e60a3cd0..92a8b167f 100755 --- a/sw/dnn/conv2d/verify.py +++ b/sw/dnn/conv2d/scripts/verify.py @@ -11,10 +11,10 @@ import torch from data.datagen import golden_model -sys.path.append(str(Path(__file__).parent / '../../../util/sim/')) +sys.path.append(str(Path(__file__).parent / '../../../../util/sim/')) import verification # noqa: E402 from elf import Elf # noqa: E402 -from data_utils import from_buffer, ctype_from_precision_t # noqa: E402 +from data_utils import from_buffer, ctype_from_precision_t, check_result # noqa: E402 ERR_THRESHOLD = 1E-6 @@ -74,13 +74,12 @@ def main(): output_golden = golden_model(inputs, filters, padding=1, stride=1) output_golden = output_golden.detach().numpy().flatten() - relative_err = np.absolute((output_golden - output_actual) / output_golden) - fail = np.any(relative_err > ERR_THRESHOLD) - if (fail): + fail, rel_err = check_result(output_golden, output_actual, rtol=ERR_THRESHOLD) + if fail: verification.dump_results_to_csv( - [output_golden, output_actual, relative_err], + [output_golden, output_actual, rel_err], Path.cwd() / 'results.csv') - print('Maximum relative error:', np.max(relative_err)) + print('Maximum relative error:', np.max(rel_err)) return int(fail) diff --git a/sw/dnn/flashattention_2/data/datagen.py b/sw/dnn/flashattention_2/scripts/datagen.py similarity index 100% rename from sw/dnn/flashattention_2/data/datagen.py rename to sw/dnn/flashattention_2/scripts/datagen.py diff --git a/sw/dnn/flashattention_2/scripts/verify.py b/sw/dnn/flashattention_2/scripts/verify.py new file mode 100755 index 000000000..467c91346 --- /dev/null +++ b/sw/dnn/flashattention_2/scripts/verify.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +# Copyright 2023 ETH Zurich and University of Bologna. 
+# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +# +# Luca Colagrande + +import sys +import torch +from pathlib import Path +from datagen import exact_golden_model + +sys.path.append(str(Path(__file__).parent / '../../../../util/sim/')) +from verif_utils import Verifier # noqa: E402 +from data_utils import ctype_from_precision_t # noqa: E402 + + +class FlashAttention2Verifier(Verifier): + + OUTPUT_UIDS = ['O'] + + def __init__(self): + super().__init__() + self.layer_struct = { + 'N': 'I', + 'd': 'I', + 'B_r': 'I', + 'B_c': 'I', + 'Q': 'I', + 'K': 'I', + 'V': 'I', + 'O': 'I', + 'dtype': 'I', + 'baseline': 'I' + } + self.layer = self.get_input_from_symbol('layer', self.layer_struct) + self.N = self.layer['N'] + self.d = self.layer['d'] + self.B_r = self.layer['B_r'] + self.B_c = self.layer['B_c'] + self.prec = self.layer['dtype'] + + def get_actual_results(self): + return self.get_output_from_symbol('O', ctype_from_precision_t(self.prec)) + + def get_expected_results(self): + Q = self.get_input_from_symbol('Q', ctype_from_precision_t(self.prec)) + K = self.get_input_from_symbol('K', ctype_from_precision_t(self.prec)) + V = self.get_input_from_symbol('V', ctype_from_precision_t(self.prec)) + Q = torch.from_numpy(Q.reshape(self.N, self.d)) + V = torch.from_numpy(V.reshape(self.N, self.d)) + # Golden model expects key matrix in (N, d) form, while Snitch binary stores it in (d, N) + K = torch.from_numpy(K.reshape(self.d, self.N)) + K = torch.transpose(K, 0, 1) + # return torch_golden_model(Q, K, V).detach().numpy().flatten() + return exact_golden_model(Q, K, V, self.B_r, self.B_c).flatten() + + def check_results(self, *args): + return super().check_results(*args, rtol=1E-4) + + +if __name__ == "__main__": + sys.exit(FlashAttention2Verifier().main()) diff --git a/sw/dnn/flashattention_2/verify.py b/sw/dnn/flashattention_2/verify.py deleted file mode 100755 index 74248cb5c..000000000 --- 
a/sw/dnn/flashattention_2/verify.py +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2023 ETH Zurich and University of Bologna. -# Licensed under the Apache License, Version 2.0, see LICENSE for details. -# SPDX-License-Identifier: Apache-2.0 -# -# Luca Colagrande - -import sys -from pathlib import Path -import numpy as np -import torch -from data.datagen import exact_golden_model - -sys.path.append(str(Path(__file__).parent / '../../../util/sim/')) -import verification # noqa: E402 -from elf import Elf # noqa: E402 -from data_utils import from_buffer, ctype_from_precision_t # noqa: E402 - - -ERR_THRESHOLD = 1E-4 - - -def main(): - # Run simulation and get outputs - args = verification.parse_args() - raw_results = verification.simulate(sim_bin=args.sim_bin, - snitch_bin=args.snitch_bin, - symbols_bin=args.symbols_bin, - log=args.log, - output_uids=['O']) - - # Extract input operands from ELF file - if args.symbols_bin: - elf = Elf(args.symbols_bin) - else: - elf = Elf(args.snitch_bin) - - layer_struct = { - 'N': 'I', - 'd': 'I', - 'B_r': 'I', - 'B_c': 'I', - 'Q': 'I', - 'K': 'I', - 'V': 'I', - 'O': 'I', - 'dtype': 'I', - 'baseline': 'I' - } - layer = elf.from_symbol('layer', layer_struct) - N = layer['N'] - d = layer['d'] - B_r = layer['B_r'] - B_c = layer['B_c'] - prec = layer['dtype'] - - Q = elf.from_symbol('Q', ctype_from_precision_t(prec)) - K = elf.from_symbol('K', ctype_from_precision_t(prec)) - V = elf.from_symbol('V', ctype_from_precision_t(prec)) - Q = torch.from_numpy(Q.reshape(N, d)) - V = torch.from_numpy(V.reshape(N, d)) - # Golden model expects key matrix in (N, d) form, while Snitch binary stores it in (d, N) - K = torch.from_numpy(K.reshape(d, N)) - K = torch.transpose(K, 0, 1) - - # Verify results - O_actual = from_buffer(raw_results['O'], ctype_from_precision_t(prec)) - O_golden = exact_golden_model(Q, K, V, B_r, B_c).flatten() - # O_golden = torch_golden_model(Q, K, V).detach().numpy().flatten() - - relative_err = 
np.absolute((O_golden - O_actual) / O_golden) - fail = np.any(relative_err > ERR_THRESHOLD) - if (fail): - verification.dump_results_to_csv([O_golden, O_actual, relative_err], - Path.cwd() / 'flashattention_2_results.csv') - print('Maximum relative error:', np.max(relative_err)) - - return int(fail) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/sw/dnn/fused_concat_linear/data/datagen.py b/sw/dnn/fused_concat_linear/scripts/datagen.py similarity index 100% rename from sw/dnn/fused_concat_linear/data/datagen.py rename to sw/dnn/fused_concat_linear/scripts/datagen.py diff --git a/sw/dnn/fused_concat_linear/scripts/verify.py b/sw/dnn/fused_concat_linear/scripts/verify.py new file mode 100755 index 000000000..6f55bd7d9 --- /dev/null +++ b/sw/dnn/fused_concat_linear/scripts/verify.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python3 +# Copyright 2023 ETH Zurich and University of Bologna. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +# +# Luca Colagrande + +import sys +import torch +from pathlib import Path +from datagen import golden_model + +sys.path.append(str(Path(__file__).parent / '../../../../util/sim/')) +from verif_utils import Verifier # noqa: E402 +from data_utils import ctype_from_precision_t # noqa: E402 + + +class FusedConcatLinearVerifier(Verifier): + + OUTPUT_UIDS = ['linear_output'] + + def __init__(self): + super().__init__() + self.layer_struct = { + 'num_inputs': 'I', + 'in_height': 'I', + 'in_width': 'I', + 'out_height': 'I', + 'out_width': 'I', + 'inputs': 'I', + 'weights': 'I', + 'concat_output': 'I', + 'linear_output': 'I', + 'dtype': 'I', + 'baseline': 'I' + } + self.layer = self.get_input_from_symbol('layer', self.layer_struct) + self.num_inputs = self.layer['num_inputs'] + self.input_shape = [self.layer['in_height'], self.layer['in_width']] + self.weights_shape = [self.layer['in_width']*self.num_inputs, self.layer['out_width']] + self.prec = self.layer['dtype'] + + 
def get_actual_results(self): + return self.get_output_from_symbol('linear_output', ctype_from_precision_t(self.prec)) + + def get_expected_results(self): + inputs = [self.get_input_from_symbol(f'input_{i}', ctype_from_precision_t(self.prec)) + for i in range(self.num_inputs)] + inputs = [torch.from_numpy(tensor.reshape(self.input_shape)) for tensor in inputs] + weights = self.get_input_from_symbol('weights', ctype_from_precision_t(self.prec)) + weights = torch.from_numpy(weights.reshape(self.weights_shape)) + output_golden, _ = golden_model(inputs, weights) + return output_golden.detach().numpy().flatten() + + def check_results(self, *args): + return super().check_results(*args, rtol=1E-6) + + +if __name__ == "__main__": + sys.exit(FusedConcatLinearVerifier().main()) diff --git a/sw/dnn/fused_concat_linear/verify.py b/sw/dnn/fused_concat_linear/verify.py deleted file mode 100755 index 382c18e4d..000000000 --- a/sw/dnn/fused_concat_linear/verify.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2023 ETH Zurich and University of Bologna. -# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
-# SPDX-License-Identifier: Apache-2.0 -# -# Luca Colagrande - -import sys -from pathlib import Path -import numpy as np -import torch -from data.datagen import golden_model - -sys.path.append(str(Path(__file__).parent / '../../../util/sim/')) -import verification # noqa: E402 -from elf import Elf # noqa: E402 -from data_utils import from_buffer, ctype_from_precision_t # noqa: E402 - - -ERR_THRESHOLD = 1E-6 - - -def main(): - # Run simulation and get outputs - args = verification.parse_args() - raw_results = verification.simulate(sim_bin=args.sim_bin, - snitch_bin=args.snitch_bin, - symbols_bin=args.symbols_bin, - log=args.log, - output_uids=['linear_output']) - - # Extract input operands from ELF file - if args.symbols_bin: - elf = Elf(args.symbols_bin) - else: - elf = Elf(args.snitch_bin) - - layer_struct = { - 'num_inputs': 'I', - 'in_height': 'I', - 'in_width': 'I', - 'out_height': 'I', - 'out_width': 'I', - 'inputs': 'I', - 'weights': 'I', - 'concat_output': 'I', - 'linear_output': 'I', - 'dtype': 'I', - 'baseline': 'I' - } - layer = elf.from_symbol('layer', layer_struct) - num_inputs = layer['num_inputs'] - input_shape = [layer['in_height'], layer['in_width']] - weights_shape = [layer['in_width']*num_inputs, layer['out_width']] - prec = layer['dtype'] - - inputs = [elf.from_symbol(f'input_{i}', ctype_from_precision_t(prec)) - for i in range(num_inputs)] - inputs = [torch.from_numpy(tensor.reshape(input_shape)) for tensor in inputs] - weights = elf.from_symbol('weights', ctype_from_precision_t(prec)) - weights = torch.from_numpy(weights.reshape(weights_shape)) - - # Verify results - output_actual = from_buffer(raw_results['linear_output'], ctype_from_precision_t(prec)) - output_golden, _ = golden_model(inputs, weights) - output_golden = output_golden.detach().numpy().flatten() - - relative_err = np.absolute((output_golden - output_actual) / output_golden) - fail = np.any(relative_err > ERR_THRESHOLD) - if (fail): - 
verification.dump_results_to_csv([output_golden, output_actual, relative_err], - Path.cwd() / 'results.csv') - print('Maximum relative error:', np.max(relative_err)) - - return int(fail) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/sw/dnn/fusedconv/data/datagen.py b/sw/dnn/fusedconv/scripts/datagen.py similarity index 100% rename from sw/dnn/fusedconv/data/datagen.py rename to sw/dnn/fusedconv/scripts/datagen.py diff --git a/sw/dnn/fusedconv/verify.py b/sw/dnn/fusedconv/scripts/verify.py similarity index 81% rename from sw/dnn/fusedconv/verify.py rename to sw/dnn/fusedconv/scripts/verify.py index e013896f7..ef37a4178 100755 --- a/sw/dnn/fusedconv/verify.py +++ b/sw/dnn/fusedconv/scripts/verify.py @@ -11,10 +11,10 @@ import torch from data.datagen import golden_model -sys.path.append(str(Path(__file__).parent / '../../../util/sim/')) +sys.path.append(str(Path(__file__).parent / '../../../../util/sim/')) import verification # noqa: E402 from elf import Elf # noqa: E402 -from data_utils import from_buffer, ctype_from_precision_t # noqa: E402 +from data_utils import from_buffer, ctype_from_precision_t, check_result # noqa: E402 ERR_THRESHOLD = 1E-6 @@ -107,21 +107,12 @@ def main(): layer['depthwise']) output_golden = output_golden.detach().numpy().flatten() - # relative_err = np.absolute((output_golden - output_actual) / output_golden) - # compute relative error only for non-zero elements - relative_err = np.zeros_like(output_golden) - non_zero = output_golden != 0 - zero_idx = np.where(output_golden == 0) - relative_err[non_zero] = np.absolute((output_golden[non_zero] - output_actual[non_zero]) - / output_golden[non_zero]) - relative_err[zero_idx] = np.absolute(output_golden[zero_idx] - output_actual[zero_idx]) - - fail = np.any(relative_err > ERR_THRESHOLD) - if (fail): + fail, rel_err = check_result(output_golden, output_actual, rtol=ERR_THRESHOLD) + if fail: verification.dump_results_to_csv( - [output_golden, output_actual, relative_err], + 
[output_golden, output_actual, rel_err], Path.cwd() / 'results.csv') - print('Maximum relative error:', np.max(relative_err)) + print('Maximum relative error:', np.max(rel_err)) return int(fail) diff --git a/sw/dnn/gelu/data/datagen.py b/sw/dnn/gelu/scripts/datagen.py similarity index 97% rename from sw/dnn/gelu/data/datagen.py rename to sw/dnn/gelu/scripts/datagen.py index 2d3f68370..a875288ca 100755 --- a/sw/dnn/gelu/data/datagen.py +++ b/sw/dnn/gelu/scripts/datagen.py @@ -39,8 +39,8 @@ def sigmoid_gelu(x): def golden_model(ifmap): - # gelu = torch.nn.GELU(approximate='tanh') - gelu = sigmoid_gelu + gelu = torch.nn.GELU(approximate='tanh') + # gelu = sigmoid_gelu return gelu(ifmap) diff --git a/sw/dnn/gelu/scripts/verify.py b/sw/dnn/gelu/scripts/verify.py new file mode 100755 index 000000000..a3a3aad99 --- /dev/null +++ b/sw/dnn/gelu/scripts/verify.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 +# Copyright 2023 ETH Zurich and University of Bologna. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +# +# Luca Colagrande + +import sys +import torch +from pathlib import Path +from datagen import golden_model + +sys.path.append(str(Path(__file__).parent / '../../../../util/sim/')) +from verif_utils import Verifier # noqa: E402 +from data_utils import ctype_from_precision_t # noqa: E402 + + +class GeluVerifier(Verifier): + + OUTPUT_UIDS = ['ofmap'] + + def __init__(self): + super().__init__() + self.layer_struct = { + 'size': 'I', + 'ifmap': 'I', + 'ofmap': 'I', + 'dtype': 'I' + } + self.layer = self.get_input_from_symbol('layer', self.layer_struct) + self.prec = self.layer['dtype'] + + def get_actual_results(self): + return self.get_output_from_symbol('ofmap', ctype_from_precision_t(self.prec)) + + def get_expected_results(self): + ifmap = self.get_input_from_symbol('ifmap', ctype_from_precision_t(self.prec)) + ifmap = torch.from_numpy(ifmap) + return golden_model(ifmap).detach().numpy().flatten() + + def check_results(self, *args): + return super().check_results(*args, rtol=1E-10) + + +if __name__ == "__main__": + sys.exit(GeluVerifier().main()) diff --git a/sw/dnn/gelu/src/gelu.h b/sw/dnn/gelu/src/gelu.h index 5649b4fa6..5947f5a7e 100644 --- a/sw/dnn/gelu/src/gelu.h +++ b/sw/dnn/gelu/src/gelu.h @@ -50,7 +50,8 @@ static inline void gelu_fp64(double *input, double *output, uint32_t size) { if (snrt_is_compute_core()) { for (uint32_t i = 0; i < size; i++) { snrt_mcycle(); - output[i] = sigmoid_gelu_fp64(input[i], -0.2888, -1.769); + // output[i] = sigmoid_gelu_fp64(input[i], -0.2888, -1.769); + output[i] = gelu_activation_fp64(input[i]); } } } diff --git a/sw/dnn/gelu/verify.py b/sw/dnn/gelu/verify.py deleted file mode 100755 index 219991fd8..000000000 --- a/sw/dnn/gelu/verify.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2023 ETH Zurich and University of Bologna. -# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
-# SPDX-License-Identifier: Apache-2.0 -# -# Luca Colagrande - -import sys -from pathlib import Path -import numpy as np -import torch -from data.datagen import golden_model - -sys.path.append(str(Path(__file__).parent / '../../../util/sim/')) -import verification # noqa: E402 -from elf import Elf # noqa: E402 -from data_utils import from_buffer, ctype_from_precision_t # noqa: E402 - - -ERR_THRESHOLD = 1E-0 - - -def main(): - # Run simulation and get outputs - args = verification.parse_args() - raw_results = verification.simulate(sim_bin=args.sim_bin, - snitch_bin=args.snitch_bin, - symbols_bin=args.symbols_bin, - log=args.log, - output_uids=['ofmap']) - - # Extract input operands from ELF file - if args.symbols_bin: - elf = Elf(args.symbols_bin) - else: - elf = Elf(args.snitch_bin) - - layer_struct = { - 'size': 'I', - 'ifmap': 'I', - 'ofmap': 'I', - 'dtype': 'I' - } - layer = elf.from_symbol('layer', layer_struct) - prec = layer['dtype'] - - ifmap = elf.from_symbol('ifmap', ctype_from_precision_t(prec)) - ifmap = torch.from_numpy(ifmap) - - # Verify results - ofmap_actual = from_buffer(raw_results['ofmap'], ctype_from_precision_t(prec)) - ofmap_golden = golden_model(ifmap).detach().numpy().flatten() - relative_err = np.absolute((ofmap_golden - ofmap_actual) / ofmap_golden) - fail = np.any(relative_err > ERR_THRESHOLD) - - # Print results - if (fail): - verification.dump_results_to_csv([ofmap_golden, ofmap_actual, relative_err], - Path.cwd() / 'gelu_results.csv') - print('Maximum relative error:', np.max(relative_err)) - - return int(fail) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/sw/dnn/layernorm/data/datagen.py b/sw/dnn/layernorm/scripts/datagen.py similarity index 100% rename from sw/dnn/layernorm/data/datagen.py rename to sw/dnn/layernorm/scripts/datagen.py diff --git a/sw/dnn/layernorm/scripts/verify.py b/sw/dnn/layernorm/scripts/verify.py new file mode 100755 index 000000000..106e4c6fb --- /dev/null +++ 
b/sw/dnn/layernorm/scripts/verify.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 +# Copyright 2023 ETH Zurich and University of Bologna. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +# +# Luca Colagrande + +import sys +import torch +from pathlib import Path +from datagen import golden_model + +sys.path.append(str(Path(__file__).parent / '../../../../util/sim/')) +from verif_utils import Verifier # noqa: E402 +from data_utils import ctype_from_precision_t # noqa: E402 + + +class LayernormVerifier(Verifier): + + OUTPUT_UIDS = ['ofmap'] + + def __init__(self): + super().__init__() + self.layer_struct = { + 'batch_size': 'I', + 'seq_len': 'I', + 'embeddings': 'I', + 'n_tiles': 'I', + 'baseline': 'I', + 'eps': 'f', + 'ifmap_ptr': 'I', + 'ofmap_ptr': 'I', + 'dtype': 'I' + } + self.layer = self.get_input_from_symbol('layer', self.layer_struct) + self.batch_size = self.layer['batch_size'] + self.seq_len = self.layer['seq_len'] + self.embeddings = self.layer['embeddings'] + self.eps = self.layer['eps'] + self.prec = self.layer['dtype'] + + def get_actual_results(self): + return self.get_output_from_symbol('ofmap', ctype_from_precision_t(self.prec)) + + def get_expected_results(self): + ifmap = self.get_input_from_symbol('ifmap', ctype_from_precision_t(self.prec)) + ifmap = ifmap.reshape(self.batch_size, self.seq_len, self.embeddings) + ifmap = torch.from_numpy(ifmap) + return golden_model(ifmap, self.eps, self.embeddings, self.prec).detach().numpy().flatten() + + def check_results(self, *args): + return super().check_results(*args, atol=0.001) + + +if __name__ == "__main__": + sys.exit(LayernormVerifier().main()) diff --git a/sw/dnn/layernorm/verify.py b/sw/dnn/layernorm/verify.py deleted file mode 100755 index 4a9ad5252..000000000 --- a/sw/dnn/layernorm/verify.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2023 ETH Zurich and University of Bologna. 
-# Licensed under the Apache License, Version 2.0, see LICENSE for details. -# SPDX-License-Identifier: Apache-2.0 -# -# Luca Colagrande - -import sys -from pathlib import Path -import numpy as np -import torch -from data.datagen import golden_model - -sys.path.append(str(Path(__file__).parent / '../../../util/sim/')) -import verification # noqa: E402 -from elf import Elf # noqa: E402 -from data_utils import from_buffer, ctype_from_precision_t # noqa: E402 - - -ERR_THRESHOLD = 0.001 - - -def main(): - # Run simulation and get outputs - args = verification.parse_args() - raw_results = verification.simulate(sim_bin=args.sim_bin, - snitch_bin=args.snitch_bin, - symbols_bin=args.symbols_bin, - log=args.log, - output_uids=['ofmap']) - - # Extract input operands from ELF file - if args.symbols_bin: - elf = Elf(args.symbols_bin) - else: - elf = Elf(args.snitch_bin) - - layer_struct = { - 'batch_size': 'I', - 'seq_len': 'I', - 'embeddings': 'I', - 'n_tiles': 'I', - 'baseline': 'I', - 'eps': 'f', - 'ifmap_ptr': 'I', - 'ofmap_ptr': 'I', - 'dtype': 'I' - } - layer = elf.from_symbol('layer', layer_struct) - batch_size = layer['batch_size'] - seq_len = layer['seq_len'] - embeddings = layer['embeddings'] - eps = layer['eps'] - prec = layer['dtype'] - - ifmap = elf.from_symbol('ifmap', ctype_from_precision_t(prec)) - ifmap = ifmap.reshape(batch_size, seq_len, embeddings) - ifmap = torch.from_numpy(ifmap) - - # Verify results - ofmap_actual = from_buffer(raw_results['ofmap'], ctype_from_precision_t(prec)) - ofmap_golden = golden_model(ifmap, eps, embeddings, prec).detach().numpy().flatten() - - absolute_err = np.absolute(ofmap_golden - ofmap_actual) - fail = np.any(absolute_err > ERR_THRESHOLD) - if (fail): - verification.dump_results_to_csv([ofmap_golden, ofmap_actual, absolute_err], - Path.cwd() / 'layernorm_results.csv') - - return int(fail) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/sw/dnn/maxpool/data/datagen.py b/sw/dnn/maxpool/scripts/datagen.py 
similarity index 100% rename from sw/dnn/maxpool/data/datagen.py rename to sw/dnn/maxpool/scripts/datagen.py diff --git a/sw/dnn/softmax/data/datagen.py b/sw/dnn/softmax/scripts/datagen.py similarity index 100% rename from sw/dnn/softmax/data/datagen.py rename to sw/dnn/softmax/scripts/datagen.py diff --git a/sw/dnn/softmax/scripts/verify.py b/sw/dnn/softmax/scripts/verify.py new file mode 100755 index 000000000..0ad1df653 --- /dev/null +++ b/sw/dnn/softmax/scripts/verify.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# Copyright 2023 ETH Zurich and University of Bologna. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +# +# Luca Colagrande + +import sys +import torch +from pathlib import Path +from datagen import golden_model + +sys.path.append(str(Path(__file__).parent / '../../../../util/sim/')) +from verif_utils import Verifier # noqa: E402 +from data_utils import ctype_from_precision_t # noqa: E402 + + +class SoftmaxVerifier(Verifier): + + OUTPUT_UIDS = ['ofmap'] + + def __init__(self): + super().__init__() + self.layer_struct = { + 'batch_size': 'I', + 'seq_len': 'I', + 'input_samples': 'I', + 'reduce_dim': 'i', + 'ifmap_ptr': 'I', + 'ofmap_ptr': 'I', + 'dtype': 'I' + } + self.layer = self.get_input_from_symbol('layer', self.layer_struct) + self.batch_size = self.layer['batch_size'] + self.seq_len = self.layer['seq_len'] + self.input_samples = self.layer['input_samples'] + self.reduce_dim = self.layer['reduce_dim'] + self.prec = self.layer['dtype'] + + def get_actual_results(self): + return self.get_output_from_symbol('ofmap', ctype_from_precision_t(self.prec)) + + def get_expected_results(self): + ifmap = self.get_input_from_symbol('ifmap', ctype_from_precision_t(self.prec)) + ifmap = ifmap.reshape(self.batch_size, self.seq_len, self.input_samples) + ifmap = torch.from_numpy(ifmap) + return golden_model(ifmap, self.reduce_dim).detach().numpy().flatten() + + def check_results(self, *args): + 
return super().check_results(*args, atol=0.003) + + +if __name__ == "__main__": + sys.exit(SoftmaxVerifier().main()) diff --git a/sw/dnn/softmax/verify.py b/sw/dnn/softmax/verify.py deleted file mode 100755 index f2be1102b..000000000 --- a/sw/dnn/softmax/verify.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2023 ETH Zurich and University of Bologna. -# Licensed under the Apache License, Version 2.0, see LICENSE for details. -# SPDX-License-Identifier: Apache-2.0 -# -# Luca Colagrande - -import sys -from pathlib import Path -import numpy as np -import torch -from data.datagen import golden_model - -sys.path.append(str(Path(__file__).parent / '../../../util/sim/')) -import verification # noqa: E402 -from elf import Elf # noqa: E402 -from data_utils import from_buffer, ctype_from_precision_t # noqa: E402 - - -ERR_THRESHOLD = 0.003 - - -def main(): - # Run simulation and get outputs - args = verification.parse_args() - raw_results = verification.simulate(sim_bin=args.sim_bin, - snitch_bin=args.snitch_bin, - symbols_bin=args.symbols_bin, - log=args.log, - output_uids=['ofmap']) - - # Extract input operands from ELF file - if args.symbols_bin: - elf = Elf(args.symbols_bin) - else: - elf = Elf(args.snitch_bin) - - layer_struct = { - 'batch_size': 'I', - 'seq_len': 'I', - 'input_samples': 'I', - 'reduce_dim': 'i', - 'ifmap_ptr': 'I', - 'ofmap_ptr': 'I', - 'dtype': 'I' - } - layer = elf.from_symbol('layer', layer_struct) - batch_size = layer['batch_size'] - seq_len = layer['seq_len'] - input_samples = layer['input_samples'] - reduce_dim = layer['reduce_dim'] - prec = layer['dtype'] - - ifmap = elf.from_symbol('ifmap', ctype_from_precision_t(prec)) - ifmap = ifmap.reshape(batch_size, seq_len, input_samples) - ifmap = torch.from_numpy(ifmap) - - # Verify results - ofmap_actual = from_buffer(raw_results['ofmap'], ctype_from_precision_t(prec)) - ofmap_golden = golden_model(ifmap, reduce_dim).detach().numpy().flatten() - - absolute_err = 
np.absolute(ofmap_golden - ofmap_actual) - fail = np.any(absolute_err > ERR_THRESHOLD) - if (fail): - verification.dump_results_to_csv([ofmap_golden, ofmap_actual, absolute_err], - Path.cwd() / 'softmax_results.csv') - - return int(fail) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/target/snitch_cluster/sw/apps/blas/gemm/test/run.py b/target/snitch_cluster/sw/apps/blas/gemm/test/run.py index 48bc9a6f9..9f958474e 100755 --- a/target/snitch_cluster/sw/apps/blas/gemm/test/run.py +++ b/target/snitch_cluster/sw/apps/blas/gemm/test/run.py @@ -52,7 +52,7 @@ def build_test(cfg): 'elf': f'build/{cfg.stem}/gemm.elf', 'name': f'gemm-{cfg.stem}', 'cmd': [ - '../../../../../../../sw/blas/gemm/verify.py', + '../../../../../../../sw/blas/gemm/scripts/verify.py', "${sim_bin}", "${elf}", '--dump-results' diff --git a/target/snitch_cluster/sw/fdiv.yaml b/target/snitch_cluster/sw/fdiv.yaml index bf9c1367c..a8b5f3930 100644 --- a/target/snitch_cluster/sw/fdiv.yaml +++ b/target/snitch_cluster/sw/fdiv.yaml @@ -4,13 +4,13 @@ runs: - elf: apps/dnn/softmax/build/softmax.elf - cmd: [../../../sw/dnn/softmax/verify.py, "${sim_bin}", "${elf}"] + cmd: [../../../sw/dnn/softmax/scripts/verify.py, "${sim_bin}", "${elf}"] - elf: apps/dnn/layernorm/build/layernorm.elf - cmd: [../../../sw/dnn/layernorm/verify.py, "${sim_bin}", "${elf}"] + cmd: [../../../sw/dnn/layernorm/scripts/verify.py, "${sim_bin}", "${elf}"] - elf: apps/dnn/gelu/build/gelu.elf - cmd: [../../../sw/dnn/gelu/verify.py, "${sim_bin}", "${elf}"] + cmd: [../../../sw/dnn/gelu/scripts/verify.py, "${sim_bin}", "${elf}"] - elf: apps/dnn/flashattention_2/build/flashattention_2.elf - cmd: [../../../sw/dnn/flashattention_2/verify.py, "${sim_bin}", "${elf}"] + cmd: [../../../sw/dnn/flashattention_2/scripts/verify.py, "${sim_bin}", "${elf}"] - elf: apps/correlation/build/correlation.elf cmd: [../../../sw/apps/correlation/scripts/verify.py, "${sim_bin}", "${elf}"] - elf: apps/covariance/build/covariance.elf diff --git 
a/target/snitch_cluster/sw/run.yaml b/target/snitch_cluster/sw/run.yaml index adb33946c..e5f07c731 100644 --- a/target/snitch_cluster/sw/run.yaml +++ b/target/snitch_cluster/sw/run.yaml @@ -75,19 +75,19 @@ runs: - elf: tests/build/caq_frep.elf simulators: [vsim, vcs, verilator] # banshee does not model FREP timing - elf: apps/blas/axpy/build/axpy.elf - cmd: [../../../sw/blas/axpy/verify.py, "${sim_bin}", "${elf}"] + cmd: [../../../sw/blas/axpy/scripts/verify.py, "${sim_bin}", "${elf}"] - elf: apps/blas/gemm/build/gemm.elf - cmd: [../../../sw/blas/gemm/verify.py, "${sim_bin}", "${elf}"] + cmd: [../../../sw/blas/gemm/scripts/verify.py, "${sim_bin}", "${elf}"] - elf: apps/dnn/batchnorm/build/batchnorm.elf - elf: apps/dnn/maxpool/build/maxpool.elf # - elf: apps/dnn/conv2d/build/conv2d.elf # Fails with wrong results - # cmd: [../../../sw/dnn/conv2d/verify.py, "${sim_bin}", "${elf}"] + # cmd: [../../../sw/dnn/conv2d/scripts/verify.py, "${sim_bin}", "${elf}"] # - elf: apps/dnn/fusedconv/build/fusedconv.elf # Fails with wrong results - # cmd: [../../../sw/dnn/fusedconv/verify.py, "${sim_bin}", "${elf}"] + # cmd: [../../../sw/dnn/fusedconv/scripts/verify.py, "${sim_bin}", "${elf}"] - elf: apps/dnn/concat/build/concat.elf - cmd: [../../../sw/dnn/concat/verify.py, "${sim_bin}", "${elf}"] + cmd: [../../../sw/dnn/concat/scripts/verify.py, "${sim_bin}", "${elf}"] - elf: apps/dnn/fused_concat_linear/build/fused_concat_linear.elf - cmd: [../../../sw/dnn/fused_concat_linear/verify.py, "${sim_bin}", "${elf}"] + cmd: [../../../sw/dnn/fused_concat_linear/scripts/verify.py, "${sim_bin}", "${elf}"] - elf: apps/montecarlo/pi_estimation/build/pi_estimation.elf - elf: apps/atax/build/atax.elf cmd: [../../../sw/apps/atax/scripts/verify.py, "${sim_bin}", "${elf}"] diff --git a/util/sim/elf.py b/util/sim/Elf.py similarity index 65% rename from util/sim/elf.py rename to util/sim/Elf.py index 7a3e02285..0e2f530df 100644 --- a/util/sim/elf.py +++ b/util/sim/Elf.py @@ -3,9 +3,6 @@ # 
SPDX-License-Identifier: Apache-2.0 # # Luca Colagrande -# -# This class implements a minimal wrapper around pyelftools -# to easily inspect ELF files. from elftools.elf.elffile import ELFFile from elftools.elf.sections import SymbolTableSection @@ -13,8 +10,14 @@ class Elf(object): + """Minimal wrapper around `pyelftools` to easily inspect ELF files.""" def __init__(self, elf_path): + """Default constructor. + + Arguments: + elf_path: Path to an ELF binary. + """ self.elf_path = elf_path self.stream = open(self.elf_path, 'rb') self.elf = ELFFile(self.stream) @@ -27,14 +30,29 @@ def __init__(self, elf_path): self.symtab = section def get_symbol_address(self, uid): + """Returns the address of a global symbol. + + Arguments: + uid: A global symbol. + """ symbol = self.symtab.get_symbol_by_name(uid)[0] return symbol.entry["st_value"] def get_symbol_size(self, uid): + """Returns the size of a global symbol. + + Arguments: + uid: A global symbol. + """ symbol = self.symtab.get_symbol_by_name(uid)[0] return symbol.entry["st_size"] def get_raw_symbol_contents(self, uid): + """Returns a bytearray with the contents of a global symbol. + + Arguments: + uid: A global symbol. + """ addr = self.get_symbol_address(uid) size = self.get_symbol_size(uid) try: @@ -51,4 +69,15 @@ def get_raw_symbol_contents(self, uid): return contents def from_symbol(self, uid, ctype): + """Returns an array with the contents of a global symbol. + + The array is formatted from the raw byte contents returned by + [`get_raw_symbol_contents()`][Elf.Elf.get_raw_symbol_contents] + using the [`from_buffer()`][data_utils.from_buffer] function. + + Arguments: + uid: A global symbol. + ctype: C type identifier passed on to the + [`from_buffer()`][data_utils.from_buffer] function. 
+ """ return from_buffer(self.get_raw_symbol_contents(uid), ctype) diff --git a/util/sim/data_utils.py b/util/sim/data_utils.py index 148b16286..93a437047 100644 --- a/util/sim/data_utils.py +++ b/util/sim/data_utils.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 # # Author: Luca Colagrande -"""Convenience functions for data generation scripts.""" +"""Convenience functions and classes for data generation scripts.""" import argparse @@ -247,7 +247,7 @@ def parse_args(self): """Parse default data generation script arguments. Returns the arguments passed to the data generation script, parsed - using the by `parser()` method. + using the `parser()` method. """ return self.parser().parse_args() diff --git a/util/sim/verif_utils.py b/util/sim/verif_utils.py new file mode 100644 index 000000000..524cdfc6f --- /dev/null +++ b/util/sim/verif_utils.py @@ -0,0 +1,265 @@ +# Copyright 2023 ETH Zurich and University of Bologna. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +# +# Bettina Lory +# Luca Colagrande +"""Convenience functions and classes for verification scripts.""" + + +import sys +import argparse +import numpy as np +import csv +from Elf import Elf +from pathlib import Path +from data_utils import flatten, from_buffer + +sys.path.append(str(Path(__file__).parent / '../../target/common/test/')) +from SnitchSim import SnitchSim # noqa: E402 + + +def dump_results_to_csv(results, path): + """Dumps a set of arrays to a CSV file. + + Takes a set of arrays (of the same shape or at least, same flattened + size), flattens them, and dumps them to a CSV file, with each array + mapped to a different column. + + Args: + results: A set of arrays to display side-by-side in the output + CSV. + path: Path of the output CSV file. 
+ """ + # Flatten and zip arrays + flattened = [flatten(arr) for arr in results] + zipped = np.column_stack(flattened) + # Write row-by-row to CSV file + with open(path, 'w') as csv_file: + csv_writer = csv.writer(csv_file) + for row in zipped: + csv_writer.writerow(row) + # Print path where results were written + print(f"Wrote results to {path}") + + +class Verifier: + """Base verifier class. + + Base verifier class which can be inherited to easily develop a + custom verification script for any kernel. + + Subclasses must override the `OUTPUT_UIDS` attribute and the + [`get_actual_results()`][verif_utils.Verifier.get_actual_results], + [`get_expected_results()`][verif_utils.Verifier.get_expected_results] + and [`check_results()`][verif_utils.Verifier.check_results] methods, + at a minimum. With respect to the latter, it suffices to invoke the + parent method with an explicit value for `rtol` or `atol`. + + Attributes: + OUTPUT_UIDS: A list of global symbols representing outputs of the + simulation. See + [`simulate()`][verif_utils.Verifier.simulate] method + for details on its use. + """ + + OUTPUT_UIDS = [None] + + def parser(self): + """Default argument parser for verification scripts. + + It is an instance of the `ArgumentParser` class from the `argparse` + module. Subclasses can extend this and add custom arguments via + the parser's `add_argument()` method. + """ + parser = argparse.ArgumentParser(allow_abbrev=True) + parser.add_argument( + 'sim_bin', + help='The simulator binary to be used to start the simulation', + ) + parser.add_argument( + 'snitch_bin', + help='The Snitch binary to be executed by the simulated Snitch hardware') + parser.add_argument( + '--symbols-bin', + help='An optional binary containing the I/O symbols. By default, ' + 'these are searched for in snitch_bin. 
This argument serves as an ' + 'alternative.') + parser.add_argument( + '--log', + help='Redirect simulation output to this log file') + parser.add_argument( + '--dump-results', + action='store_true', + help='Dump results even if the simulation does not fail') + return parser + + def parse_args(self): + """Parse default verification script arguments. + + Returns: + args: The arguments passed to the verification script, parsed + using the [`parser()`][verif_utils.Verifier.parser] + method. + """ + return self.parser().parse_args() + + def __init__(self): + """Default constructor. + + Parses command-line arguments using the + [`parse_args()`][verif_utils.Verifier.parse_args] method and + stores them in `self.args` for use by other methods. + """ + self.args = self.parse_args() + + def simulate(self): + """Launch simulation and retrieve results. + + Spawns a subprocess to simulate the `snitch_bin` binary, using a + command of the form `sim_bin snitch_bin `. It + communicates with the simulation using inter-process communication + (IPC) facilities, to poll the program for termination and retrieve + the memory contents where the results of the simulation are + stored. The results of the simulation must have global symbols + associated in `snitch_bin` in order to retrieve their address and + size in memory from `snitch_bin`. Alternatively, if `symbols_bin` + is given, the global symbols will be looked up in `symbols_bin`. + + It populates a dictionary, mapping UIDs, as listed in + `OUTPUT_UIDS`, to their memory contents at the end of the + simulation, formatted as raw bytes. The dictionary is stored in + the `self.raw_outputs` attribute. 
+ """ + # Open ELF file for processing + elf = Elf(self.args.snitch_bin) + + # Start simulation + sim = SnitchSim(self.args.sim_bin, self.args.snitch_bin, log=self.args.log) + sim.start() + + # Wait for kernel execution to be over + tohost = elf.get_symbol_address('tohost') + sim.poll(tohost, 1, 0) + + # Read out results from memory + if self.args.symbols_bin: + elf = Elf(self.args.symbols_bin) + self.raw_outputs = {} + for uid in self.OUTPUT_UIDS: + address = elf.get_symbol_address(uid) + size = elf.get_symbol_size(uid) + self.raw_outputs[uid] = sim.read(address, size) + + # Terminate + sim.finish(wait_for_sim=True) + + def check_results(self, actual, expected, atol=None, rtol=None): + """Check if the actual results are within the expected range. + + Args: + expected: The expected results. + actual: The actual results. + atol: Absolute tolerance. The maximum absolute difference + between the expected and actual results. Mutually + exclusive with `rtol`. + rtol: Relative tolerance. The maximum relative difference + between the expected and actual results. Mutually + exclusive with `atol`. + + Returns: + retcode: An exit code representing the status of the + simulation: 1 if the simulation results do not match the + expected results, 0 otherwise. 
+ """ + # Calculate absolute or relative error + if atol is not None and rtol is not None: + raise ValueError('atol and rtol are mutually exclusive.') + if atol is not None: + success = np.allclose(expected, actual, atol=atol, rtol=0, equal_nan=False) + error = np.abs(expected - actual) + err_type = "absolute" + elif rtol is not None: + success = np.allclose(expected, actual, atol=0, rtol=rtol, equal_nan=False) + scale = np.maximum(np.abs(expected), np.abs(actual)) + scale[scale == 0] = 1 # Avoid division by zero, use absolute tolerance instead + error = np.abs(expected - actual) / scale + err_type = "relative" + else: + raise ValueError('Either atol or rtol must be specified.') + + # Dump results on failure or if requested + if not success or self.args.dump_results: + dump_results_to_csv([expected, actual, error], Path.cwd() / 'results.csv') + print(f'Maximum {err_type} error: {np.max(error)}') + + # Return exit code + return int(not success) + + def get_actual_results(self): + """Get actual simulation results. + + Subclasses should override this method to return the simulation + results in a format comparable to the expected results. + """ + pass + + def get_expected_results(self): + """Get expected simulation results. + + Subclasses should override this method to return the expected + results from the simulation. + """ + pass + + def get_input_from_symbol(self, *args): + """Get the value of an input variable from its global symbol. + + Retrieves the contents of a global symbol from `snitch_bin`. + Alternatively, if `symbols_bin` is provided, it looks up the + symbol contents from there. + + Args: + args: Positional arguments accepted by the + [`from_symbol()`][Elf.Elf.from_symbol] method. + """ + if self.args.symbols_bin: + elf = Elf(self.args.symbols_bin) + else: + elf = Elf(self.args.snitch_bin) + return elf.from_symbol(*args) + + def get_output_from_symbol(self, uid, ctype): + """Get the value of an output variable from its global symbol. 
+
+        Retrieves the value of a global symbol from the memory contents
+        at the end of the simulation.
+
+        Note:
+            Calling this method before a call to the
+            [`simulate()`][verif_utils.Verifier.simulate] method results in
+            undefined behaviour.
+
+        Args:
+            uid: Global symbol to look up in memory.
+            ctype: C type specifier used to interpret the data in memory.
+        """
+        return from_buffer(self.raw_outputs[uid], ctype)
+
+    def main(self):
+        """Default main function for verification scripts."""
+        # Run simulation
+        self.simulate()
+
+        # Get actual and expected results
+        actual_results = self.get_actual_results()
+        expected_results = self.get_expected_results()
+
+        # Compare actual and expected results
+        retcode = self.check_results(actual_results, expected_results)
+
+        # Verify that overridden check_results() method returns an exit code
+        if retcode is not None:
+            return retcode
+        else:
+            raise ValueError('check_results() method must return an exit code')
diff --git a/util/sim/verification.py b/util/sim/verification.py
deleted file mode 100644
index 5e36ee0a2..000000000
--- a/util/sim/verification.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2023 ETH Zurich and University of Bologna.
-# Licensed under the Apache License, Version 2.0, see LICENSE for details.
-# SPDX-License-Identifier: Apache-2.0
-#
-# Bettina Lory
-# Luca Colagrande
-
-import sys
-import argparse
-import numpy as np
-import csv
-from elf import Elf
-from pathlib import Path
-from data_utils import flatten
-
-sys.path.append(str(Path(__file__).parent / '../../target/common/test/'))
-from SnitchSim import SnitchSim  # noqa: E402
-
-
-def parse_args():
-    # Argument parsing
-    parser = argparse.ArgumentParser(allow_abbrev=True)
-    parser.add_argument(
-        'sim_bin',
-        help='The simulator binary to be used to start the simulation',
-    )
-    parser.add_argument(
-        'snitch_bin',
-        help='The Snitch binary to be executed by the simulated Snitch hardware')
-    parser.add_argument(
-        '--symbols-bin',
-        help='An optional binary containing the I/O symbols. By default, '
-             'these are searched for in snitch_bin. This argument serves as an '
-             'alternative.')
-    parser.add_argument(
-        '--log',
-        help='Redirect simulation output to this log file')
-    parser.add_argument(
-        '--dump-results',
-        action='store_true',
-        help='Dump results even if the simulation does not fail')
-    return parser.parse_args()
-
-
-def simulate(sim_bin, snitch_bin, log, output_uids, symbols_bin=None):
-    # Open ELF file for processing
-    elf = Elf(snitch_bin)
-
-    # Start simulation
-    sim = SnitchSim(sim_bin, snitch_bin, log=log)
-    sim.start()
-
-    # Wait for kernel execution to be over
-    tohost = elf.get_symbol_address('tohost')
-    sim.poll(tohost, 1, 0)
-
-    # Read out results from memory
-    if symbols_bin:
-        elf = Elf(symbols_bin)
-    raw_outputs = {}
-    for uid in output_uids:
-        address = elf.get_symbol_address(uid)
-        size = elf.get_symbol_size(uid)
-        raw_outputs[uid] = sim.read(address, size)
-
-    # Terminate
-    sim.finish(wait_for_sim=True)
-
-    return raw_outputs
-
-
-def dump_results_to_csv(results, path):
-    """Dumps a set of arrays to a CSV file.
-
-    Takes a set of arrays (of the same shape or at least, same flattened
-    size), flattens them, and dumps them to a CSV file, with each array
-    mapped to a different column.
-
-    Args:
-        results: A set of arrays to display side-by-side in the output
-            CSV.
-        path: Path of the output CSV file.
-    """
-    # Flatten and zip arrays
-    flattened = [flatten(arr) for arr in results]
-    zipped = np.column_stack(flattened)
-    # Write row-by-row to CSV file
-    with open(path, 'w') as csv_file:
-        csv_writer = csv.writer(csv_file)
-        for row in zipped:
-            csv_writer.writerow(row)
-    # Print path where results were written
-    print(f"Wrote results to {path}")