From e91d8e92b59bd27a79d288692ec3b7a287bb23b5 Mon Sep 17 00:00:00 2001
From: Luka Macan
Date: Wed, 27 Nov 2024 14:43:09 +0100
Subject: [PATCH 1/9] Add more data generation methods and add the
 output_signed define

---
 CHANGELOG.md           |  1 +
 test/NnxTestClasses.py | 64 +++++++++++++++++++++++++++++++++++++-----
 test/testgen.py        | 29 ++++++++++++++++++-
 3 files changed, 86 insertions(+), 8 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 72ad7dd..120e935 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,7 @@
 
 ### Added
 
+- choice of data generation method (ones, incremented, or random)
 - N-EUREKA accelerator support: 3x3, 1x1, and 3x3 depthwise convolution kernels
 - Support for kernels without normalization and quantization for NE16
 - isort check
diff --git a/test/NnxTestClasses.py b/test/NnxTestClasses.py
index afde0a3..eeaa077 100644
--- a/test/NnxTestClasses.py
+++ b/test/NnxTestClasses.py
@@ -25,6 +25,7 @@
 import numpy.typing as npt
 import torch
 from pydantic import BaseModel, PositiveInt, field_validator, model_validator
+from enum import Enum
 
 from HeaderWriter import HeaderWriter
 from NeuralEngineFunctionalModel import NeuralEngineFunctionalModel
@@ -207,9 +208,49 @@ def _calculate_global_shift(
         return torch.ceil(torch.log2(s / target_s)).type(torch.int32)
 
     @staticmethod
-    def _random_data(_type: IntegerType, shape: Tuple):
+    def _generate_random(_type: IntegerType, shape: Tuple):
         return torch.randint(_type.min, _type.max, size=shape)
 
+    @staticmethod
+    def _generate_ones(_type: IntegerType, shape: Tuple):
+        _ = _type
+        return torch.ones(shape, dtype=torch.int64)
+
+    @staticmethod
+    def _generate_incremented(_type: IntegerType, shape: Tuple):
+        def incr_generator():
+            x = 0
+            while True:
+                yield x
+                x += 1
+                if x > _type.max:
+                    x = 0
+
+        return (
+            torch.from_numpy(
+                np.fromiter(incr_generator(), count=np.prod(shape), dtype=np.int64)
+            )
+            .reshape((shape[0], shape[2], shape[3], shape[1]))
+            .permute((0, 3, 1, 2))
+            .type(torch.int64)
+        )
+
+    class DataGenerationMethod(Enum):
+        RANDOM = 0
+        ONES = 1
+        INCREMENTED = 2
+
+    @staticmethod
+    def _generate_data(
+        _type: IntegerType, shape: Tuple, method: NnxTestGenerator.DataGenerationMethod
+    ):
+        if method == NnxTestGenerator.DataGenerationMethod.RANDOM:
+            return NnxTestGenerator._generate_random(_type, shape)
+        elif method == NnxTestGenerator.DataGenerationMethod.ONES:
+            return NnxTestGenerator._generate_ones(_type, shape)
+        elif method == NnxTestGenerator.DataGenerationMethod.INCREMENTED:
+            return NnxTestGenerator._generate_incremented(_type, shape)
+
     @staticmethod
     def from_conf(
         conf: NnxTestConf,
@@ -218,6 +259,7 @@ def from_conf(
         scale: Optional[torch.Tensor] = None,
         bias: Optional[torch.Tensor] = None,
         global_shift: Optional[torch.Tensor] = None,
+        data_generation_method: DataGenerationMethod = DataGenerationMethod.RANDOM,
         verbose: bool = False,
     ) -> NnxTest:
         torch.manual_seed(NnxTestGenerator._DEFAULT_SEED)
@@ -233,27 +275,33 @@ def from_conf(
         bias_shape = (1, conf.out_channel, 1, 1)
 
         if input is None:
-            input = NnxTestGenerator._random_data(
+            input = NnxTestGenerator._generate_data(
                 _type=conf.in_type,
                 shape=input_shape,
+                method=data_generation_method,
             )
 
         if weight is None:
-            weight = NnxTestGenerator._random_data(
+            weight = NnxTestGenerator._generate_data(
                 _type=conf.weight_type,
                 shape=weight_shape,
+                method=data_generation_method,
             )
 
         if conf.has_norm_quant:
             if scale is None:
                 assert conf.scale_type is not None
-                scale = NnxTestGenerator._random_data(
-                    conf.scale_type, shape=scale_shape
+                scale = NnxTestGenerator._generate_data(
+                    conf.scale_type,
+                    shape=scale_shape,
+                    method=data_generation_method,
                 )
             if conf.has_bias and bias is None:
                 assert conf.bias_type is not None
-                bias = NnxTestGenerator._random_data(
-                    conf.bias_type, shape=bias_shape
+                bias = NnxTestGenerator._generate_data(
+                    conf.bias_type,
+                    shape=bias_shape,
+                    method=data_generation_method,
                 ).type(torch.int32)
         if global_shift is None:
             global_shift = torch.Tensor([0]).type(torch.int32)
@@ -328,6 +376,7 @@ def generate(self, test_name: str, test: NnxTest):
 
         # Render output
         out_ctype = test.conf.out_type.ctype()
+        out_signed = test.conf.out_type._signed
         out_data_golden = test.output.permute(0, 2, 3, 1).ravel()
         self.header_writer.generate_vector_files(
             "output",
@@ -398,6 +447,7 @@ def generate(self, test_name: str, test: NnxTest):
                 "height": out_height,
                 "width": out_width,
                 "channel": out_channel,
+                "signed": out_signed,
                 "bits": test.conf.out_type._bits,
             },
             "weight": {
diff --git a/test/testgen.py b/test/testgen.py
index 521aecc..0ae78ea 100644
--- a/test/testgen.py
+++ b/test/testgen.py
@@ -83,7 +83,16 @@ def test_gen(
         exit(-1)
 
     test_conf = nnxTestConfCls.model_validate(test_conf_dict)
-    test = NnxTestGenerator.from_conf(test_conf, verbose=args.print_tensors)
+
+    method = NnxTestGenerator.DataGenerationMethod.RANDOM
+    if args.gen_ones:
+        method = NnxTestGenerator.DataGenerationMethod.ONES
+    if args.gen_incremented:
+        method = NnxTestGenerator.DataGenerationMethod.INCREMENTED
+
+    test = NnxTestGenerator.from_conf(
+        test_conf, data_generation_method=method, verbose=args.print_tensors
+    )
     if not args.skip_save:
         test.save(args.test_dir)
     if args.headers:
@@ -189,6 +198,20 @@ def add_common_arguments(parser: argparse.ArgumentParser):
     dest="print_tensors",
     help="Print tensor values to stdout.",
 )
+parser_test.add_argument(
+    "--gen-ones",
+    action="store_true",
+    default=False,
+    dest="gen_ones",
+    help="Generate all ones for input tensors, useful for testing arithmetic issues.",
+)
+parser_test.add_argument(
+    "--gen-incremented",
+    action="store_true",
+    default=False,
+    dest="gen_incremented",
+    help="Generate incremented values for input tensors, useful for testing tensor load issues.",
+)
 add_common_arguments(parser_test)
 parser_test.set_defaults(func=test_gen)
@@ -212,6 +235,10 @@ def add_common_arguments(parser: argparse.ArgumentParser):
 
 args = parser.parse_args()
 
+assert not (
+    args.gen_ones and args.gen_incremented
+), "You can choose only one method for input generation."
+ if args.accelerator == "ne16": nnxMemoryLayoutCls = Ne16MemoryLayout nnxTestConfCls = Ne16TestConf From 46dfd089155686a4740feb22de2d59064e39e7ae Mon Sep 17 00:00:00 2001 From: Luka Macan Date: Tue, 3 Dec 2024 13:17:28 +0100 Subject: [PATCH 2/9] Change MemoryLayout to NnxWeight --- test/{Ne16MemoryLayout.py => Ne16Weight.py} | 37 ++++++++---- ...eurekaMemoryLayout.py => NeurekaWeight.py} | 46 +++++++++++---- test/NnxTestClasses.py | 59 ++++++++++++++----- test/app/src/nnx_layer.c | 6 +- test/conftest.py | 12 ++-- test/test.py | 10 +--- test/testgen.py | 31 +++++----- 7 files changed, 135 insertions(+), 66 deletions(-) rename test/{Ne16MemoryLayout.py => Ne16Weight.py} (77%) rename test/{NeurekaMemoryLayout.py => NeurekaWeight.py} (81%) diff --git a/test/Ne16MemoryLayout.py b/test/Ne16Weight.py similarity index 77% rename from test/Ne16MemoryLayout.py rename to test/Ne16Weight.py index db76fb1..0151ce0 100644 --- a/test/Ne16MemoryLayout.py +++ b/test/Ne16Weight.py @@ -19,12 +19,15 @@ import numpy as np import numpy.typing as npt +from HeaderWriter import HeaderWriter +from NnxTestClasses import NnxWeight, WmemLiteral -class Ne16MemoryLayout: + +class Ne16Weight(NnxWeight): _CIN_SUBTILE = 16 @staticmethod - def weightEncode( + def encode( weight: npt.NDArray[np.uint8], bits: int, depthwise: bool = False ) -> npt.NDArray[np.uint8]: """Unroll weight into expected memory format @@ -39,8 +42,8 @@ def weightEncode( cout, cin, height, width = weight.shape # Pad cin to be divisible with CIN_SUBTILE - if cin % Ne16MemoryLayout._CIN_SUBTILE != 0: - cinPad = Ne16MemoryLayout._CIN_SUBTILE - cin % Ne16MemoryLayout._CIN_SUBTILE + if cin % Ne16Weight._CIN_SUBTILE != 0: + cinPad = Ne16Weight._CIN_SUBTILE - cin % Ne16Weight._CIN_SUBTILE weight = np.pad( weight, ((0, 0), (0, cinPad), (0, 0), (0, 0)), @@ -51,8 +54,8 @@ def weightEncode( # Reshape into (cout, cinMajor, cinMinor, flattened spatial, 1) # The 1 at the end is required by the unpacking - cinMajor = cin // Ne16MemoryLayout._CIN_SUBTILE - cinMinor = Ne16MemoryLayout._CIN_SUBTILE + cinMajor = cin // Ne16Weight._CIN_SUBTILE + cinMinor = Ne16Weight._CIN_SUBTILE weight = weight.reshape(cout, cinMajor, cinMinor, height * width, 1) # Unpack 'bits' bits in little order, e.g. 
bits=4: 3 => [1, 1, 0, 0] @@ -74,7 +77,7 @@ def weightEncode( return weight.flatten() @staticmethod - def weightDecode( + def decode( weight: npt.NDArray[np.uint8], bits: int, cout: int, @@ -82,9 +85,8 @@ def weightDecode( height: int, width: int, ) -> npt.NDArray[np.uint8]: - """Reverse of weight_roll""" - cinMajor = int(np.ceil(cin / Ne16MemoryLayout._CIN_SUBTILE)) - cinMinor = Ne16MemoryLayout._CIN_SUBTILE + cinMajor = int(np.ceil(cin / Ne16Weight._CIN_SUBTILE)) + cinMinor = Ne16Weight._CIN_SUBTILE cinMinorBytes = int(np.ceil(cinMinor / 8)) weight = weight.reshape(cout, cinMajor, bits, height * width, cinMinorBytes, 1) @@ -96,3 +98,18 @@ def weightDecode( weight = weight[:, :cin, :, :] return weight + + @staticmethod + def source_generate( + wmem: WmemLiteral, init: npt.NDArray[np.uint8], header_writer: HeaderWriter + ) -> None: + assert wmem == "tcdm", f"Invalid wmem source provided: {wmem}" + section = "PI_L1" + + header_writer.generate_vector_files( + "weight", + _type="uint8_t", + size=init.size, + init=init, + section=section, + ) diff --git a/test/NeurekaMemoryLayout.py b/test/NeurekaWeight.py similarity index 81% rename from test/NeurekaMemoryLayout.py rename to test/NeurekaWeight.py index a9acb4c..9f9a9cd 100644 --- a/test/NeurekaMemoryLayout.py +++ b/test/NeurekaWeight.py @@ -20,14 +20,17 @@ import numpy as np import numpy.typing as npt +from HeaderWriter import HeaderWriter +from NnxTestClasses import NnxWeight, WmemLiteral -class NeurekaMemoryLayout: + +class NeurekaWeight(NnxWeight): _WEIGHT_BANDWIDTH = 256 _CIN_SUBTILE_1x1 = 32 _CIN_SUBTILE_3x3 = 28 @staticmethod - def weightEncode( + def encode( weight: npt.NDArray[np.uint8], bits: int, depthwise: bool = False ) -> npt.NDArray[np.uint8]: """Unroll weight into expected memory format @@ -43,9 +46,9 @@ def weightEncode( cout, cin, height, width = weight.shape cinSubtile = ( - NeurekaMemoryLayout._CIN_SUBTILE_3x3 + NeurekaWeight._CIN_SUBTILE_3x3 if height == 3 - else NeurekaMemoryLayout._CIN_SUBTILE_1x1 + else NeurekaWeight._CIN_SUBTILE_1x1 ) # Pad cin to be divisible with CIN_SUBTILE @@ -79,7 +82,7 @@ def weightEncode( # (-1, Weight Bandwidth) weight = np.pad( weight, - ((0, 0), (0, NeurekaMemoryLayout._WEIGHT_BANDWIDTH - weight.shape[-1])), + ((0, 0), (0, NeurekaWeight._WEIGHT_BANDWIDTH - weight.shape[-1])), "constant", constant_values=0, ) @@ -102,7 +105,7 @@ def weightEncode( weight = weight.transpose(0, 1, 3, 4, 2, 5) # (-1, Weight Bandwidth) weight = weight.reshape( - cout * cinMajor, NeurekaMemoryLayout._WEIGHT_BANDWIDTH + cout * cinMajor, NeurekaWeight._WEIGHT_BANDWIDTH ) # cout*cinMajor, 256b # Pack bits @@ -116,7 +119,7 @@ def weightEncode( return weight.flatten() @staticmethod - def weightDecode( + def decode( weight: npt.NDArray[np.uint8], bits: int, cout: int, @@ -124,19 +127,19 @@ def weightDecode( height: int, width: int, ) -> npt.NDArray[np.uint8]: - """Reverse of weightEncode""" + """Reverse of encode""" cinSubtile = ( - NeurekaMemoryLayout._CIN_SUBTILE_3x3 + NeurekaWeight._CIN_SUBTILE_3x3 if height == 3 - else NeurekaMemoryLayout._CIN_SUBTILE_1x1 + else NeurekaWeight._CIN_SUBTILE_1x1 ) cinMajor = int(np.ceil(cin / cinSubtile)) cinMinor = cinSubtile - weightBandwidthBytes = int(np.ceil(NeurekaMemoryLayout._WEIGHT_BANDWIDTH / 8)) + weightBandwidthBytes = int(np.ceil(NeurekaWeight._WEIGHT_BANDWIDTH / 8)) weight = weight.reshape(-1, weightBandwidthBytes, 1) weight = np.unpackbits(weight, axis=-1, count=8, bitorder="little") - weight = weight.reshape(-1, NeurekaMemoryLayout._WEIGHT_BANDWIDTH) + weight = 
weight.reshape(-1, NeurekaWeight._WEIGHT_BANDWIDTH) if height == 3 and width == 3: weight = weight[:, : height * width * cinMinor] @@ -153,3 +156,22 @@ def weightDecode( weight = weight[:, :cin, :, :] return weight + + @staticmethod + def source_generate( + wmem: WmemLiteral, init: npt.NDArray[np.uint8], header_writer: HeaderWriter + ) -> None: + if wmem == "sram": + section = '__attribute__((section(".weightmem_sram")))' + elif wmem == "mram": + section = '__attribute__((section(".weightmem_mram")))' + else: + section = "PI_L1" + + header_writer.generate_vector_files( + "weight", + _type="uint8_t", + size=init.size, + init=init, + section=section, + ) diff --git a/test/NnxTestClasses.py b/test/NnxTestClasses.py index eeaa077..7481260 100644 --- a/test/NnxTestClasses.py +++ b/test/NnxTestClasses.py @@ -19,13 +19,14 @@ from __future__ import annotations import os +from abc import ABC, abstractmethod +from enum import Enum from typing import Callable, Literal, Optional, Set, Tuple, Type, Union import numpy as np import numpy.typing as npt import torch from pydantic import BaseModel, PositiveInt, field_validator, model_validator -from enum import Enum from HeaderWriter import HeaderWriter from NeuralEngineFunctionalModel import NeuralEngineFunctionalModel @@ -344,14 +345,47 @@ def regenerate(test: NnxTest, regen_tensors: Set[str]) -> NnxTest: return NnxTestGenerator.from_conf(test.conf, **kwargs) +class NnxWeight(ABC): + + @staticmethod + @abstractmethod + def encode( + weight: npt.NDArray[np.uint8], bits: int, depthwise: bool = False + ) -> npt.NDArray[np.uint8]: + """Unroll weight into expected memory format + + Expected input weight shape is (cout, cin, height, width). + """ + ... + + @staticmethod + @abstractmethod + def decode( + weight: npt.NDArray[np.uint8], + bits: int, + cout: int, + cin: int, + height: int, + width: int, + ) -> npt.NDArray[np.uint8]: + """Reverse of encode""" + ... + + @staticmethod + @abstractmethod + def source_generate( + wmem: WmemLiteral, init: npt.NDArray[np.uint8], header_writer: HeaderWriter + ) -> None: + """Function implementing generation of weight's sources""" + ... 
+ + class NnxTestHeaderGenerator: DEFAULT_HEADERS_DIR = "app/gen" def __init__( self, - weightEncode: Callable[ - [npt.NDArray[np.uint8], int, bool], npt.NDArray[np.uint8] - ], + nnxWeightCls: Type[NnxWeight], headers_dir: Optional[Union[str, os.PathLike]] = None, ): if headers_dir is None: @@ -359,7 +393,7 @@ def __init__( self.header_writer = HeaderWriter(headers_dir) # function that takes the weights in CoutCinK format, bitwidth, and a depthwise flag, # and returns a numpy array of dtype=np.uint8 of data in a layout correct for the accelerator - self.weightEncode = weightEncode + self.nnxWeightCls = nnxWeightCls def generate(self, test_name: str, test: NnxTest): assert test.input is not None and test.output is not None @@ -393,21 +427,14 @@ def generate(self, test_name: str, test: NnxTest): weight_offset = -(2 ** (weight_bits - 1)) weight_out_ch, weight_in_ch, weight_ks_h, weight_ks_w = test.weight.shape weight_data: np.ndarray = test.weight.numpy() - weight_offset - weight_init = self.weightEncode( + weight_init = self.nnxWeightCls.encode( weight_data.astype(np.uint8), weight_type._bits, test.conf.depthwise, ) - if test.conf.wmem == "sram": - section = '__attribute__((section(".weightmem_sram")))' - else: - section = "PI_L1" - self.header_writer.generate_vector_files( - "weight", - _type="uint8_t", - size=weight_init.size, - init=weight_init, - section=section, + + self.nnxWeightCls.source_generate( + test.conf.wmem, weight_init, self.header_writer ) # Render scale diff --git a/test/app/src/nnx_layer.c b/test/app/src/nnx_layer.c index b6dca81..bea5c5e 100644 --- a/test/app/src/nnx_layer.c +++ b/test/app/src/nnx_layer.c @@ -172,7 +172,11 @@ static void task_prepare(nnx_task_t *task) { const nnx_task_flag_e flag_bias = HAS_BIAS ? nnxTaskFlagTrue : nnxTaskFlagFalse; - const uint32_t bias_addr = (uint32_t)(HAS_BIAS ? bias : NULL); +#if HAS_BIAS == 1 + const uint32_t bias_addr = (uint32_t)bias; +#else + const uint32_t bias_addr = (uint32_t)NULL; +#endif nnx_quant_function_e quant_function = HAS_RELU ? 
quantFunctionRelu : quantFunctionIdentity; diff --git a/test/conftest.py b/test/conftest.py index 8d7578e..187f8ea 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -22,11 +22,11 @@ import pydantic import pytest -from Ne16MemoryLayout import Ne16MemoryLayout from Ne16TestConf import Ne16TestConf -from NeurekaMemoryLayout import NeurekaMemoryLayout +from Ne16Weight import Ne16Weight from NeurekaTestConf import NeurekaTestConf -from NnxTestClasses import NnxTest, NnxTestGenerator +from NeurekaWeight import NeurekaWeight +from NnxTestClasses import NnxTest, NnxTestGenerator, NnxWeight _SUPPORTED_ACCELERATORS = ["ne16", "neureka"] @@ -82,10 +82,10 @@ def pytest_generate_tests(metafunc): nnxName = metafunc.config.getoption("accelerator") if nnxName == "ne16": - nnxMemoryLayoutCls = Ne16MemoryLayout + nnxWeightCls = Ne16Weight nnxTestConfCls = Ne16TestConf elif nnxName == "neureka": - nnxMemoryLayoutCls = NeurekaMemoryLayout + nnxWeightCls = NeurekaWeight nnxTestConfCls = NeurekaTestConf else: assert ( @@ -125,4 +125,4 @@ def pytest_generate_tests(metafunc): metafunc.parametrize("nnxTestAndName", nnxTestAndNames) metafunc.parametrize("timeout", [timeout]) metafunc.parametrize("nnxName", [nnxName]) - metafunc.parametrize("nnxMemoryLayoutCls", [nnxMemoryLayoutCls]) + metafunc.parametrize("nnxWeightCls", [nnxWeightCls]) diff --git a/test/test.py b/test/test.py index 1893cdf..b422de5 100644 --- a/test/test.py +++ b/test/test.py @@ -23,9 +23,7 @@ from pathlib import Path from typing import Dict, Optional, Tuple, Type, Union -from Ne16MemoryLayout import Ne16MemoryLayout -from NeurekaMemoryLayout import NeurekaMemoryLayout -from NnxTestClasses import NnxTest, NnxTestConf, NnxTestHeaderGenerator +from NnxTestClasses import NnxTest, NnxTestConf, NnxTestHeaderGenerator, NnxWeight HORIZONTAL_LINE = "\n" + "-" * 100 + "\n" @@ -113,12 +111,10 @@ def test( nnxTestAndName: Tuple[NnxTest, str], timeout: int, nnxName: str, - nnxMemoryLayoutCls: Union[Type[Ne16MemoryLayout], Type[NeurekaMemoryLayout]], + nnxWeightCls: Type[NnxWeight], ): nnxTest, nnxTestName = nnxTestAndName - NnxTestHeaderGenerator(nnxMemoryLayoutCls.weightEncode).generate( - nnxTestName, nnxTest - ) + NnxTestHeaderGenerator(nnxWeightCls).generate(nnxTestName, nnxTest) Path("app/src/nnx_layer.c").touch() cmd = f"make -C app all run platform=gvsoc" diff --git a/test/testgen.py b/test/testgen.py index 0ae78ea..4e954c7 100644 --- a/test/testgen.py +++ b/test/testgen.py @@ -19,25 +19,30 @@ import argparse import json import os -from typing import Optional, Set, Type, Union +from typing import Callable, Optional, Set, Type, Union +import numpy as np +import numpy.typing as npt import toml -from Ne16MemoryLayout import Ne16MemoryLayout +from HeaderWriter import HeaderWriter from Ne16TestConf import Ne16TestConf -from NeurekaMemoryLayout import NeurekaMemoryLayout +from Ne16Weight import Ne16Weight from NeurekaTestConf import NeurekaTestConf +from NeurekaWeight import NeurekaWeight from NnxTestClasses import ( NnxTest, NnxTestConf, NnxTestGenerator, NnxTestHeaderGenerator, + NnxWeight, + WmemLiteral, ) def headers_gen( args, - nnxMemoryLayoutCls: Union[Type[Ne16MemoryLayout], Type[NeurekaMemoryLayout]], + nnxWeightCls: Type[NnxWeight], nnxTestConfCls: Type[NnxTestConf], test: Optional[NnxTest] = None, ): @@ -46,9 +51,7 @@ def headers_gen( assert test is not None if not test.is_valid(): test = NnxTestGenerator.from_conf(test.conf) - NnxTestHeaderGenerator(nnxMemoryLayoutCls.weightEncode).generate( - args.test_dir, test - ) + 
NnxTestHeaderGenerator(nnxWeightCls).generate(args.test_dir, test) def print_tensors(test: NnxTest): @@ -68,7 +71,7 @@ def print_tensors(test: NnxTest): def test_gen( args, - nnxMemoryLayoutCls: Union[Type[Ne16MemoryLayout], Type[NeurekaMemoryLayout]], + nnxWeightCls: Type[NnxWeight], nnxTestConfCls: Type[NnxTestConf], ): if args.conf.endswith(".toml"): @@ -96,7 +99,7 @@ def test_gen( if not args.skip_save: test.save(args.test_dir) if args.headers: - headers_gen(args, nnxMemoryLayoutCls, nnxTestConfCls, test) + headers_gen(args, nnxWeightCls, nnxTestConfCls, test) if args.print_tensors: print_tensors(test) @@ -126,10 +129,10 @@ def _regen_recursive( def test_regen( args, - nnxMemoryLayoutCls: Union[Type[Ne16MemoryLayout], Type[NeurekaMemoryLayout]], + nnxWeightCls: Type[NnxWeight], nnxTestConfCls: Type[NnxTestConf], ): - _ = nnxMemoryLayoutCls + _ = nnxWeightCls regen_tensors = set(args.tensors + ["output"]) for test_dir in args.test_dirs: @@ -240,12 +243,12 @@ def add_common_arguments(parser: argparse.ArgumentParser): ), "You can choose only one method for input generation." if args.accelerator == "ne16": - nnxMemoryLayoutCls = Ne16MemoryLayout + nnxWeightCls = Ne16Weight nnxTestConfCls = Ne16TestConf elif args.accelerator == "neureka": - nnxMemoryLayoutCls = NeurekaMemoryLayout + nnxWeightCls = NeurekaWeight nnxTestConfCls = NeurekaTestConf else: assert False, f"Unsupported accelerator {args.accelerator}." -args.func(args, nnxMemoryLayoutCls, nnxTestConfCls) +args.func(args, nnxWeightCls, nnxTestConfCls) From f7c76fc036e49a6c4cb600ec031f28094175961a Mon Sep 17 00:00:00 2001 From: Luka Macan Date: Thu, 5 Dec 2024 12:14:32 +0100 Subject: [PATCH 3/9] Fix regen option of testgen --- test/NnxTestClasses.py | 10 +++++++--- test/testgen.py | 26 ++++++++++++++------------ 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/test/NnxTestClasses.py b/test/NnxTestClasses.py index 7481260..60e9631 100644 --- a/test/NnxTestClasses.py +++ b/test/NnxTestClasses.py @@ -21,7 +21,7 @@ import os from abc import ABC, abstractmethod from enum import Enum -from typing import Callable, Literal, Optional, Set, Tuple, Type, Union +from typing import Literal, Optional, Set, Tuple, Type, Union, get_args import numpy as np import numpy.typing as npt @@ -337,9 +337,13 @@ def from_conf( global_shift=global_shift, ) + TensorName = Literal["input", "output", "weight", "scale", "bias"] + @staticmethod - def regenerate(test: NnxTest, regen_tensors: Set[str]) -> NnxTest: - test_tensors = set(["input", "output", "weight", "scale", "bias"]) + def regenerate( + test: NnxTest, regen_tensors: Set[NnxTestGenerator.TensorName] + ) -> NnxTest: + test_tensors = set(get_args(NnxTestGenerator.TensorName)) load_tensors = test_tensors - regen_tensors kwargs = {tensor: getattr(test, tensor) for tensor in load_tensors} return NnxTestGenerator.from_conf(test.conf, **kwargs) diff --git a/test/testgen.py b/test/testgen.py index 4e954c7..e88cf16 100644 --- a/test/testgen.py +++ b/test/testgen.py @@ -19,7 +19,8 @@ import argparse import json import os -from typing import Callable, Optional, Set, Type, Union +import typing +from typing import Optional, Set, Type, Union import numpy as np import numpy.typing as npt @@ -106,7 +107,7 @@ def test_gen( def _regen( path: Union[str, os.PathLike], - regen_tensors: Set[str], + regen_tensors: Set[NnxTestGenerator.TensorName], nnxTestConfCls: Type[NnxTestConf], ) -> None: test = NnxTest.load(nnxTestConfCls, path) @@ -116,7 +117,7 @@ def _regen( def _regen_recursive( path: Union[str, 
os.PathLike], - regen_tensors: Set[str], + regen_tensors: Set[NnxTestGenerator.TensorName], nnxTestConfCls: Type[NnxTestConf], ) -> None: if NnxTest.is_test_dir(path): @@ -133,13 +134,12 @@ def test_regen( nnxTestConfCls: Type[NnxTestConf], ): _ = nnxWeightCls - regen_tensors = set(args.tensors + ["output"]) + regen_tensors = set(args.tensors) - for test_dir in args.test_dirs: - if args.recurse: - _regen_recursive(test_dir, regen_tensors, nnxTestConfCls) - else: - _regen(test_dir, regen_tensors, nnxTestConfCls) + if args.recursive: + _regen_recursive(args.test_dir, regen_tensors, nnxTestConfCls) + else: + _regen(args.test_dir, regen_tensors, nnxTestConfCls) def add_common_arguments(parser: argparse.ArgumentParser): @@ -220,10 +220,12 @@ def add_common_arguments(parser: argparse.ArgumentParser): parser_regen = subparsers.add_parser("regen", description="Regenerate test tensors.") parser_regen.add_argument( - "tensors", + "--tensor", type=str, - nargs="?", - default=[], + dest="tensors", + choices=typing.get_args(NnxTestGenerator.TensorName), + action="append", + default=["output"], help="Tensors that should be regenerated. Output included by default.", ) parser_regen.add_argument( From 0bc8d43ec2033a68f74ffb422abbb573d68a3f51 Mon Sep 17 00:00:00 2001 From: Luka Macan Date: Fri, 6 Dec 2024 11:21:05 +0100 Subject: [PATCH 4/9] Fix global shift type to uint8 --- test/NnxTestClasses.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/NnxTestClasses.py b/test/NnxTestClasses.py index 60e9631..bd0223e 100644 --- a/test/NnxTestClasses.py +++ b/test/NnxTestClasses.py @@ -19,6 +19,7 @@ from __future__ import annotations import os +import typing from abc import ABC, abstractmethod from enum import Enum from typing import Literal, Optional, Set, Tuple, Type, Union, get_args @@ -206,7 +207,8 @@ def _calculate_global_shift( """Calculate global shift so that the output values are in the range of out_type""" s = tensor.type(torch.float64).std() target_s = 2 ** (out_type._bits - 1) - return torch.ceil(torch.log2(s / target_s)).type(torch.int32) + shift = torch.ceil(torch.log2(s / target_s)) + return torch.clamp(shift, 0, 255).type(torch.uint8) @staticmethod def _generate_random(_type: IntegerType, shape: Tuple): @@ -305,7 +307,7 @@ def from_conf( method=data_generation_method, ).type(torch.int32) if global_shift is None: - global_shift = torch.Tensor([0]).type(torch.int32) + global_shift = torch.Tensor([0]).type(torch.uint8) conv_kwargs = { **conf.__dict__, "out_type": NeuralEngineFunctionalModel.ACCUMULATOR_TYPE, From bd791350d263d777eb88b31edae45e62c4c696eb Mon Sep 17 00:00:00 2001 From: Luka Macan Date: Thu, 12 Dec 2024 14:34:10 +0100 Subject: [PATCH 5/9] Add NnxMapping --- test/NnxMapping.py | 31 +++++++++++++++++++++++++++++++ test/conftest.py | 44 +++++++++++++++++++------------------------- test/test.py | 12 ++++++++---- 3 files changed, 58 insertions(+), 29 deletions(-) create mode 100644 test/NnxMapping.py diff --git a/test/NnxMapping.py b/test/NnxMapping.py new file mode 100644 index 0000000..221d35e --- /dev/null +++ b/test/NnxMapping.py @@ -0,0 +1,31 @@ +from typing import List, Literal, get_args + +from Ne16TestConf import Ne16TestConf +from Ne16Weight import Ne16Weight +from NeurekaTestConf import NeurekaTestConf +from NeurekaWeight import NeurekaWeight +from NnxTestClasses import NnxTestConf, NnxWeight + +NnxName = Literal["ne16", "neureka"] + + +def valid_nnx_names() -> List[str]: + return get_args(NnxName) + + +def is_valid_nnx_name(name: str) -> bool: + return 
name in valid_nnx_names() + + +def NnxWeightClsFromName(name: NnxName) -> NnxWeight: + if name == "ne16": + return Ne16Weight + elif name == "neureka": + return NeurekaWeight + + +def NnxTestConfClsFromName(name: NnxName) -> NnxTestConf: + if name == "ne16": + return Ne16TestConf + elif name == "neureka": + return NeurekaTestConf diff --git a/test/conftest.py b/test/conftest.py index 187f8ea..fdb4bb8 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -22,13 +22,14 @@ import pydantic import pytest -from Ne16TestConf import Ne16TestConf -from Ne16Weight import Ne16Weight -from NeurekaTestConf import NeurekaTestConf -from NeurekaWeight import NeurekaWeight -from NnxTestClasses import NnxTest, NnxTestGenerator, NnxWeight - -_SUPPORTED_ACCELERATORS = ["ne16", "neureka"] +from NnxMapping import ( + NnxName, + NnxTestConfClsFromName, + is_valid_nnx_name, + valid_nnx_names, +) +from NnxTestClasses import NnxTest, NnxTestGenerator +from TestClasses import implies def pytest_addoption(parser): @@ -52,7 +53,7 @@ def pytest_addoption(parser): parser.addoption( "-A", "--accelerator", - choices=_SUPPORTED_ACCELERATORS, + choices=valid_nnx_names(), default="ne16", help="Choose an accelerator to test. Default: ne16", ) @@ -81,16 +82,9 @@ def pytest_generate_tests(metafunc): timeout = metafunc.config.getoption("timeout") nnxName = metafunc.config.getoption("accelerator") - if nnxName == "ne16": - nnxWeightCls = Ne16Weight - nnxTestConfCls = Ne16TestConf - elif nnxName == "neureka": - nnxWeightCls = NeurekaWeight - nnxTestConfCls = NeurekaTestConf - else: - assert ( - False - ), f"Given accelerator {nnxName} not supported. Supported accelerators: {_SUPPORTED_ACCELERATORS}" + assert is_valid_nnx_name( + nnxName + ), f"Given accelerator {nnxName} not supported. Supported accelerators: {valid_nnx_names()}" if recursive: tests_dirs = test_dirs @@ -99,7 +93,8 @@ def pytest_generate_tests(metafunc): test_dirs.extend(_find_test_dirs(tests_dir)) # Load valid tests - nnxTestAndNames = [] + nnxTestNames = [] + nnxTestConfCls = NnxTestConfClsFromName(nnxName) for test_dir in test_dirs: try: test = NnxTest.load(nnxTestConfCls, test_dir) @@ -107,22 +102,21 @@ def pytest_generate_tests(metafunc): if not test.is_valid() or regenerate: test = NnxTestGenerator.from_conf(test.conf) test.save_data(test_dir) - nnxTestAndNames.append((test, test_dir)) + nnxTestNames.append(test_dir) except pydantic.ValidationError as e: for error in e.errors(): if error["type"] == "missing": raise e - nnxTestAndNames.append( + nnxTestNames.append( pytest.param( - (None, test_dir), + test_dir, marks=pytest.mark.skipif( True, reason=f"Invalid test {test_dir}: {e.errors}" ), ) ) - metafunc.parametrize("nnxTestAndName", nnxTestAndNames) - metafunc.parametrize("timeout", [timeout]) metafunc.parametrize("nnxName", [nnxName]) - metafunc.parametrize("nnxWeightCls", [nnxWeightCls]) + metafunc.parametrize("nnxTestName", nnxTestNames) + metafunc.parametrize("timeout", [timeout]) diff --git a/test/test.py b/test/test.py index b422de5..ccbdad7 100644 --- a/test/test.py +++ b/test/test.py @@ -23,6 +23,7 @@ from pathlib import Path from typing import Dict, Optional, Tuple, Type, Union +from NnxMapping import NnxName, NnxTestConfClsFromName, NnxWeightClsFromName from NnxTestClasses import NnxTest, NnxTestConf, NnxTestHeaderGenerator, NnxWeight HORIZONTAL_LINE = "\n" + "-" * 100 + "\n" @@ -108,12 +109,15 @@ def assert_message( def test( - nnxTestAndName: Tuple[NnxTest, str], + nnxName: NnxName, + nnxTestName: Tuple[NnxTest, str], timeout: int, - nnxName: 
str, - nnxWeightCls: Type[NnxWeight], ): - nnxTest, nnxTestName = nnxTestAndName + nnxTestConfCls = NnxTestConfClsFromName(nnxName) + # conftest.py makes sure the test is valid and generated + nnxTest = NnxTest.load(nnxTestConfCls, nnxTestName) + + nnxWeightCls = NnxWeightClsFromName(nnxName) NnxTestHeaderGenerator(nnxWeightCls).generate(nnxTestName, nnxTest) Path("app/src/nnx_layer.c").touch() From 6d80c9673ff5ee2aafbfe01f1f8d585f775ff928 Mon Sep 17 00:00:00 2001 From: Luka Macan Date: Thu, 12 Dec 2024 16:06:35 +0100 Subject: [PATCH 6/9] Fix pyright errors and add the NnxMapping --- test/NnxMapping.py | 32 ++++++++++++++------------------ test/TestClasses.py | 26 +++++--------------------- test/conftest.py | 18 +++++------------- test/test.py | 14 +++++++------- 4 files changed, 31 insertions(+), 59 deletions(-) diff --git a/test/NnxMapping.py b/test/NnxMapping.py index 221d35e..4cdbaf0 100644 --- a/test/NnxMapping.py +++ b/test/NnxMapping.py @@ -1,4 +1,5 @@ -from typing import List, Literal, get_args +from enum import Enum +from typing import Dict, NamedTuple, Type from Ne16TestConf import Ne16TestConf from Ne16Weight import Ne16Weight @@ -6,26 +7,21 @@ from NeurekaWeight import NeurekaWeight from NnxTestClasses import NnxTestConf, NnxWeight -NnxName = Literal["ne16", "neureka"] +class NnxName(Enum): + ne16 = "ne16" + neureka = "neureka" -def valid_nnx_names() -> List[str]: - return get_args(NnxName) + def __str__(self): + return self.value -def is_valid_nnx_name(name: str) -> bool: - return name in valid_nnx_names() +class NnxAcceleratorClasses(NamedTuple): + testConfCls: Type[NnxTestConf] + weightCls: Type[NnxWeight] -def NnxWeightClsFromName(name: NnxName) -> NnxWeight: - if name == "ne16": - return Ne16Weight - elif name == "neureka": - return NeurekaWeight - - -def NnxTestConfClsFromName(name: NnxName) -> NnxTestConf: - if name == "ne16": - return Ne16TestConf - elif name == "neureka": - return NeurekaTestConf +NnxMapping: Dict[NnxName, NnxAcceleratorClasses] = { + NnxName.ne16: NnxAcceleratorClasses(Ne16TestConf, Ne16Weight), + NnxName.neureka: NnxAcceleratorClasses(NeurekaTestConf, NeurekaWeight), +} diff --git a/test/TestClasses.py b/test/TestClasses.py index fb84009..e7e7500 100644 --- a/test/TestClasses.py +++ b/test/TestClasses.py @@ -97,30 +97,14 @@ def ctype(self) -> Optional[str]: def __str__(self) -> str: return self.name - def __eq__(self, __value: object) -> bool: - if isinstance(__value, str): - return self.name == __value - elif isinstance(__value, IntegerType): - return self.name == __value.name + def __eq__(self, other: object) -> bool: + if isinstance(other, str): + return self.name == other + elif isinstance(other, IntegerType): + return self.name == other.name else: return False @model_serializer def ser_model(self) -> str: return self.name - - if TYPE_CHECKING: - # Ensure type checkers see the correct return type - def model_dump( - self, - *, - mode: Literal["json", "python"] | str = "python", - include: Any = None, - exclude: Any = None, - by_alias: bool = False, - exclude_unset: bool = False, - exclude_defaults: bool = False, - exclude_none: bool = False, - round_trip: bool = False, - warnings: bool = True, - ) -> dict[str, Any]: ... 
diff --git a/test/conftest.py b/test/conftest.py index fdb4bb8..ba379ef 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -22,12 +22,7 @@ import pydantic import pytest -from NnxMapping import ( - NnxName, - NnxTestConfClsFromName, - is_valid_nnx_name, - valid_nnx_names, -) +from NnxMapping import NnxMapping, NnxName from NnxTestClasses import NnxTest, NnxTestGenerator from TestClasses import implies @@ -53,8 +48,9 @@ def pytest_addoption(parser): parser.addoption( "-A", "--accelerator", - choices=valid_nnx_names(), - default="ne16", + type=NnxName, + choices=list(NnxName), + default=NnxName.ne16, help="Choose an accelerator to test. Default: ne16", ) parser.addoption( @@ -82,10 +78,6 @@ def pytest_generate_tests(metafunc): timeout = metafunc.config.getoption("timeout") nnxName = metafunc.config.getoption("accelerator") - assert is_valid_nnx_name( - nnxName - ), f"Given accelerator {nnxName} not supported. Supported accelerators: {valid_nnx_names()}" - if recursive: tests_dirs = test_dirs test_dirs = [] @@ -94,7 +86,7 @@ def pytest_generate_tests(metafunc): # Load valid tests nnxTestNames = [] - nnxTestConfCls = NnxTestConfClsFromName(nnxName) + nnxTestConfCls = NnxMapping[nnxName].testConfCls for test_dir in test_dirs: try: test = NnxTest.load(nnxTestConfCls, test_dir) diff --git a/test/test.py b/test/test.py index ccbdad7..9b48f83 100644 --- a/test/test.py +++ b/test/test.py @@ -23,7 +23,7 @@ from pathlib import Path from typing import Dict, Optional, Tuple, Type, Union -from NnxMapping import NnxName, NnxTestConfClsFromName, NnxWeightClsFromName +from NnxMapping import NnxMapping, NnxName from NnxTestClasses import NnxTest, NnxTestConf, NnxTestHeaderGenerator, NnxWeight HORIZONTAL_LINE = "\n" + "-" * 100 + "\n" @@ -110,20 +110,20 @@ def assert_message( def test( nnxName: NnxName, - nnxTestName: Tuple[NnxTest, str], + nnxTestName: str, timeout: int, ): - nnxTestConfCls = NnxTestConfClsFromName(nnxName) + testConfCls, weightCls = NnxMapping[nnxName] + # conftest.py makes sure the test is valid and generated - nnxTest = NnxTest.load(nnxTestConfCls, nnxTestName) + nnxTest = NnxTest.load(testConfCls, nnxTestName) - nnxWeightCls = NnxWeightClsFromName(nnxName) - NnxTestHeaderGenerator(nnxWeightCls).generate(nnxTestName, nnxTest) + NnxTestHeaderGenerator(weightCls).generate(nnxTestName, nnxTest) Path("app/src/nnx_layer.c").touch() cmd = f"make -C app all run platform=gvsoc" passed, msg, stdout, stderr = execute_command( - cmd=cmd, timeout=timeout, envflags={"ACCELERATOR": nnxName} + cmd=cmd, timeout=timeout, envflags={"ACCELERATOR": str(nnxName)} ) assert passed, assert_message(msg, nnxTestName, cmd, stdout, stderr) From 8bceea087d23596486b8ee007a529990d0a683c2 Mon Sep 17 00:00:00 2001 From: Luka Macan Date: Thu, 12 Dec 2024 16:11:36 +0100 Subject: [PATCH 7/9] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 120e935..f655e53 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ ### Added +- add NnxMapping dictionary that maps accelerator name to the accelerator specific classes - choice of data generation method (ones, incremented, or random) - N-EUREKA accelerator support: 3x3, 1x1, and 3x3 depthwise convolution kernels - Support for kernels without normalization and quantization for NE16 @@ -16,6 +17,8 @@ ### Changed +- conftest now passes only strings to test.py to improve readability of pytest logs +- NnxMemoryLayout is now NnxWeight and also has a method for source generation - the `wmem` field in the 
test configurations is now required - `ne16_task_init` got split into smaller parts: `ne16_task_init`, `ne16_task_set_op_to_conv`, `ne16_task_set_weight_offset`, `ne16_task_set_bits`, `ne16_task_set_norm_quant` - strides in `ne16_task_set_strides`, `ne16_task_set_dims`, and `ne16_task_set_ptrs` are now strides between consecutive elements in that dimension @@ -29,6 +32,7 @@ ### Fixed +- global shift should have been of type uint8 not int32 - type conversion compiler warning ## [0.3.0] - 2024-01-14 From d493a8839be37de9662abd314da8eea17eb60312 Mon Sep 17 00:00:00 2001 From: Luka Macan Date: Thu, 12 Dec 2024 16:24:44 +0100 Subject: [PATCH 8/9] Change testgen to use the new NnxMapping --- test/testgen.py | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/test/testgen.py b/test/testgen.py index e88cf16..84691e4 100644 --- a/test/testgen.py +++ b/test/testgen.py @@ -27,10 +27,7 @@ import toml from HeaderWriter import HeaderWriter -from Ne16TestConf import Ne16TestConf -from Ne16Weight import Ne16Weight -from NeurekaTestConf import NeurekaTestConf -from NeurekaWeight import NeurekaWeight +from NnxMapping import NnxMapping, NnxName from NnxTestClasses import ( NnxTest, NnxTestConf, @@ -43,8 +40,8 @@ def headers_gen( args, - nnxWeightCls: Type[NnxWeight], nnxTestConfCls: Type[NnxTestConf], + nnxWeightCls: Type[NnxWeight], test: Optional[NnxTest] = None, ): if test is None: @@ -72,8 +69,8 @@ def print_tensors(test: NnxTest): def test_gen( args, - nnxWeightCls: Type[NnxWeight], nnxTestConfCls: Type[NnxTestConf], + nnxWeightCls: Type[NnxWeight], ): if args.conf.endswith(".toml"): test_conf_dict = toml.load(args.conf) @@ -100,7 +97,7 @@ def test_gen( if not args.skip_save: test.save(args.test_dir) if args.headers: - headers_gen(args, nnxWeightCls, nnxTestConfCls, test) + headers_gen(args, nnxTestConfCls, nnxWeightCls, test) if args.print_tensors: print_tensors(test) @@ -130,8 +127,8 @@ def _regen_recursive( def test_regen( args, - nnxWeightCls: Type[NnxWeight], nnxTestConfCls: Type[NnxTestConf], + nnxWeightCls: Type[NnxWeight], ): _ = nnxWeightCls regen_tensors = set(args.tensors) @@ -155,8 +152,9 @@ def add_common_arguments(parser: argparse.ArgumentParser): parser.add_argument( "-a", "--accelerator", - choices=["ne16", "neureka"], - default="ne16", + type=NnxName, + choices=list(NnxName), + default=NnxName.ne16, help="Choose an accelerator. Default: ne16", ) @@ -244,13 +242,6 @@ def add_common_arguments(parser: argparse.ArgumentParser): args.gen_ones and args.gen_incremented ), "You can choose only one method for input generation." -if args.accelerator == "ne16": - nnxWeightCls = Ne16Weight - nnxTestConfCls = Ne16TestConf -elif args.accelerator == "neureka": - nnxWeightCls = NeurekaWeight - nnxTestConfCls = NeurekaTestConf -else: - assert False, f"Unsupported accelerator {args.accelerator}." 
+testConfCls, weightCls = NnxMapping[args.accelerator]
 
-args.func(args, nnxWeightCls, nnxTestConfCls)
+args.func(args, testConfCls, weightCls)

From 2f213b81953d49c5bdbabdde2a3f98a4c8f82d7b Mon Sep 17 00:00:00 2001
From: Luka Macan
Date: Thu, 12 Dec 2024 17:02:11 +0100
Subject: [PATCH 9/9] Remove unused imports and move asserts for datagen into
 test_gen

---
 test/testgen.py | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/test/testgen.py b/test/testgen.py
index 84691e4..5c0d1ba 100644
--- a/test/testgen.py
+++ b/test/testgen.py
@@ -22,8 +22,6 @@
 import typing
 from typing import Optional, Set, Type, Union
 
-import numpy as np
-import numpy.typing as npt
 import toml
 
 from HeaderWriter import HeaderWriter
@@ -34,7 +32,6 @@
     NnxTestGenerator,
     NnxTestHeaderGenerator,
     NnxWeight,
-    WmemLiteral,
 )
 
 
@@ -72,6 +69,10 @@ def test_gen(
     nnxTestConfCls: Type[NnxTestConf],
     nnxWeightCls: Type[NnxWeight],
 ):
+    assert not (
+        args.gen_ones and args.gen_incremented
+    ), "You can choose only one method for input generation."
+
     if args.conf.endswith(".toml"):
         test_conf_dict = toml.load(args.conf)
     elif args.conf.endswith(".json"):
@@ -238,10 +239,6 @@
 
 args = parser.parse_args()
 
-assert not (
-    args.gen_ones and args.gen_incremented
-), "You can choose only one method for input generation."
-
 testConfCls, weightCls = NnxMapping[args.accelerator]
 
 args.func(args, testConfCls, weightCls)
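
Editor's note: the series reworks three interfaces at once: data generation (patches 1 and 9), weight encoding and source generation (patch 2), and accelerator dispatch (patches 5, 6, and 8). The sketch below shows how the reworked pieces compose after patch 9. It is a minimal illustration, not code from the series; the config path and test directory name are hypothetical, and only calls visible in the patches above are used.

    import toml

    from NnxMapping import NnxMapping, NnxName
    from NnxTestClasses import NnxTestGenerator, NnxTestHeaderGenerator

    # One NnxMapping lookup returns the accelerator-specific classes,
    # replacing the per-accelerator if/elif chains.
    testConfCls, weightCls = NnxMapping[NnxName.neureka]

    # Validate a test configuration (hypothetical path).
    conf = testConfCls.model_validate(toml.load("tests/conv3x3/conf.toml"))

    # The data generation method is selectable; RANDOM remains the default.
    test = NnxTestGenerator.from_conf(
        conf,
        data_generation_method=NnxTestGenerator.DataGenerationMethod.INCREMENTED,
    )

    # The NnxWeight subclass now both encodes the weights and generates
    # their sources, honoring the accelerator's wmem options.
    NnxTestHeaderGenerator(weightCls).generate("tests/conv3x3", test)

On the command line, the same choice of generation method is exposed through the new testgen.py flags --gen-ones and --gen-incremented, which are mutually exclusive, enforced by the assert that patch 9 moves into test_gen.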