wip: add cmake to tests
lukamac committed Dec 5, 2024
1 parent 7458477 commit eba3064
Showing 9 changed files with 95 additions and 45 deletions.
4 changes: 3 additions & 1 deletion test/Ne16Weight.py
@@ -100,7 +100,9 @@ def decode(
return weight

@staticmethod
def source_generate(wmem: WmemLiteral, init: npt.NDArray[np.uint8], header_writer: HeaderWriter) -> None:
def source_generate(
wmem: WmemLiteral, init: npt.NDArray[np.uint8], header_writer: HeaderWriter
) -> None:
assert wmem == "tcdm", f"Invalid wmem source provided: {wmem}"
section = "PI_L1"

3 changes: 2 additions & 1 deletion test/NeurekaV2TestConf.py
@@ -103,7 +103,8 @@ def check_valid_out_type_with_norm_quant(self) -> NeurekaV2TestConf:
@field_validator("wmem")
@classmethod
def check_valid_wmem(cls, v: WmemLiteral) -> WmemLiteral:
_supported_wmem = ["tcdm", "sram", "mram"]
breakpoint()
_supported_wmem = ["sram", "mram"]
assert (
v in _supported_wmem
), f"Unsupported wmem {v}. Supported {_supported_wmem}."
4 changes: 3 additions & 1 deletion test/NeurekaV2Weight.py
@@ -132,7 +132,9 @@ def decode(
return weight

@staticmethod
def source_generate(wmem: WmemLiteral, init: npt.NDArray[np.uint8], header_writer: HeaderWriter) -> None:
def source_generate(
wmem: WmemLiteral, init: npt.NDArray[np.uint8], header_writer: HeaderWriter
) -> None:
if wmem == "sram":
section = '__attribute__((section(".weightmem_sram")))'
elif wmem == "mram":
4 changes: 3 additions & 1 deletion test/NeurekaWeight.py
@@ -158,7 +158,9 @@ def decode(
return weight

@staticmethod
def source_generate(wmem: WmemLiteral, init: npt.NDArray[np.uint8], header_writer: HeaderWriter) -> None:
def source_generate(
wmem: WmemLiteral, init: npt.NDArray[np.uint8], header_writer: HeaderWriter
) -> None:
if wmem == "sram":
section = '__attribute__((section(".weightmem_sram")))'
elif wmem == "mram":
40 changes: 23 additions & 17 deletions test/NnxTestClasses.py
@@ -21,12 +21,12 @@
import os
from abc import ABC, abstractmethod
from enum import Enum
from typing import Callable, Literal, Optional, Set, Tuple, Type, Union
from typing import Literal, Optional, Set, Tuple, Type, Union

import numpy as np
import numpy.typing as npt
import torch
from pydantic import BaseModel, PositiveInt, field_validator, model_validator
from pydantic import BaseModel, PositiveInt, model_validator

from HeaderWriter import HeaderWriter
from NeuralEngineFunctionalModel import NeuralEngineFunctionalModel
@@ -226,19 +226,25 @@ def incr_generator():
x += 1
if x > _type.max:
x = 0
return torch.from_numpy(
np.fromiter(incr_generator(), count=np.prod(shape), dtype=np.int64)
).reshape(
(shape[0], shape[2], shape[3], shape[1])
).permute((0, 3, 1, 2)).type(torch.int64)

return (
torch.from_numpy(
np.fromiter(incr_generator(), count=np.prod(shape), dtype=np.int64)
)
.reshape((shape[0], shape[2], shape[3], shape[1]))
.permute((0, 3, 1, 2))
.type(torch.int64)
)

class DataGenerationMethod(Enum):
RANDOM = 0
ONES = 1
INCREMENTED = 2

@staticmethod
def _generate_data(_type: IntegerType, shape: Tuple, method: NnxTestGenerator.DataGenerationMethod):
def _generate_data(
_type: IntegerType, shape: Tuple, method: NnxTestGenerator.DataGenerationMethod
):
if method == NnxTestGenerator.DataGenerationMethod.RANDOM:
return NnxTestGenerator._generate_random(_type, shape)
elif method == NnxTestGenerator.DataGenerationMethod.ONES:
@@ -287,13 +293,15 @@ def from_conf(
if scale is None:
assert conf.scale_type is not None
scale = NnxTestGenerator._generate_data(
conf.scale_type, shape=scale_shape,
conf.scale_type,
shape=scale_shape,
method=data_generation_method,
)
if conf.has_bias and bias is None:
assert conf.bias_type is not None
bias = NnxTestGenerator._generate_data(
conf.bias_type, shape=bias_shape,
conf.bias_type,
shape=bias_shape,
method=data_generation_method,
).type(torch.int32)
if global_shift is None:
@@ -342,9 +350,7 @@ class NnxWeight(ABC):
@staticmethod
@abstractmethod
def encode(
weight: npt.NDArray[np.uint8],
bits: int,
depthwise: bool = False
weight: npt.NDArray[np.uint8], bits: int, depthwise: bool = False
) -> npt.NDArray[np.uint8]:
"""Unroll weight into expected memory format
@@ -368,9 +374,7 @@ def decode(
@staticmethod
@abstractmethod
def source_generate(
wmem: WmemLiteral,
init: npt.NDArray[np.uint8],
header_writer: HeaderWriter
wmem: WmemLiteral, init: npt.NDArray[np.uint8], header_writer: HeaderWriter
) -> None:
"""Function implementing generation of weight's sources"""
...
@@ -429,7 +433,9 @@ def generate(self, test_name: str, test: NnxTest):
test.conf.depthwise,
)

self.nnxWeightCls.source_generate(test.conf.wmem, weight_init, self.header_writer)
self.nnxWeightCls.source_generate(
test.conf.wmem, weight_init, self.header_writer
)

# Render scale
if test.scale is not None:
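A side note on the NnxTestClasses.py reformatting above: the incremented-data chain fills the tensor channel-last and only then permutes to the torch-native layout, so consecutive values run along the channel axis. A minimal sketch of that layout trick, using a hypothetical (1, 3, 2, 2) shape that is not taken from the test suite:

    import numpy as np
    import torch

    shape = (1, 3, 2, 2)  # hypothetical (N, C, H, W) shape
    flat = np.arange(np.prod(shape), dtype=np.int64)

    # Same chain as in the diff: lay values out as (N, H, W, C), then permute to (N, C, H, W).
    t = (
        torch.from_numpy(flat)
        .reshape((shape[0], shape[2], shape[3], shape[1]))
        .permute((0, 3, 1, 2))
        .type(torch.int64)
    )

    print(t[0, :, 0, 0])  # tensor([0, 1, 2]): increments walk along the channel axis
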
22 changes: 20 additions & 2 deletions test/conftest.py
@@ -17,6 +17,7 @@
# SPDX-License-Identifier: Apache-2.0

import os
import subprocess
from typing import Union

import pydantic
@@ -28,7 +29,7 @@
from NeurekaV2TestConf import NeurekaV2TestConf
from NeurekaV2Weight import NeurekaV2Weight
from NeurekaWeight import NeurekaWeight
from NnxTestClasses import NnxTest, NnxTestGenerator, NnxWeight
from NnxTestClasses import NnxTest, NnxTestGenerator

_SUPPORTED_ACCELERATORS = ["ne16", "neureka", "neureka_v2"]

@@ -70,6 +71,13 @@ def pytest_addoption(parser):
default=120,
help="Execution timeout in seconds. Default: 120s",
)
parser.addoption(
"--build-flow",
dest="build_flow",
choices=["make", "cmake"],
default="make",
help="Choose the build flow. Default: make",
)


def _find_test_dirs(path: Union[str, os.PathLike]):
@@ -82,6 +90,7 @@ def pytest_generate_tests(metafunc):
regenerate = metafunc.config.getoption("regenerate")
timeout = metafunc.config.getoption("timeout")
nnxName = metafunc.config.getoption("accelerator")
build_flow = metafunc.config.getoption("build_flow")

if nnxName == "ne16":
nnxWeightCls = Ne16Weight
@@ -115,7 +124,7 @@
nnxTestAndNames.append((test, test_dir))
except pydantic.ValidationError as e:
for error in e.errors():
if error['type'] == 'missing':
if error["type"] == "missing":
raise e

nnxTestAndNames.append(
Expand All @@ -127,7 +136,16 @@ def pytest_generate_tests(metafunc):
)
)

if build_flow == "cmake":
os.makedirs("app/build/gvsoc_workdir", exist_ok=True)
assert "GVSOC" in os.environ, "The GVSOC environment variable is not set."
subprocess.run(
f"cmake -Sapp -Bapp/build -GNinja -DCMAKE_TOOLCHAIN_FILE=cmake/toolchain_llvm.cmake -DACCELERATOR={nnxName}".split(),
check=True,
)

metafunc.parametrize("nnxTestAndName", nnxTestAndNames)
metafunc.parametrize("timeout", [timeout])
metafunc.parametrize("nnxName", [nnxName])
metafunc.parametrize("nnxWeightCls", [nnxWeightCls])
metafunc.parametrize("build_flow", [build_flow])
1 change: 1 addition & 0 deletions test/requirements.txt
@@ -3,3 +3,4 @@ pydantic
pytest
pytorch==1.11.0
toml
ninja
56 changes: 39 additions & 17 deletions test/test.py
@@ -21,7 +21,7 @@
import re
import subprocess
from pathlib import Path
from typing import Dict, Optional, Tuple, Type, Union
from typing import Dict, Literal, Optional, Tuple, Type, Union

from NnxTestClasses import NnxTest, NnxTestConf, NnxTestHeaderGenerator, NnxWeight

@@ -93,12 +93,11 @@ def execute_command(
return status, msg, stdout, stderr


def assert_message(
msg: str, test_name: str, cmd: str, stdout: str, stderr: Optional[str] = None
):
def assert_message(msg: str, test_name: str, stdout: str, stderr: Optional[str] = None):
retval = (
f"Test {test_name} failed: {msg}\n"
f"Command: {cmd}\n" + HORIZONTAL_LINE + f"\nCaptured stdout:\n{stdout}\n"
+ HORIZONTAL_LINE
+ f"\nCaptured stdout:\n{stdout}\n"
)

if stderr is not None:
@@ -107,35 +106,58 @@ def test(
return retval


def build(nnxName: str, flow: Literal["make", "cmake"]) -> None:
env = os.environ

if flow == "make":
cmd = "make -C app all platform=gvsoc"
env["ACCELERATOR"] = nnxName
elif flow == "cmake":
cmd = "cmake --build app/build"

subprocess.run(cmd.split(), check=True, capture_output=True, text=True, env=env)


def run(nnxName: str, flow: Literal["make", "cmake"]) -> str:
env = os.environ

if flow == "make":
cmd = "make -C app run platform=gvsoc"
env["ACCELERATOR"] = nnxName
elif flow == "cmake":
bin = os.path.abspath("app/build/test-pulp-nnx")
gvsoc = os.environ["GVSOC"]
cmd = f"{gvsoc} --binary {bin} --work-dir app/build/gvsoc_workdir --target siracusa image flash run"

proc = subprocess.run(
cmd.split(), check=True, capture_output=True, text=True, env=env
)

return proc.stdout


def test(
nnxTestAndName: Tuple[NnxTest, str],
timeout: int,
nnxName: str,
nnxWeightCls: Type[NnxWeight],
build_flow: Literal["cmake", "make"],
):
nnxTest, nnxTestName = nnxTestAndName
NnxTestHeaderGenerator(nnxWeightCls).generate(
nnxTestName, nnxTest
)

Path("app/src/nnx_layer.c").touch()
cmd = f"make -C app all run platform=gvsoc"
passed, msg, stdout, stderr = execute_command(
cmd=cmd, timeout=timeout, envflags={"ACCELERATOR": nnxName}
)
NnxTestHeaderGenerator(nnxWeightCls).generate(nnxTestName, nnxTest)

assert passed, assert_message(msg, nnxTestName, cmd, stdout, stderr)
build(nnxName, build_flow)
stdout = run(nnxName, build_flow)

match_success = re.search(r"> Success! No errors found.", stdout)
match_fail = re.search(r"> Failure! Found (\d*)/(\d*) errors.", stdout)

assert match_success or match_fail, assert_message(
"No regexes matched.", nnxTestName, cmd, stdout
"No regexes matched.", nnxTestName, stdout
)

assert not match_fail, assert_message(
f"Errors found: {match_fail.group(1)}/{match_fail.group(2)}",
nnxTestName,
cmd,
stdout,
)
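Taken together, the cmake path added in this commit boils down to: configure once per session (conftest.py), then build and run per test (test.py). A condensed sketch of that flow, assuming the working directory is the test directory and GVSOC is set; commands and paths are copied from the diff:

    import os
    import subprocess

    accelerator = "neureka"  # any entry of _SUPPORTED_ACCELERATORS

    # Configure (pytest_generate_tests, once per session)
    os.makedirs("app/build/gvsoc_workdir", exist_ok=True)
    assert "GVSOC" in os.environ, "The GVSOC environment variable is not set."
    subprocess.run(
        "cmake -Sapp -Bapp/build -GNinja "
        "-DCMAKE_TOOLCHAIN_FILE=cmake/toolchain_llvm.cmake "
        f"-DACCELERATOR={accelerator}".split(),
        check=True,
    )

    # Build (build() in test.py)
    subprocess.run("cmake --build app/build".split(), check=True)

    # Run on GVSOC (run() in test.py) and capture stdout for the pass/fail regexes
    binary = os.path.abspath("app/build/test-pulp-nnx")
    cmd = (
        f"{os.environ['GVSOC']} --binary {binary} "
        "--work-dir app/build/gvsoc_workdir --target siracusa image flash run"
    )
    stdout = subprocess.run(cmd.split(), check=True, capture_output=True, text=True).stdout
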
6 changes: 1 addition & 5 deletions test/testgen.py
@@ -19,13 +19,10 @@
import argparse
import json
import os
from typing import Callable, Optional, Set, Type, Union
from typing import Optional, Set, Type, Union

import numpy as np
import numpy.typing as npt
import toml

from HeaderWriter import HeaderWriter
from Ne16TestConf import Ne16TestConf
from Ne16Weight import Ne16Weight
from NeurekaTestConf import NeurekaTestConf
@@ -38,7 +35,6 @@
NnxTestGenerator,
NnxTestHeaderGenerator,
NnxWeight,
WmemLiteral,
)


Expand Down
