Commit

Correct linting
colluca committed Feb 8, 2024
1 parent 8a1829d commit 5ba32c1
Showing 7 changed files with 47 additions and 59 deletions.
3 changes: 2 additions & 1 deletion sw/dnn/concat/verify.py
@@ -49,7 +49,8 @@ def main():
     inputs = layer['inputs']
     prec = layer['dtype']
 
-    inputs = [elf.from_symbol(f'input_{i}', ctype_from_precision_t(prec)) for i in range(num_inputs)]
+    inputs = [elf.from_symbol(f'input_{i}', ctype_from_precision_t(prec))
+              for i in range(num_inputs)]
     inputs = [torch.from_numpy(tensor.reshape(input_shape)) for tensor in inputs]
 
     # Verify results
2 changes: 1 addition & 1 deletion sw/dnn/conv2d/verify.py
@@ -56,7 +56,7 @@ def main():
         'dtype': 'I'
     }
 
-    layer = elf.from_buffer('layer', layer_struct)
+    layer = elf.from_symbol('layer', layer_struct)
     co = layer['CO']
     ci = layer['CI']
     ih = layer['IH']
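Note: the fix above swaps from_buffer for from_symbol, the Elf accessor the other verify scripts already use. A minimal usage sketch, assuming a hypothetical ELF path and an abbreviated field list (the real layer struct defines more fields than shown here):

# Hedged sketch: the path, the full field list, and the exact return type
# of from_symbol are assumptions based only on the calls visible in this diff.
from elf import Elf  # util/sim/elf.py

elf = Elf('build/conv2d.elf')  # hypothetical path
layer_struct = {'CO': 'I', 'CI': 'I', 'IH': 'I', 'dtype': 'I'}  # abbreviated
layer = elf.from_symbol('layer', layer_struct)  # decode the 'layer' symbol
co, ci, ih = layer['CO'], layer['CI'], layer['IH']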
2 changes: 1 addition & 1 deletion sw/dnn/fused_concat_linear/verify.py
@@ -54,7 +54,7 @@ def main():
     weights_shape = [layer['in_width']*num_inputs, layer['out_width']]
     prec = layer['dtype']
 
-    inputs = [elf.from_symbol(f'input_{i}', ctype_from_precision_t(prec))
+    inputs = [elf.from_symbol(f'input_{i}', ctype_from_precision_t(prec))
               for i in range(num_inputs)]
    inputs = [torch.from_numpy(tensor.reshape(input_shape)) for tensor in inputs]
     weights = elf.from_symbol('weights', ctype_from_precision_t(prec))
51 changes: 20 additions & 31 deletions sw/dnn/fusedconv/verify.py
@@ -71,43 +71,32 @@ def main():
     }
 
     layer = elf.from_symbol('layer', layer_struct)
-    ifmap = [np.array(bytes_to_float(
-        elf.get_symbol_contents('fusedconv_pInBuffer_dram'),
-        PRECISION_T[layer['dtype']]),
-        dtype=NUMPY_T[PRECISION_T[layer['dtype']]])]
-    ifmap = torch.from_numpy(
-        ifmap[0].reshape(layer['dim_in_y'], layer['dim_in_x'], layer['ch_in']))
-    kernel = [np.array(bytes_to_float(
-        elf.get_symbol_contents('fusedconv_pWeight_dram'),
-        PRECISION_T[layer['dtype']]),
-        dtype=NUMPY_T[PRECISION_T[layer['dtype']]])]
+    dim_in_y = layer['dim_in_y']
+    dim_in_x = layer['dim_in_x']
+    dim_kernel_y = layer['dim_kernel_y']
+    dim_kernel_x = layer['dim_kernel_x']
+    ch_in = layer['ch_in']
+    ch_out = layer['ch_out']
+    prec = layer['dtype']
+
+    ifmap = elf.from_symbol('fusedconv_pInBuffer_dram', ctype_from_precision_t(prec))
+    ifmap = torch.from_numpy(ifmap.reshape(dim_in_y, dim_in_x, ch_in))
+    kernel = elf.from_symbol('fusedconv_pWeight_dram', ctype_from_precision_t(prec))
     if not layer['depthwise']:
-        kernel = torch.from_numpy(
-            kernel[0].reshape(layer['ch_out'], layer['dim_kernel_y'],
-                              layer['dim_kernel_x'], layer['ch_in']))
+        kernel = torch.from_numpy(kernel.reshape(ch_out, dim_kernel_y, dim_kernel_x, ch_in))
     else:
-        kernel = torch.from_numpy(
-            kernel[0].reshape(layer['dim_kernel_y'], layer['dim_kernel_x'],
-                              layer['ch_out']))
-
-    bn_k = [np.array(bytes_to_float(
-        elf.get_symbol_contents('fusedconv_kappa_dram'),
-        PRECISION_T[layer['dtype']]),
-        dtype=NUMPY_T[PRECISION_T[layer['dtype']]])]
-    bn_k = torch.from_numpy(bn_k[0])
-    bn_l = [np.array(bytes_to_float(
-        elf.get_symbol_contents('fusedconv_lambda_dram'),
-        PRECISION_T[layer['dtype']]),
-        dtype=NUMPY_T[PRECISION_T[layer['dtype']]])]
-    bn_l = torch.from_numpy(bn_l[0])
+        kernel = torch.from_numpy(kernel.reshape(dim_kernel_y, dim_kernel_x, ch_out))
+
+    bn_k = elf.from_symbol('fusedconv_kappa_dram', ctype_from_precision_t(prec))
+    bn_k = torch.from_numpy(bn_k)
+    bn_l = elf.from_symbol('fusedconv_lambda_dram', ctype_from_precision_t(prec))
+    bn_l = torch.from_numpy(bn_l)
 
     flag_y_accumulate_start = layer['flag_y_accumulate_start']
 
     # Verify results
-    output_actual = np.array(bytes_to_float(
-        raw_results['fusedconv_pOutBuffer_dram'],
-        PRECISION_T[layer['dtype']]),
-        dtype=NUMPY_T[PRECISION_T[layer['dtype']]])
+    output_actual = from_buffer(raw_results['fusedconv_pOutBuffer_dram'],
+                                ctype_from_precision_t(prec))
     output_golden, _, _ = golden_model(ifmap, kernel,
                                        bn_k, bn_l,
                                        layer,
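Note: the rewrite above is the same refactor the commit applies to every verify script: the hand-rolled bytes_to_float/NUMPY_T decoding is replaced by Elf.from_symbol plus a plain reshape. A minimal sketch of the new read-and-reshape pattern, assuming a hypothetical ELF path, precision value, and tensor dimensions:

# Hedged sketch: path, precision encoding, and dimensions are made up;
# only the from_symbol/reshape pattern is taken from the diff above.
import torch

from data_utils import ctype_from_precision_t  # util/sim/data_utils.py
from elf import Elf                            # util/sim/elf.py

elf = Elf('build/fusedconv.elf')      # hypothetical path
prec = 2                              # assumed dtype encoding
dim_in_y, dim_in_x, ch_in = 8, 8, 16  # assumed dimensions

# from_symbol yields a flat numpy array; reshape restores the tensor layout
ifmap = elf.from_symbol('fusedconv_pInBuffer_dram', ctype_from_precision_t(prec))
ifmap = torch.from_numpy(ifmap.reshape(dim_in_y, dim_in_x, ch_in))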
46 changes: 22 additions & 24 deletions sw/snRuntime/src/dma.h
@@ -220,8 +220,8 @@ inline snrt_dma_txid_t snrt_dma_load_1d_tile(void *dst, void *src,
 /// selected by tile_idx. Every element in the src and dst arrays has prec
 /// bytes.
 inline snrt_dma_txid_t snrt_dma_store_1d_tile(void *dst, void *src,
-                                             size_t tile_idx, size_t tile_size,
-                                             uint32_t prec) {
+                                              size_t tile_idx, size_t tile_size,
+                                              uint32_t prec) {
     size_t tile_nbytes = tile_size * prec;
     return snrt_dma_start_1d(dst + tile_idx * tile_nbytes, src, tile_nbytes);
 }
@@ -230,46 +230,44 @@ inline snrt_dma_txid_t snrt_dma_store_1d_tile(void *dst, void *src,
 /// of shape (full_x1_size, full_x0_size). The specific tile is selected
 /// by the (tile_x1_idx, tile_x0_idx) tuple. Every element in the src and
 /// destination arrays has prec bytes.
-inline snrt_dma_txid_t snrt_dma_load_2d_tile(void *dst, void *src,
-                                             size_t tile_x1_idx, size_t tile_x0_idx,
-                                             size_t tile_x1_size, size_t tile_x0_size,
-                                             size_t full_x0_size, uint32_t prec) {
+inline snrt_dma_txid_t snrt_dma_load_2d_tile(
+    void *dst, void *src, size_t tile_x1_idx, size_t tile_x0_idx,
+    size_t tile_x1_size, size_t tile_x0_size, size_t full_x0_size,
+    uint32_t prec) {
     size_t src_offset = 0;
     // Advance src array in x0 and x1 dimensions, and convert to byte offset
     src_offset += tile_x0_idx * tile_x0_size;
     src_offset += tile_x1_idx * tile_x1_size * full_x0_size;
     src_offset *= prec;
     // Initiate transfer
-    return snrt_dma_start_2d(
-        dst,                  // dst
-        src + src_offset,     // src
-        tile_x0_size * prec,  // size
-        tile_x0_size * prec,  // dst_stride
-        full_x0_size * prec,  // src_stride
-        tile_x1_size          // repeat
+    return snrt_dma_start_2d(dst,                  // dst
+                             src + src_offset,     // src
+                             tile_x0_size * prec,  // size
+                             tile_x0_size * prec,  // dst_stride
+                             full_x0_size * prec,  // src_stride
+                             tile_x1_size          // repeat
     );
 }
 
 /// Store a 2D-tile of shape (tile_x1_size, tile_x0_size) to the 2D array
 /// of shape (full_x1_size, full_x0_size). The specific tile is selected
 /// by the (tile_x1_idx, tile_x0_idx) tuple. Every element in the src and
 /// destination arrays has prec bytes.
-inline snrt_dma_txid_t snrt_dma_store_2d_tile(void *dst, void *src,
-                                              size_t tile_x1_idx, size_t tile_x0_idx,
-                                              size_t tile_x1_size, size_t tile_x0_size,
-                                              size_t full_x0_size, uint32_t prec) {
+inline snrt_dma_txid_t snrt_dma_store_2d_tile(
+    void *dst, void *src, size_t tile_x1_idx, size_t tile_x0_idx,
+    size_t tile_x1_size, size_t tile_x0_size, size_t full_x0_size,
+    uint32_t prec) {
     size_t dst_offset = 0;
     // Advance dst array in x0 and x1 dimensions, and convert to byte offset
     dst_offset += tile_x0_idx * tile_x0_size;
     dst_offset += tile_x1_idx * tile_x1_size * full_x0_size;
     dst_offset *= prec;
     // Initiate transfer
-    return snrt_dma_start_2d(
-        dst + dst_offset,     // dst
-        src,                  // src
-        tile_x0_size * prec,  // size
-        full_x0_size * prec,  // dst_stride
-        tile_x0_size * prec,  // src_stride
-        tile_x1_size          // repeat
+    return snrt_dma_start_2d(dst + dst_offset,     // dst
+                             src,                  // src
+                             tile_x0_size * prec,  // size
+                             full_x0_size * prec,  // dst_stride
+                             tile_x0_size * prec,  // src_stride
+                             tile_x1_size          // repeat
     );
 }
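Note: the reformatting above leaves the tile arithmetic untouched: a tile's element offset in the flat array is tile_x0_idx * tile_x0_size along x0 plus tile_x1_idx * tile_x1_size * full_x0_size along x1, multiplied by prec to get bytes, and the transfer then copies tile_x1_size rows of tile_x0_size elements with a source stride of full_x0_size. A small numpy sketch of that arithmetic (element granularity, no DMA, made-up shapes):

# Hedged sketch: mirrors the offset/stride math of snrt_dma_load_2d_tile
# in plain numpy; shapes and indices below are arbitrary examples.
import numpy as np

full_x1_size, full_x0_size = 8, 16
tile_x1_size, tile_x0_size = 4, 4
full = np.arange(full_x1_size * full_x0_size).reshape(full_x1_size, full_x0_size)

def load_2d_tile(full, tile_x1_idx, tile_x0_idx, tile_x1_size, tile_x0_size):
    flat = full.ravel()
    # Advance in x0 and x1 dimensions (element offset; the C code scales by prec)
    src_offset = tile_x0_idx * tile_x0_size \
        + tile_x1_idx * tile_x1_size * full.shape[1]
    # repeat = tile_x1_size rows of size tile_x0_size, src_stride = full_x0_size
    rows = [flat[src_offset + r * full.shape[1]:][:tile_x0_size]
            for r in range(tile_x1_size)]
    return np.stack(rows)

tile = load_2d_tile(full, 1, 2, tile_x1_size, tile_x0_size)
assert (tile == full[4:8, 8:12]).all()  # tile (1, 2) of a (2, 4)-tile grid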
1 change: 0 additions & 1 deletion util/sim/data_utils.py
@@ -10,7 +10,6 @@
 import numpy as np
 
 
-
 def emit_license():
     s = (f"// Copyright {datetime.now().year} ETH Zurich and University of Bologna.\n"
          f"// Licensed under the Apache License, Version 2.0, see LICENSE for details.\n"
1 change: 1 addition & 0 deletions util/sim/elf.py
@@ -13,6 +13,7 @@
 
 class Elf(object):
 
+
     def __init__(self, elf_path):
         self.elf_path = elf_path
         self.stream = open(self.elf_path, 'rb')
