
Merge branch 'main' into qonnx-1p0
jmitrevs authored Oct 1, 2024
2 parents 86abdd2 + d439f26 commit 6363702
Showing 21 changed files with 316 additions and 208 deletions.
29 changes: 26 additions & 3 deletions hls4ml/backends/catapult/passes/core_templates.py
@@ -115,6 +115,15 @@ def format(self, node):
typedef {table_t.name} table_t;
}};\n"""

param_activ_config_template = """struct {type}_config{index} : nnet::activ_config {{
static const unsigned n_in = {n_in};
static const unsigned table_size = {table_size};
static const unsigned io_type = nnet::{iotype};
static const unsigned reuse_factor = {reuse};
typedef {table_t.name} table_t;
typedef {param_t.name} param_t;
}};\n"""

hard_activ_config_template = """struct {type}_config{index} {{
static const unsigned n_in = {n_in};
static const {slope_t.name} slope;
@@ -140,14 +149,16 @@ def format(self, node):
}};\n"""

activ_function_template = 'nnet::{activation}<{input_t}, {output_t}, {config}>({input}, {output});'
param_activ_function_template = 'nnet::{activation}<{input_t}, {output_t}, {config}>({input}, {param}, {output});'
param_activ_function_template = (
'nnet::{activation}<{input_t}, {param_t.name}, {output_t}, {config}>({input}, {param}, {output});'
)

activ_include_list = ['nnet_utils/nnet_activation.h', 'nnet_utils/nnet_activation_stream.h']


class ActivationConfigTemplate(LayerConfigTemplate):
def __init__(self):
super().__init__((Activation, ParametrizedActivation, PReLU))
super().__init__(Activation)
self.template = activ_config_template

def format(self, node):
@@ -157,6 +168,18 @@ def format(self, node):
return self.template.format(**params)


class ParamActivationConfigTemplate(LayerConfigTemplate):
def __init__(self):
super().__init__((ParametrizedActivation, PReLU))
self.template = param_activ_config_template

def format(self, node):
params = self._default_config_params(node)
params['type'] = node.get_attr('activation')

return self.template.format(**params)


class HardActivationConfigTemplate(LayerConfigTemplate):
def __init__(self):
super().__init__(HardActivation)
@@ -210,7 +233,7 @@ def __init__(self):
def format(self, node):
params = self._default_function_params(node)
params['activation'] = node.get_attr('activation').lower()
params['param'] = node.get_weights('alpha').name
params['param'] = node.get_weights('param').name
params['config'] = '{}_config{}'.format(node.get_attr('activation'), node.index)

return self.template.format(**params)
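
For reference, a minimal sketch of what the new templates render to for an imagined PReLU node. The layer index, sizes, and ac_fixed precisions below are illustrative assumptions, and SimpleNamespace stands in for hls4ml's type objects, which only need a .name attribute here.

from types import SimpleNamespace

# Copy of the new config template, reproduced so the sketch is self-contained.
param_activ_config_template = """struct {type}_config{index} : nnet::activ_config {{
    static const unsigned n_in = {n_in};
    static const unsigned table_size = {table_size};
    static const unsigned io_type = nnet::{iotype};
    static const unsigned reuse_factor = {reuse};
    typedef {table_t.name} table_t;
    typedef {param_t.name} param_t;
}};\n"""

# Hypothetical parameter values for a PReLU layer with index 3.
example = {
    'type': 'prelu', 'index': 3, 'n_in': 16, 'table_size': 1024,
    'iotype': 'io_parallel', 'reuse': 1,
    'table_t': SimpleNamespace(name='ac_fixed<18,8>'),
    'param_t': SimpleNamespace(name='ac_fixed<16,6>'),
}

print(param_activ_config_template.format(**example))
# struct prelu_config3 : nnet::activ_config {
#     ...
#     typedef ac_fixed<18,8> table_t;
#     typedef ac_fixed<16,6> param_t;   <-- new field carrying the parameter type
# };

The matching function-call template now also threads {param_t.name} through as an extra template argument, which is what the updated signatures in nnet_activation.h (further down in this commit) expect.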
27 changes: 15 additions & 12 deletions hls4ml/backends/fpga/fpga_backend.py
@@ -1,6 +1,6 @@
import math
import os
import re
import subprocess
from bisect import bisect_left
from collections.abc import Iterable

@@ -147,19 +147,22 @@ def compile(self, model):
Returns:
string: Returns the name of the compiled library.
"""
curr_dir = os.getcwd()
os.chdir(model.config.get_output_dir())

lib_name = None
try:
ret_val = os.system('bash build_lib.sh')
if ret_val != 0:
raise Exception(f'Failed to compile project "{model.config.get_project_name()}"')
lib_name = '{}/firmware/{}-{}.so'.format(
model.config.get_output_dir(), model.config.get_project_name(), model.config.get_config_value('Stamp')
)
finally:
os.chdir(curr_dir)
ret_val = subprocess.run(
['./build_lib.sh'],
shell=True,
text=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=model.config.get_output_dir(),
)
if ret_val.returncode != 0:
print(ret_val.stdout)
raise Exception(f'Failed to compile project "{model.config.get_project_name()}"')
lib_name = '{}/firmware/{}-{}.so'.format(
model.config.get_output_dir(), model.config.get_project_name(), model.config.get_config_value('Stamp')
)

return lib_name

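A minimal standalone sketch of the same pattern (the script name and working directory below are illustrative, not hls4ml's API): subprocess.run with cwd= replaces the old os.chdir() round trip, and the merged stdout/stderr log is only shown when the build fails.

import subprocess

def run_build(script='./build_lib.sh', workdir='my-hls-test'):
    # Run a build script inside `workdir` without touching this process's cwd.
    result = subprocess.run(
        [script],
        shell=True,                # execute through the shell, as in the diff above
        text=True,                 # decode captured output as str
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # fold stderr into stdout so there is a single log
        cwd=workdir,               # replaces os.chdir(workdir) ... os.chdir(curr_dir)
    )
    if result.returncode != 0:
        print(result.stdout)       # surface the build log only on failure
        raise RuntimeError(f'Build failed in {workdir}')
    return result

Compared with the old os.system approach, nothing mutates the caller's working directory, and the build output is captured rather than leaking straight to the terminal.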
29 changes: 26 additions & 3 deletions hls4ml/backends/quartus/passes/core_templates.py
@@ -125,6 +125,15 @@ def format(self, node):
typedef {table_t.name} table_t;
}};\n"""

param_activ_config_template = """struct {type}_config{index} : nnet::activ_config {{
static const unsigned n_in = {n_in};
static const unsigned table_size = {table_size};
static const unsigned io_type = nnet::{iotype};
static const unsigned reuse_factor = {reuse};
typedef {table_t.name} table_t;
typedef {param_t.name} param_t;
}};\n"""

hard_activ_config_template = """struct {type}_config{index} {{
static const unsigned n_in = {n_in};
static const {slope_t.name} slope;
@@ -146,14 +155,16 @@ def format(self, node):
}};\n"""

activ_function_template = 'nnet::{activation}<{input_t}, {output_t}, {config}>({input}, {output});'
param_activ_function_template = 'nnet::{activation}<{input_t}, {output_t}, {config}>({input}, {param}, {output});'
param_activ_function_template = (
'nnet::{activation}<{input_t}, {param_t.name}, {output_t}, {config}>({input}, {param}, {output});'
)

activ_include_list = ['nnet_utils/nnet_activation.h', 'nnet_utils/nnet_activation_stream.h']


class ActivationConfigTemplate(LayerConfigTemplate):
def __init__(self):
super().__init__((Activation, ParametrizedActivation, PReLU, UnaryLUT))
super().__init__((Activation, UnaryLUT))
self.template = activ_config_template

def format(self, node):
@@ -163,6 +174,18 @@ def format(self, node):
return self.template.format(**params)


class ParamActivationConfigTemplate(LayerConfigTemplate):
def __init__(self):
super().__init__((ParametrizedActivation, PReLU))
self.template = param_activ_config_template

def format(self, node):
params = self._default_config_params(node)
params['type'] = node.get_attr('activation')

return self.template.format(**params)


class HardActivationConfigTemplate(LayerConfigTemplate):
def __init__(self):
super().__init__(HardActivation)
@@ -216,7 +239,7 @@ def __init__(self):
def format(self, node):
params = self._default_function_params(node)
params['activation'] = node.get_attr('activation').lower()
params['param'] = node.get_weights('alpha').name
params['param'] = node.get_weights('param').name
params['config'] = '{}_config{}'.format(node.get_attr('activation'), node.index)

return self.template.format(**params)
29 changes: 26 additions & 3 deletions hls4ml/backends/vivado/passes/core_templates.py
@@ -116,6 +116,15 @@ def format(self, node):
typedef {table_t.name} table_t;
}};\n"""

param_activ_config_template = """struct {type}_config{index} : nnet::activ_config {{
static const unsigned n_in = {n_in};
static const unsigned table_size = {table_size};
static const unsigned io_type = nnet::{iotype};
static const unsigned reuse_factor = {reuse};
typedef {table_t.name} table_t;
typedef {param_t.name} param_t;
}};\n"""

hard_activ_config_template = """struct {type}_config{index} {{
static const unsigned n_in = {n_in};
static const {slope_t.name} slope;
@@ -138,14 +147,16 @@ def format(self, node):
}};\n"""

activ_function_template = 'nnet::{activation}<{input_t}, {output_t}, {config}>({input}, {output});'
param_activ_function_template = 'nnet::{activation}<{input_t}, {output_t}, {config}>({input}, {param}, {output});'
param_activ_function_template = (
'nnet::{activation}<{input_t}, {param_t.name}, {output_t}, {config}>({input}, {param}, {output});'
)

activ_include_list = ['nnet_utils/nnet_activation.h', 'nnet_utils/nnet_activation_stream.h']


class ActivationConfigTemplate(LayerConfigTemplate):
def __init__(self):
super().__init__((Activation, ParametrizedActivation, PReLU, UnaryLUT))
super().__init__((Activation, UnaryLUT))
self.template = activ_config_template

def format(self, node):
@@ -155,6 +166,18 @@ def format(self, node):
return self.template.format(**params)


class ParamActivationConfigTemplate(LayerConfigTemplate):
def __init__(self):
super().__init__((ParametrizedActivation, PReLU))
self.template = param_activ_config_template

def format(self, node):
params = self._default_config_params(node)
params['type'] = node.get_attr('activation')

return self.template.format(**params)


class HardActivationConfigTemplate(LayerConfigTemplate):
def __init__(self):
super().__init__(HardActivation)
@@ -208,7 +231,7 @@ def __init__(self):
def format(self, node):
params = self._default_function_params(node)
params['activation'] = node.get_attr('activation').lower()
params['param'] = node.get_weights('alpha').name
params['param'] = node.get_weights('param').name
params['config'] = '{}_config{}'.format(node.get_attr('activation'), node.index)

return self.template.format(**params)
2 changes: 1 addition & 1 deletion hls4ml/converters/keras/core.py
@@ -71,7 +71,7 @@ def parse_activation_layer(keras_layer, input_names, input_shapes, data_reader):
elif layer['class_name'] == 'ReLU':
layer['class_name'] = 'Activation'
elif layer['class_name'] == 'PReLU':
layer['alpha_data'] = get_weights_data(data_reader, layer['name'], 'alpha')
layer['param_data'] = get_weights_data(data_reader, layer['name'], 'alpha')

if layer['class_name'] == 'Activation' and layer['activation'] == 'softmax':
layer['class_name'] = 'Softmax'
2 changes: 1 addition & 1 deletion hls4ml/converters/pytorch/core.py
@@ -55,7 +55,7 @@ def parse_activation_layer(operation, layer_name, input_names, input_shapes, nod
if layer['class_name'] == 'ELU':
layer['activ_param'] = class_object.alpha
if layer['class_name'] == 'PReLU':
layer['alpha_data'] = class_object.weight.data.numpy()
layer['param_data'] = class_object.weight.data.numpy()
if layer['class_name'] == 'Threshold':
layer['activ_param'] = class_object.threshold
layer['class_name'] = 'ThresholdedReLU'
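For context, a small hedged example (module size chosen arbitrarily) of what class_object.weight.data.numpy() yields for a PyTorch PReLU, i.e. the per-channel slope array that now lands in layer['param_data'] rather than layer['alpha_data']:

import torch

prelu = torch.nn.PReLU(num_parameters=4, init=0.25)  # arbitrary example module
param_data = prelu.weight.data.numpy()               # what the converter stores
print(param_data.shape)  # (4,)
print(param_data)        # [0.25 0.25 0.25 0.25]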
20 changes: 19 additions & 1 deletion hls4ml/model/layers.py
@@ -907,6 +907,17 @@ def initialize(self):


class ParametrizedActivation(Activation):
_expected_attributes = [
Attribute('n_in'),
Attribute('activation', value_type=str),
TypeAttribute('param'),
]

def initialize(self):
super().initialize()
param_t = NamedType(*reversed(self.model.config.get_precision(self, 'param')))
self.set_attr('param_t', param_t)

def _get_act_function_name(self):
act = self.get_attr('activation').lower()
if act == 'leakyrelu':
@@ -944,9 +955,16 @@ def initialize(self):


class PReLU(Activation):
_expected_attributes = [
Attribute('n_in'),
Attribute('activation', value_type=str),
WeightAttribute('param'),
TypeAttribute('param'),
]

def initialize(self):
super().initialize()
self.add_weights_variable(name='alpha', var_name='a{index}')
self.add_weights_variable(name='param', var_name='a{index}')


class Softmax(Activation):
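One detail worth calling out in ParametrizedActivation.initialize is the reversed(...) wrapper. Under the assumption (not verified from this diff) that config.get_precision(self, 'param') returns a (precision, type_name) pair while NamedType(name, precision) takes its arguments in the opposite order, the call simply swaps them. A stripped-down restatement with stand-in values:

# Assumed return shape: (precision, type_name); NamedType wants (name, precision).
precision_and_name = ('ac_fixed<16,6>', 'prelu_param_t')
name, precision = reversed(precision_and_name)
print(name, precision)  # prelu_param_t ac_fixed<16,6>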
16 changes: 16 additions & 0 deletions hls4ml/model/optimizer/passes/infer_precision.py
@@ -84,6 +84,9 @@ def _infer_precision(self, node, types_to_infer):
if node_class in ['SimpleRNN', 'LSTM', 'GRU']:
return self._infer_rnn_precision(node, types_to_infer)

if node_class in ['ParametrizedActivation']:
return self._infer_par_act_precision(node, types_to_infer)

# What about quantized activation layer? Setting it to 'auto' manually will break it here. We should prevent
# this in config_from_* functions

@@ -557,3 +560,16 @@ def _infer_rnn_precision(self, node, types_to_infer):
inferred_types.append(f'{weightvar}_t')

return inferred_types

def _infer_par_act_precision(self, node, types_to_infer):
inferred_types = []

# For threshold relu, set the parameter precision to be the input precision by default;
# for other parametrized activations, just allow the default precision to be used.
# Can override these values in the configuration by explicitly setting them.
if 'param_t' in types_to_infer and node.get_attr('activation').lower() == 'thresholdedrelu':
in_type = node.get_input_variable().type.precision
node.attributes['param_t'].type = in_type
inferred_types.append('param_t')

return inferred_types
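
The rule above reduces to a small decision that can be sketched on its own (the type strings below are illustrative): only a ThresholdedReLU parameter inherits the input precision; other parametrized activations keep whatever default param_t was assigned.

def pick_param_precision(activation, input_precision, default_precision='ap_fixed<16,6>'):
    # Standalone restatement of _infer_par_act_precision's policy (illustrative only).
    if activation.lower() == 'thresholdedrelu':
        return input_precision      # theta is compared directly against the input
    return default_precision        # leaky_relu / elu alpha keeps the default

print(pick_param_precision('ThresholdedReLU', 'ap_fixed<18,8>'))  # ap_fixed<18,8>
print(pick_param_precision('LeakyReLU', 'ap_fixed<18,8>'))        # ap_fixed<16,6>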
20 changes: 10 additions & 10 deletions hls4ml/templates/catapult/nnet_utils/nnet_activation.h
@@ -686,8 +686,8 @@ void hard_tanh(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {
// *************************************************
// Leaky RELU Activation
// *************************************************
template <class data_T, class res_T, typename CONFIG_T>
void leaky_relu(data_T data[CONFIG_T::n_in], data_T alpha, res_T res[CONFIG_T::n_in]) {
template <class data_T, class param_T, class res_T, typename CONFIG_T>
void leaky_relu(data_T data[CONFIG_T::n_in], param_T alpha, res_T res[CONFIG_T::n_in]) {
//#pragma HLS PIPELINE

data_T datareg;
@@ -703,8 +703,8 @@ void leaky_relu(data_T data[CONFIG_T::n_in], data_T alpha, res_T res[CONFIG_T::n
// *************************************************
// Thresholded RELU Activation
// *************************************************
template <class data_T, class res_T, typename CONFIG_T>
void thresholded_relu(data_T data[CONFIG_T::n_in], data_T theta, res_T res[CONFIG_T::n_in]) {
template <class data_T, class param_T, class res_T, typename CONFIG_T>
void thresholded_relu(data_T data[CONFIG_T::n_in], param_T theta, res_T res[CONFIG_T::n_in]) {
//#pragma HLS PIPELINE

data_T datareg;
@@ -917,8 +917,8 @@ template <typename CONFIG_T, int N_TABLE> void init_elu_table(typename CONFIG_T:

#ifndef USE_AC_MATH

template <class data_T, class res_T, typename CONFIG_T>
void elu(data_T data[CONFIG_T::n_in], const res_T alpha, res_T res[CONFIG_T::n_in]) {
template <class data_T, class param_T, class res_T, typename CONFIG_T>
void elu(data_T data[CONFIG_T::n_in], const param_T alpha, res_T res[CONFIG_T::n_in]) {
// Initialize the lookup table
#ifdef __HLS_SYN__
bool initialized = false;
@@ -953,8 +953,8 @@ void elu(data_T data[CONFIG_T::n_in], const res_T alpha, res_T res[CONFIG_T::n_i

#else

template <class data_T, class res_T, typename CONFIG_T>
void elu(data_T data[CONFIG_T::n_in], const res_T alpha, res_T res[CONFIG_T::n_in]) {
template <class data_T, class param_T, class res_T, typename CONFIG_T>
void elu(data_T data[CONFIG_T::n_in], const param_T alpha, res_T res[CONFIG_T::n_in]) {
for (int ii = 0; ii < CONFIG_T::n_in; ii++) {
ac_math::ac_elu_pwl(data[ii], res[ii], alpha);
}
@@ -1045,8 +1045,8 @@ template <class data_T, class res_T, typename CONFIG_T> void selu(data_T data[CO
// *************************************************
// PReLU Activation
// *************************************************
template <class data_T, class res_T, typename CONFIG_T>
void prelu(data_T data[CONFIG_T::n_in], data_T alpha[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {
template <class data_T, class param_T, class res_T, typename CONFIG_T>
void prelu(data_T data[CONFIG_T::n_in], param_T alpha[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {
//#pragma HLS PIPELINE

data_T datareg;
