Merge branch 'main' into oneapi_separableconv
laurilaatu authored Dec 5, 2024
2 parents d1c10ca + c8e1857 commit 6de4043
Showing 26 changed files with 695 additions and 94 deletions.
149 changes: 149 additions & 0 deletions docs/attr_doc_gen.py
@@ -0,0 +1,149 @@
import numbers

import hls4ml.backends as backends
import hls4ml.model.attributes as attributes
import hls4ml.model.layers as layers


class AttrList:
def __init__(self, cls_name, cls_attrs) -> None:
self.cls_name = cls_name
self.config_attrs = [attr for attr in cls_attrs if attr.configurable is True]
self.type_attrs = [attr for attr in cls_attrs if attr.__class__.__name__ == 'TypeAttribute']
self.weight_attrs = [attr for attr in cls_attrs if attr.__class__.__name__ == 'WeightAttribute']
self.base_attrs = [attr for attr in cls_attrs if attr not in self.config_attrs + self.type_attrs + self.weight_attrs]
self.backend_attrs = {}
self.reverse_backend_attrs = [] # Will hold (attr, backend_name) pairs, used temporarily
self.unique_backend_attrs = []

def add_backend_attrs(self, backend_name, backend_attrs):
self.backend_attrs[backend_name] = backend_attrs

for attr in backend_attrs:
self.reverse_backend_attrs.append((attr, backend_name))

def sift_backend_attrs(self):
grouped_dict = {}
for attr, backend_name in self.reverse_backend_attrs:
if attr not in grouped_dict:
grouped_dict[attr] = []
grouped_dict[attr].append(backend_name)

for attr, backend_names in grouped_dict.items():
attr.available_in = backend_names
self.unique_backend_attrs.append(attr)

@property
def only_configurable(self):
all_attrs = self.config_attrs + self.type_attrs + self.unique_backend_attrs
return [attr for attr in all_attrs if attr.configurable is True]


def convert_to_attr_list():
all_backends = backends.get_available_backends()
# Removing duplicates but preserving order
all_layers = list(dict.fromkeys(layers.layer_map.values()))
all_layers_attrs = []

for layer_cls in all_layers:
base_attrs = layer_cls.expected_attributes

attr_list = AttrList(layer_cls.__name__, base_attrs)

for backend_name in all_backends:
backend = backends.get_backend(backend_name)

backend_cls = backend.create_layer_class(layer_cls)
backend_attrs = backend_cls.expected_attributes

diff_atts = [
attr for attr in backend_attrs if attr not in base_attrs
] # Sets are faster, but don't preserve order
if len(diff_atts) > 0:
attr_list.add_backend_attrs(backend.name, diff_atts)

all_layers_attrs.append(attr_list)

for attr_list in all_layers_attrs:
attr_list.sift_backend_attrs()

return all_layers_attrs


def print_attrs(attrs, file):
for attr in attrs:
if attr.value_type == numbers.Integral:
vtype = 'int'
elif attr.__class__ == attributes.ChoiceAttribute:
choices = ','.join([str(c) for c in attr.choices])
vtype = f'list [{choices}]'
else:
vtype = attr.value_type.__name__ if hasattr(attr.value_type, '__name__') else str(attr.value_type)

if attr.default is None:
file.write('* ' + attr.name + ': ' + vtype + '\n\n')
else:
file.write('* ' + attr.name + ': ' + vtype + ' (Default: ' + str(attr.default) + ')\n\n')

if attr.description is not None:
file.write(' * ' + attr.description + '\n\n')

if hasattr(attr, 'available_in'):
file.write(' * Available in: ' + ', '.join(attr.available_in) + '\n\n')


def write_all_attributes(all_layers_attrs):
with open('attributes.rst', mode='w') as file:
file.write('================\n')
file.write('Layer attributes\n')
file.write('================\n\n\n')

for attr_list in all_layers_attrs:
file.write(attr_list.cls_name + '\n')
file.write('=' * len(attr_list.cls_name) + '\n')

if len(attr_list.base_attrs) > 0:
file.write('Base attributes\n')
file.write('---------------\n')
                print_attrs(attr_list.base_attrs, file)

if len(attr_list.type_attrs) > 0:
file.write('Type attributes\n')
file.write('---------------\n')
                print_attrs(attr_list.type_attrs, file)

if len(attr_list.weight_attrs) > 0:
file.write('Weight attributes\n')
file.write('-----------------\n')
print_attrs(attr_list.weight_attrs, file)

if len(attr_list.config_attrs) > 0:
file.write('Configurable attributes\n')
file.write('-----------------------\n')
print_attrs(attr_list.config_attrs, file)

if len(attr_list.backend_attrs) > 0:
file.write('Backend-specific attributes\n')
file.write('---------------------------\n')
print_attrs(attr_list.unique_backend_attrs, file)


def write_only_configurable(all_layers_attrs):
with open('attributes.rst', mode='w') as file:
file.write('================\n')
file.write('Layer attributes\n')
file.write('================\n\n\n')

for attr_list in all_layers_attrs:
file.write(attr_list.cls_name + '\n')
file.write('=' * len(attr_list.cls_name) + '\n')

config_attrs = attr_list.only_configurable
if len(config_attrs) > 0:
print_attrs(config_attrs, file)


if __name__ == '__main__':
all_layers_attrs = convert_to_attr_list()
write_all_attributes(all_layers_attrs)
# write_only_configurable(all_layers_attrs)
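
A minimal usage sketch of the generator above, assuming it is run from the docs/ directory; the attribute and its values are made up purely to show the reST shape that print_attrs emits:

import io
import numbers

from hls4ml.model.attributes import ConfigurableAttribute

from attr_doc_gen import print_attrs  # the helper defined in this file

# Hypothetical attribute, for illustration only.
attr = ConfigurableAttribute('reuse_factor', value_type=numbers.Integral, default=1)

buf = io.StringIO()
print_attrs([attr], buf)
print(buf.getvalue())
# Expected output (one bullet per attribute, blank-line separated):
# * reuse_factor: int (Default: 1)
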
23 changes: 16 additions & 7 deletions hls4ml/backends/catapult/catapult_backend.py
@@ -32,6 +32,7 @@
from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType, PackedType
from hls4ml.report import parse_catapult_report
+from hls4ml.utils import attribute_descriptions as descriptions
from hls4ml.utils.fixed_point_utils import ceil_log2


@@ -51,10 +52,12 @@ def _register_layer_attributes(self):

for layer in rnn_layers:
attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1))
-            attrs.append(ConfigurableAttribute('static', value_type=bool, default=True))
-            attrs.append(ConfigurableAttribute('table_size', default=1024))
-            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
+            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor))
+            attrs.append(
+                ConfigurableAttribute('static', value_type=bool, default=True, description=descriptions.recurrent_static)
+            )
+            attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
+            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
self.attribute_map[layer] = attrs

# Add ParallelizationFactor to Conv1D/2D
@@ -65,16 +68,22 @@ def _register_layer_attributes(self):

for layer in pf_layers:
attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('parallelization_factor', default=1))
+            attrs.append(ConfigurableAttribute('parallelization_factor', default=1, description=descriptions.conv_pf))
self.attribute_map[layer] = attrs

# Add ConvImplementation to Convolution+Pooling layers
cnn_layers = [Conv1D, Conv2D, SeparableConv1D, SeparableConv2D, DepthwiseConv2D, Pooling1D, Pooling2D]

for layer in cnn_layers:
attrs = self.attribute_map.get(layer, [])
-            # attrs.append(ConfigurableAttribute('conv_implementation', value_type=str, default='LineBuffer'))
-            attrs.append(ChoiceAttribute('conv_implementation', choices=['LineBuffer', 'Encoded'], default='LineBuffer'))
+            attrs.append(
+                ChoiceAttribute(
+                    'conv_implementation',
+                    choices=['LineBuffer', 'Encoded'],
+                    default='LineBuffer',
+                    description=descriptions.conv_implementation,
+                )
+            )
self.attribute_map[layer] = attrs

sep_conv_layers = [SeparableConv1D, SeparableConv2D]
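
The descriptions module imported in these backend diffs (hls4ml/utils/attribute_descriptions.py, one of the 26 changed files not expanded on this page) appears to hold shared attribute docstrings. A rough sketch of its shape; only the names are taken from the diffs above, and every string here is placeholder wording, not the repository's actual text:

# Sketch of hls4ml/utils/attribute_descriptions.py -- placeholder wording only.
# Each entry is a plain string reused by every backend that registers the attribute.
reuse_factor = 'Placeholder: how many times each multiplier is reused per computation.'
table_size = 'Placeholder: number of entries in the activation lookup table.'
table_type = 'Placeholder: data type of the lookup table entries.'
conv_pf = 'Placeholder: parallelization factor of the convolution kernel.'
conv_implementation = "Placeholder: sliding-window implementation ('LineBuffer' or 'Encoded')."
recurrent_static = 'Placeholder: whether the recurrent activation tables are statically allocated.'
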
24 changes: 18 additions & 6 deletions hls4ml/backends/fpga/fpga_backend.py
@@ -45,6 +45,7 @@
UnspecifiedPrecisionType,
XnorPrecisionType,
)
+from hls4ml.utils import attribute_descriptions as descriptions
from hls4ml.writer import get_writer


@@ -74,7 +75,7 @@ def __init__(self, name):

for layer in accum_layers:
attrs = self.attribute_map.get(layer, [])
-            attrs.append(TypeAttribute('accum'))
+            attrs.append(TypeAttribute('accum', description=descriptions.accum_type))
self.attribute_map[layer] = attrs

rf_layers = accum_layers + [
@@ -90,7 +91,7 @@

for layer in rf_layers:
attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('reuse_factor', default=1))
+            attrs.append(ConfigurableAttribute('reuse_factor', default=1, description=descriptions.reuse_factor))
self.attribute_map[layer] = attrs

# separable is kind of special because it is effectively two layers that will be split
@@ -104,23 +105,34 @@ def __init__(self, name):
self.attribute_map[layer] = attrs

act_attrs = self.attribute_map.get(Activation, [])
-        act_attrs.append(ConfigurableAttribute('table_size', default=1024))
-        act_attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
+        act_attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
+        act_attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
self.attribute_map[Activation] = act_attrs

softmax_attrs = self.attribute_map.get(Softmax, [])
-        softmax_attrs.append(ChoiceAttribute('implementation', ['latency', 'stable', 'argmax', 'legacy'], default='stable'))
-        softmax_attrs.append(ConfigurableAttribute('skip', value_type=bool, default=False))
+        softmax_attrs.append(
+            ChoiceAttribute(
+                'implementation',
+                ['latency', 'stable', 'argmax', 'legacy'],
+                default='stable',
+                description=descriptions.softmax_implementation,
+            )
+        )
+        softmax_attrs.append(
+            ConfigurableAttribute('skip', value_type=bool, default=False, description=descriptions.softmax_skip)
+        )
softmax_attrs.append(
TypeAttribute(
'exp_table',
default=FixedPrecisionType(18, 8, rounding_mode=RoundingMode.RND, saturation_mode=SaturationMode.SAT),
+                description=descriptions.table_type,
)
)
softmax_attrs.append(
TypeAttribute(
'inv_table',
default=FixedPrecisionType(18, 8, rounding_mode=RoundingMode.RND, saturation_mode=SaturationMode.SAT),
+                description=descriptions.table_type,
)
)
self.attribute_map[Softmax] = softmax_attrs
File renamed without changes.
9 changes: 5 additions & 4 deletions hls4ml/backends/oneapi/oneapi_backend.py
@@ -10,6 +10,7 @@
from hls4ml.model.layers import GRU, LSTM, Activation, Conv1D, Conv2D, Dense, Embedding, Layer, SimpleRNN, Softmax
from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType
+from hls4ml.utils import attribute_descriptions as descriptions

# from hls4ml.report import parse_oneapi_report

@@ -30,9 +31,9 @@ def _register_layer_attributes(self):

for layer in rnn_layers:
attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1))
-            attrs.append(ConfigurableAttribute('table_size', default=1024))
-            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
+            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor))
+            attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
+            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
self.attribute_map[layer] = attrs

# Add ParallelizationFactor to Conv1D/2D
@@ -43,7 +44,7 @@

for layer in pf_layers:
attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('parallelization_factor', default=1))
+            attrs.append(ConfigurableAttribute('parallelization_factor', default=1, description=descriptions.conv_pf))
self.attribute_map[layer] = attrs

def _register_flows(self):
Expand Down
7 changes: 4 additions & 3 deletions hls4ml/backends/quartus/quartus_backend.py
@@ -11,6 +11,7 @@
from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType
from hls4ml.report import parse_quartus_report
+from hls4ml.utils import attribute_descriptions as descriptions


@contextmanager
@@ -39,9 +40,9 @@ def _register_layer_attributes(self):

for layer in rnn_layers:
attrs = self.attribute_map.get(layer, [])
-            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1))
-            attrs.append(ConfigurableAttribute('table_size', default=1024))
-            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
+            attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor))
+            attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
+            attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
self.attribute_map[layer] = attrs

def _register_flows(self):
25 changes: 23 additions & 2 deletions hls4ml/backends/vivado/passes/convolution_templates.py
@@ -60,6 +60,8 @@
typedef {config_t} mult_config;
template<unsigned K, unsigned S, unsigned W>
using scale_index = nnet::{scale_index_type}<K, S, W>;
+    template<class data_T, class res_T, class CONFIG_T>
+    using conv_kernel = nnet::{conv_fn}<data_T, res_T, CONFIG_T>;
}};
const ap_uint<config{index}::filt_width> config{index}::pixels[] = {{{instructions}}};\n"""

@@ -93,11 +95,30 @@ def format(self, node):
else:
params['fill_fn'] = 'FillConv1DBuffer'

+        is_pointwise_parallel_latency = (
+            node.get_attr('filt_width') == 1
+            and node.get_attr('strategy').lower() == 'latency'
+            and node.model.config.get_config_value('IOType') == 'io_parallel'
+        )
+        if is_pointwise_parallel_latency:
+            params['conv_fn'] = f'pointwise_conv_{node.index}'
+        else:
+            if node.get_attr('strategy').lower() == 'latency':
+                params['conv_fn'] = 'Conv1DLatency'
+            else:
+                params['conv_fn'] = 'Conv1DResource'
+
conv_config = self.template.format(**params)

mult_params = self._default_config_params(node)
-        mult_params['n_in'] = node.get_attr('n_chan') * node.get_attr('filt_width')
-        mult_params['n_out'] = node.get_attr('n_filt')
+        if is_pointwise_parallel_latency:
+            mult_params['n_in'] = int(
+                node.get_attr('in_width') * node.get_attr('n_chan') * node.get_attr('filt_width') / mult_params['reuse']
+            )
+            mult_params['n_out'] = int(node.get_attr('in_width') * node.get_attr('n_filt') / mult_params['reuse'])
+        else:
+            mult_params['n_in'] = node.get_attr('n_chan') * node.get_attr('filt_width')
+            mult_params['n_out'] = node.get_attr('n_filt')
mult_params['nzeros'] = node.get_weights('weight').nzeros
mult_params['product_type'] = get_backend('vivado').product_type(
node.get_input_variable().type.precision, node.get_weights('weight').type.precision
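
To make the new multiplier sizing concrete, a small worked example with made-up dimensions (pointwise Conv1D, io_parallel, latency strategy), mirroring the arithmetic added above:

# Hypothetical dimensions, for illustration only.
in_width, n_chan, n_filt, filt_width, reuse = 32, 3, 8, 1, 4

# Pointwise + io_parallel + latency: the multiplication block is sized over the
# whole feature map and divided by the reuse factor.
n_in = int(in_width * n_chan * filt_width / reuse)   # 32 * 3 * 1 / 4 = 24
n_out = int(in_width * n_filt / reuse)               # 32 * 8 / 4 = 64

# Every other case keeps the per-pixel sizing.
n_in_default = n_chan * filt_width   # 3
n_out_default = n_filt               # 8
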