Introduce optional description to layer attributes #1127

Merged · 6 commits · Dec 5, 2024
149 changes: 149 additions & 0 deletions docs/attr_doc_gen.py
@@ -0,0 +1,149 @@
import numbers

import hls4ml.backends as backends
import hls4ml.model.attributes as attributes
import hls4ml.model.layers as layers


class AttrList:
def __init__(self, cls_name, cls_attrs) -> None:
self.cls_name = cls_name
self.config_attrs = [attr for attr in cls_attrs if attr.configurable is True]
self.type_attrs = [attr for attr in cls_attrs if attr.__class__.__name__ == 'TypeAttribute']
self.weight_attrs = [attr for attr in cls_attrs if attr.__class__.__name__ == 'WeightAttribute']
self.base_attrs = [attr for attr in cls_attrs if attr not in self.config_attrs + self.type_attrs + self.weight_attrs]
self.backend_attrs = {}
self.reverse_backend_attrs = [] # Will hold (attr, backend_name) pairs, used temporarily
self.unique_backend_attrs = []

def add_backend_attrs(self, backend_name, backend_attrs):
self.backend_attrs[backend_name] = backend_attrs

for attr in backend_attrs:
self.reverse_backend_attrs.append((attr, backend_name))

def sift_backend_attrs(self):
grouped_dict = {}
for attr, backend_name in self.reverse_backend_attrs:
if attr not in grouped_dict:
grouped_dict[attr] = []
grouped_dict[attr].append(backend_name)

for attr, backend_names in grouped_dict.items():
attr.available_in = backend_names
self.unique_backend_attrs.append(attr)

@property
def only_configurable(self):
all_attrs = self.config_attrs + self.type_attrs + self.unique_backend_attrs
return [attr for attr in all_attrs if attr.configurable is True]


def convert_to_attr_list():
all_backends = backends.get_available_backends()
# Removing duplicates but preserving order
all_layers = list(dict.fromkeys(layers.layer_map.values()))
all_layers_attrs = []

for layer_cls in all_layers:
base_attrs = layer_cls.expected_attributes

attr_list = AttrList(layer_cls.__name__, base_attrs)

for backend_name in all_backends:
backend = backends.get_backend(backend_name)

backend_cls = backend.create_layer_class(layer_cls)
backend_attrs = backend_cls.expected_attributes

diff_atts = [
attr for attr in backend_attrs if attr not in base_attrs
] # Sets are faster, but don't preserve order
if len(diff_atts) > 0:
attr_list.add_backend_attrs(backend.name, diff_atts)

all_layers_attrs.append(attr_list)

for attr_list in all_layers_attrs:
attr_list.sift_backend_attrs()

return all_layers_attrs


def print_attrs(attrs, file):
for attr in attrs:
if attr.value_type == numbers.Integral:
vtype = 'int'
elif attr.__class__ == attributes.ChoiceAttribute:
choices = ','.join([str(c) for c in attr.choices])
vtype = f'list [{choices}]'
else:
vtype = attr.value_type.__name__ if hasattr(attr.value_type, '__name__') else str(attr.value_type)

if attr.default is None:
file.write('* ' + attr.name + ': ' + vtype + '\n\n')
else:
file.write('* ' + attr.name + ': ' + vtype + ' (Default: ' + str(attr.default) + ')\n\n')

if attr.description is not None:
file.write(' * ' + attr.description + '\n\n')

if hasattr(attr, 'available_in'):
file.write(' * Available in: ' + ', '.join(attr.available_in) + '\n\n')


def write_all_attributes(all_layers_attrs):
with open('attributes.rst', mode='w') as file:
file.write('================\n')
file.write('Layer attributes\n')
file.write('================\n\n\n')

for attr_list in all_layers_attrs:
file.write(attr_list.cls_name + '\n')
file.write('=' * len(attr_list.cls_name) + '\n')

if len(attr_list.base_attrs) > 0:
file.write('Base attributes\n')
file.write('---------------\n')
print_attrs(attr_list.base_attrs, file)

if len(attr_list.type_attrs) > 0:
file.write('Type attributes\n')
file.write('---------------\n')
print_attrs(attr_list.type_attrs, file)

if len(attr_list.weight_attrs) > 0:
file.write('Weight attributes\n')
file.write('-----------------\n')
print_attrs(attr_list.weight_attrs, file)

if len(attr_list.config_attrs) > 0:
file.write('Configurable attributes\n')
file.write('-----------------------\n')
print_attrs(attr_list.config_attrs, file)

if len(attr_list.backend_attrs) > 0:
file.write('Backend-specific attributes\n')
file.write('---------------------------\n')
print_attrs(attr_list.unique_backend_attrs, file)


def write_only_configurable(all_layers_attrs):
with open('attributes.rst', mode='w') as file:
file.write('================\n')
file.write('Layer attributes\n')
file.write('================\n\n\n')

for attr_list in all_layers_attrs:
file.write(attr_list.cls_name + '\n')
file.write('=' * len(attr_list.cls_name) + '\n')

config_attrs = attr_list.only_configurable
if len(config_attrs) > 0:
print_attrs(config_attrs, file)


if __name__ == '__main__':
all_layers_attrs = convert_to_attr_list()
write_all_attributes(all_layers_attrs)
# write_only_configurable(all_layers_attrs)
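
For context, the generator above can be run directly (it is also what the __main__ block invokes). A minimal usage sketch follows, assuming the script is executed from the docs/ directory of an hls4ml checkout with this branch installed; the sample bullet in the comments only illustrates the format print_attrs() emits, not the exact wording of any description.

# Sketch: regenerate docs/attributes.rst using the helpers defined above.
# Assumes attr_doc_gen.py is importable, e.g. when run from the docs/ directory.
from attr_doc_gen import convert_to_attr_list, write_all_attributes

all_layers_attrs = convert_to_attr_list()  # base + per-backend attributes for every layer class
write_all_attributes(all_layers_attrs)     # writes attributes.rst in the current directory

# Each attribute is rendered by print_attrs() roughly as:
#
#   * table_size: int (Default: 1024)
#
#     * <description text, when one was provided>
#
#     * Available in: <comma-separated backend names, for backend-specific attributes>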
23 changes: 16 additions & 7 deletions hls4ml/backends/catapult/catapult_backend.py
@@ -32,6 +32,7 @@
from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType, PackedType
from hls4ml.report import parse_catapult_report
from hls4ml.utils import attribute_descriptions as descriptions
from hls4ml.utils.fixed_point_utils import ceil_log2


@@ -51,10 +52,12 @@ def _register_layer_attributes(self):

for layer in rnn_layers:
attrs = self.attribute_map.get(layer, [])
attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1))
attrs.append(ConfigurableAttribute('static', value_type=bool, default=True))
attrs.append(ConfigurableAttribute('table_size', default=1024))
attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor))
attrs.append(
ConfigurableAttribute('static', value_type=bool, default=True, description=descriptions.recurrent_static)
)
attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
self.attribute_map[layer] = attrs

# Add ParallelizationFactor to Conv1D/2D
@@ -65,16 +68,22 @@ def _register_layer_attributes(self):

for layer in pf_layers:
attrs = self.attribute_map.get(layer, [])
attrs.append(ConfigurableAttribute('parallelization_factor', default=1))
attrs.append(ConfigurableAttribute('parallelization_factor', default=1, description=descriptions.conv_pf))
self.attribute_map[layer] = attrs

# Add ConvImplementation to Convolution+Pooling layers
cnn_layers = [Conv1D, Conv2D, SeparableConv1D, SeparableConv2D, DepthwiseConv2D, Pooling1D, Pooling2D]

for layer in cnn_layers:
attrs = self.attribute_map.get(layer, [])
# attrs.append(ConfigurableAttribute('conv_implementation', value_type=str, default='LineBuffer'))
attrs.append(ChoiceAttribute('conv_implementation', choices=['LineBuffer', 'Encoded'], default='LineBuffer'))
attrs.append(
ChoiceAttribute(
'conv_implementation',
choices=['LineBuffer', 'Encoded'],
default='LineBuffer',
description=descriptions.conv_implementation,
)
)
self.attribute_map[layer] = attrs

sep_conv_layers = [SeparableConv1D, SeparableConv2D]
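The hls4ml/utils/attribute_descriptions.py module imported above as descriptions is not included in this excerpt. A minimal sketch of its likely shape, using only the constant names referenced in these diffs and placeholder wording, would be:

# Hypothetical sketch of hls4ml/utils/attribute_descriptions.py (the real module is not shown in this diff).
# Only the constant names are taken from the usages above; the wording here is placeholder text.

reuse_factor = 'Number of times each multiplier is reused when computing a layer output.'
table_size = 'Number of entries in the lookup table used to approximate the activation function.'
table_type = 'Precision (data type) of the lookup table entries.'
recurrent_static = 'Whether a single static recurrent implementation is shared between invocations.'
conv_pf = 'Parallelization factor: how many convolution outputs are computed in parallel.'
conv_implementation = "Convolution buffering strategy, either 'LineBuffer' or 'Encoded'."
softmax_implementation = "Softmax implementation to use: 'latency', 'stable', 'argmax' or 'legacy'."
softmax_skip = 'Whether to skip the softmax computation and pass through the raw scores.'
accum_type = 'Precision (data type) of the accumulation variable.'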
24 changes: 18 additions & 6 deletions hls4ml/backends/fpga/fpga_backend.py
@@ -45,6 +45,7 @@
UnspecifiedPrecisionType,
XnorPrecisionType,
)
from hls4ml.utils import attribute_descriptions as descriptions
from hls4ml.writer import get_writer


@@ -74,7 +75,7 @@ def __init__(self, name):

for layer in accum_layers:
attrs = self.attribute_map.get(layer, [])
attrs.append(TypeAttribute('accum'))
attrs.append(TypeAttribute('accum', description=descriptions.accum_type))
self.attribute_map[layer] = attrs

rf_layers = accum_layers + [
@@ -90,7 +91,7 @@

for layer in rf_layers:
attrs = self.attribute_map.get(layer, [])
attrs.append(ConfigurableAttribute('reuse_factor', default=1))
attrs.append(ConfigurableAttribute('reuse_factor', default=1, description=descriptions.reuse_factor))
self.attribute_map[layer] = attrs

# separable is kind of special because it is effectively two layers that will be split
@@ -104,23 +105,34 @@ def __init__(self, name):
self.attribute_map[layer] = attrs

act_attrs = self.attribute_map.get(Activation, [])
act_attrs.append(ConfigurableAttribute('table_size', default=1024))
act_attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
act_attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
act_attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
self.attribute_map[Activation] = act_attrs

softmax_attrs = self.attribute_map.get(Softmax, [])
softmax_attrs.append(ChoiceAttribute('implementation', ['latency', 'stable', 'argmax', 'legacy'], default='stable'))
softmax_attrs.append(ConfigurableAttribute('skip', value_type=bool, default=False))
softmax_attrs.append(
ChoiceAttribute(
'implementation',
['latency', 'stable', 'argmax', 'legacy'],
default='stable',
description=descriptions.softmax_implementation,
)
)
softmax_attrs.append(
ConfigurableAttribute('skip', value_type=bool, default=False, description=descriptions.softmax_skip)
)
softmax_attrs.append(
TypeAttribute(
'exp_table',
default=FixedPrecisionType(18, 8, rounding_mode=RoundingMode.RND, saturation_mode=SaturationMode.SAT),
description=descriptions.table_type,
)
)
softmax_attrs.append(
TypeAttribute(
'inv_table',
default=FixedPrecisionType(18, 8, rounding_mode=RoundingMode.RND, saturation_mode=SaturationMode.SAT),
description=descriptions.table_type,
)
)
self.attribute_map[Softmax] = softmax_attrs
9 changes: 5 additions & 4 deletions hls4ml/backends/oneapi/oneapi_backend.py
@@ -10,6 +10,7 @@
from hls4ml.model.layers import GRU, LSTM, Activation, Conv1D, Conv2D, Dense, Embedding, Layer, SimpleRNN, Softmax
from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType
from hls4ml.utils import attribute_descriptions as descriptions

# from hls4ml.report import parse_oneapi_report

@@ -30,9 +31,9 @@ def _register_layer_attributes(self):

for layer in rnn_layers:
attrs = self.attribute_map.get(layer, [])
attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1))
attrs.append(ConfigurableAttribute('table_size', default=1024))
attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor))
attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
self.attribute_map[layer] = attrs

# Add ParallelizationFactor to Conv1D/2D
@@ -43,7 +44,7 @@

for layer in pf_layers:
attrs = self.attribute_map.get(layer, [])
attrs.append(ConfigurableAttribute('parallelization_factor', default=1))
attrs.append(ConfigurableAttribute('parallelization_factor', default=1, description=descriptions.conv_pf))
self.attribute_map[layer] = attrs

def _register_flows(self):
7 changes: 4 additions & 3 deletions hls4ml/backends/quartus/quartus_backend.py
@@ -11,6 +11,7 @@
from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType
from hls4ml.report import parse_quartus_report
from hls4ml.utils import attribute_descriptions as descriptions


@contextmanager
@@ -39,9 +40,9 @@ def _register_layer_attributes(self):

for layer in rnn_layers:
attrs = self.attribute_map.get(layer, [])
attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1))
attrs.append(ConfigurableAttribute('table_size', default=1024))
attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor))
attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
self.attribute_map[layer] = attrs

def _register_flows(self):
22 changes: 16 additions & 6 deletions hls4ml/backends/vivado/vivado_backend.py
@@ -31,6 +31,7 @@
from hls4ml.model.optimizer import get_backend_passes, layer_optimizer
from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType, PackedType
from hls4ml.report import parse_vivado_report
from hls4ml.utils import attribute_descriptions as descriptions


class VivadoBackend(FPGABackend):
@@ -49,10 +50,12 @@ def _register_layer_attributes(self):

for layer in rnn_layers:
attrs = self.attribute_map.get(layer, [])
attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1))
attrs.append(ConfigurableAttribute('static', value_type=bool, default=True))
attrs.append(ConfigurableAttribute('table_size', default=1024))
attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8)))
attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor))
attrs.append(
ConfigurableAttribute('static', value_type=bool, default=True, description=descriptions.recurrent_static)
)
attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size))
attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type))
self.attribute_map[layer] = attrs

# Add ParallelizationFactor to Conv1D/2D
@@ -63,14 +66,21 @@

for layer in pf_layers:
attrs = self.attribute_map.get(layer, [])
attrs.append(ConfigurableAttribute('parallelization_factor', default=1))
attrs.append(ConfigurableAttribute('parallelization_factor', default=1, description=descriptions.conv_pf))
self.attribute_map[layer] = attrs

# Add ConvImplementation to Convolution+Pooling layers
cnn_layers = [Conv1D, Conv2D, SeparableConv1D, SeparableConv2D, DepthwiseConv2D, Pooling1D, Pooling2D]
for layer in cnn_layers:
attrs = self.attribute_map.get(layer, [])
attrs.append(ChoiceAttribute('conv_implementation', choices=['LineBuffer', 'Encoded'], default='LineBuffer'))
attrs.append(
ChoiceAttribute(
'conv_implementation',
choices=['LineBuffer', 'Encoded'],
default='LineBuffer',
description=descriptions.conv_implementation,
)
)
self.attribute_map[layer] = attrs

def _register_flows(self):
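Once these description strings are attached, they are also reachable programmatically through the same expected_attributes API that the documentation generator uses. A small sketch, assuming a standard hls4ml installation with this change applied:

# Sketch: inspect the descriptions attached to a layer's expected attributes.
import hls4ml.model.layers as layers

for attr in layers.Dense.expected_attributes:
    # description is optional; attributes registered without one report None
    print(f'{attr.name}: {attr.description}')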