diff --git a/docs/attr_doc_gen.py b/docs/attr_doc_gen.py new file mode 100644 index 000000000..0ba2a5b77 --- /dev/null +++ b/docs/attr_doc_gen.py @@ -0,0 +1,149 @@ +import numbers + +import hls4ml.backends as backends +import hls4ml.model.attributes as attributes +import hls4ml.model.layers as layers + + +class AttrList: + def __init__(self, cls_name, cls_attrs) -> None: + self.cls_name = cls_name + self.config_attrs = [attr for attr in cls_attrs if attr.configurable is True] + self.type_attrs = [attr for attr in cls_attrs if attr.__class__.__name__ == 'TypeAttribute'] + self.weight_attrs = [attr for attr in cls_attrs if attr.__class__.__name__ == 'WeightAttribute'] + self.base_attrs = [attr for attr in cls_attrs if attr not in self.config_attrs + self.type_attrs + self.weight_attrs] + self.backend_attrs = {} + self.reverse_backend_attrs = [] # Will hold (attr, backend_name) pairs, used temporarily + self.unique_backend_attrs = [] + + def add_backend_attrs(self, backend_name, backend_attrs): + self.backend_attrs[backend_name] = backend_attrs + + for attr in backend_attrs: + self.reverse_backend_attrs.append((attr, backend_name)) + + def sift_backend_attrs(self): + grouped_dict = {} + for attr, backend_name in self.reverse_backend_attrs: + if attr not in grouped_dict: + grouped_dict[attr] = [] + grouped_dict[attr].append(backend_name) + + for attr, backend_names in grouped_dict.items(): + attr.available_in = backend_names + self.unique_backend_attrs.append(attr) + + @property + def only_configurable(self): + all_attrs = self.config_attrs + self.type_attrs + self.unique_backend_attrs + return [attr for attr in all_attrs if attr.configurable is True] + + +def convert_to_attr_list(): + all_backends = backends.get_available_backends() + # Removing duplicates but preserving order + all_layers = list(dict.fromkeys(layers.layer_map.values())) + all_layers_attrs = [] + + for layer_cls in all_layers: + base_attrs = layer_cls.expected_attributes + + attr_list = AttrList(layer_cls.__name__, base_attrs) + + for backend_name in all_backends: + backend = backends.get_backend(backend_name) + + backend_cls = backend.create_layer_class(layer_cls) + backend_attrs = backend_cls.expected_attributes + + diff_atts = [ + attr for attr in backend_attrs if attr not in base_attrs + ] # Sets are faster, but don't preserve order + if len(diff_atts) > 0: + attr_list.add_backend_attrs(backend.name, diff_atts) + + all_layers_attrs.append(attr_list) + + for attr_list in all_layers_attrs: + attr_list.sift_backend_attrs() + + return all_layers_attrs + + +def print_attrs(attrs, file): + for attr in attrs: + if attr.value_type == numbers.Integral: + vtype = 'int' + elif attr.__class__ == attributes.ChoiceAttribute: + choices = ','.join([str(c) for c in attr.choices]) + vtype = f'list [{choices}]' + else: + vtype = attr.value_type.__name__ if hasattr(attr.value_type, '__name__') else str(attr.value_type) + + if attr.default is None: + file.write('* ' + attr.name + ': ' + vtype + '\n\n') + else: + file.write('* ' + attr.name + ': ' + vtype + ' (Default: ' + str(attr.default) + ')\n\n') + + if attr.description is not None: + file.write(' * ' + attr.description + '\n\n') + + if hasattr(attr, 'available_in'): + file.write(' * Available in: ' + ', '.join(attr.available_in) + '\n\n') + + +def write_all_attributes(all_layers_attrs): + with open('attributes.rst', mode='w') as file: + file.write('================\n') + file.write('Layer attributes\n') + file.write('================\n\n\n') + + for attr_list in all_layers_attrs: + 
file.write(attr_list.cls_name + '\n') + file.write('=' * len(attr_list.cls_name) + '\n') + + if len(attr_list.base_attrs) > 0: + file.write('Base attributes\n') + file.write('---------------\n') + print_attrs(attr_list.base_attrs, file) + + if len(attr_list.type_attrs) > 0: + file.write('Type attributes\n') + file.write('---------------\n') + print_attrs(attr_list.type_attrs, file) + + if len(attr_list.weight_attrs) > 0: + file.write('Weight attributes\n') + file.write('-----------------\n') + print_attrs(attr_list.weight_attrs, file) + + if len(attr_list.config_attrs) > 0: + file.write('Configurable attributes\n') + file.write('-----------------------\n') + print_attrs(attr_list.config_attrs, file) + + if len(attr_list.backend_attrs) > 0: + file.write('Backend-specific attributes\n') + file.write('---------------------------\n') + print_attrs(attr_list.unique_backend_attrs, file) + + +def write_only_configurable(all_layers_attrs): + with open('attributes.rst', mode='w') as file: + file.write('================\n') + file.write('Layer attributes\n') + file.write('================\n\n\n') + + for attr_list in all_layers_attrs: + file.write(attr_list.cls_name + '\n') + file.write('=' * len(attr_list.cls_name) + '\n') + + config_attrs = attr_list.only_configurable + if len(config_attrs) > 0: + print_attrs(config_attrs, file) + + +if __name__ == '__main__': + all_layers_attrs = convert_to_attr_list() + write_all_attributes(all_layers_attrs) + # write_only_configurable(all_layers_attrs) diff --git a/hls4ml/backends/catapult/catapult_backend.py b/hls4ml/backends/catapult/catapult_backend.py index 6d1c17a3c..030016d6c 100644 --- a/hls4ml/backends/catapult/catapult_backend.py +++ b/hls4ml/backends/catapult/catapult_backend.py @@ -32,6 +32,7 @@ from hls4ml.model.optimizer import get_backend_passes, layer_optimizer from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType, PackedType from hls4ml.report import parse_catapult_report +from hls4ml.utils import attribute_descriptions as descriptions from hls4ml.utils.fixed_point_utils import ceil_log2 @@ -51,10 +52,12 @@ def _register_layer_attributes(self): for layer in rnn_layers: attrs = self.attribute_map.get(layer, []) - attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1)) - attrs.append(ConfigurableAttribute('static', value_type=bool, default=True)) - attrs.append(ConfigurableAttribute('table_size', default=1024)) - attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8))) + attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor)) + attrs.append( + ConfigurableAttribute('static', value_type=bool, default=True, description=descriptions.recurrent_static) + ) + attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size)) + attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type)) self.attribute_map[layer] = attrs # Add ParallelizationFactor to Conv1D/2D @@ -65,7 +68,7 @@ def _register_layer_attributes(self): for layer in pf_layers: attrs = self.attribute_map.get(layer, []) - attrs.append(ConfigurableAttribute('parallelization_factor', default=1)) + attrs.append(ConfigurableAttribute('parallelization_factor', default=1, description=descriptions.conv_pf)) self.attribute_map[layer] = attrs # Add ConvImplementation to Convolution+Pooling layers @@ -73,8 +76,14 @@ def _register_layer_attributes(self): for layer in cnn_layers: attrs = 
self.attribute_map.get(layer, []) - # attrs.append(ConfigurableAttribute('conv_implementation', value_type=str, default='LineBuffer')) - attrs.append(ChoiceAttribute('conv_implementation', choices=['LineBuffer', 'Encoded'], default='LineBuffer')) + attrs.append( + ChoiceAttribute( + 'conv_implementation', + choices=['LineBuffer', 'Encoded'], + default='LineBuffer', + description=descriptions.conv_implementation, + ) + ) self.attribute_map[layer] = attrs sep_conv_layers = [SeparableConv1D, SeparableConv2D] diff --git a/hls4ml/backends/fpga/fpga_backend.py b/hls4ml/backends/fpga/fpga_backend.py index a9fc09b7a..fbfed71c5 100644 --- a/hls4ml/backends/fpga/fpga_backend.py +++ b/hls4ml/backends/fpga/fpga_backend.py @@ -45,6 +45,7 @@ UnspecifiedPrecisionType, XnorPrecisionType, ) +from hls4ml.utils import attribute_descriptions as descriptions from hls4ml.writer import get_writer @@ -74,7 +75,7 @@ def __init__(self, name): for layer in accum_layers: attrs = self.attribute_map.get(layer, []) - attrs.append(TypeAttribute('accum')) + attrs.append(TypeAttribute('accum', description=descriptions.accum_type)) self.attribute_map[layer] = attrs rf_layers = accum_layers + [ @@ -90,7 +91,7 @@ def __init__(self, name): for layer in rf_layers: attrs = self.attribute_map.get(layer, []) - attrs.append(ConfigurableAttribute('reuse_factor', default=1)) + attrs.append(ConfigurableAttribute('reuse_factor', default=1, description=descriptions.reuse_factor)) self.attribute_map[layer] = attrs # seperable is kind of special because it is effectively two layers that will be split @@ -104,23 +105,34 @@ def __init__(self, name): self.attribute_map[layer] = attrs act_attrs = self.attribute_map.get(Activation, []) - act_attrs.append(ConfigurableAttribute('table_size', default=1024)) - act_attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8))) + act_attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size)) + act_attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type)) self.attribute_map[Activation] = act_attrs softmax_attrs = self.attribute_map.get(Softmax, []) - softmax_attrs.append(ChoiceAttribute('implementation', ['latency', 'stable', 'argmax', 'legacy'], default='stable')) - softmax_attrs.append(ConfigurableAttribute('skip', value_type=bool, default=False)) + softmax_attrs.append( + ChoiceAttribute( + 'implementation', + ['latency', 'stable', 'argmax', 'legacy'], + default='stable', + description=descriptions.softmax_implementation, + ) + ) + softmax_attrs.append( + ConfigurableAttribute('skip', value_type=bool, default=False, description=descriptions.softmax_skip) + ) softmax_attrs.append( TypeAttribute( 'exp_table', default=FixedPrecisionType(18, 8, rounding_mode=RoundingMode.RND, saturation_mode=SaturationMode.SAT), + description=descriptions.table_type, ) ) softmax_attrs.append( TypeAttribute( 'inv_table', default=FixedPrecisionType(18, 8, rounding_mode=RoundingMode.RND, saturation_mode=SaturationMode.SAT), + description=descriptions.table_type, ) ) self.attribute_map[Softmax] = softmax_attrs diff --git a/hls4ml/backends/oneapi/oneapi_backend.py b/hls4ml/backends/oneapi/oneapi_backend.py index c85a8c0e9..7d0f0d48e 100644 --- a/hls4ml/backends/oneapi/oneapi_backend.py +++ b/hls4ml/backends/oneapi/oneapi_backend.py @@ -10,6 +10,7 @@ from hls4ml.model.layers import GRU, LSTM, Activation, Conv1D, Conv2D, Dense, Embedding, Layer, SimpleRNN, Softmax from hls4ml.model.optimizer import get_backend_passes, 
layer_optimizer from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType +from hls4ml.utils import attribute_descriptions as descriptions # from hls4ml.report import parse_oneapi_report @@ -30,9 +31,9 @@ def _register_layer_attributes(self): for layer in rnn_layers: attrs = self.attribute_map.get(layer, []) - attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1)) - attrs.append(ConfigurableAttribute('table_size', default=1024)) - attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8))) + attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor)) + attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size)) + attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type)) self.attribute_map[layer] = attrs # Add ParallelizationFactor to Conv1D/2D @@ -43,7 +44,7 @@ def _register_layer_attributes(self): for layer in pf_layers: attrs = self.attribute_map.get(layer, []) - attrs.append(ConfigurableAttribute('parallelization_factor', default=1)) + attrs.append(ConfigurableAttribute('parallelization_factor', default=1, description=descriptions.conv_pf)) self.attribute_map[layer] = attrs def _register_flows(self): diff --git a/hls4ml/backends/quartus/quartus_backend.py b/hls4ml/backends/quartus/quartus_backend.py index 683d3f77b..6e596fe2d 100644 --- a/hls4ml/backends/quartus/quartus_backend.py +++ b/hls4ml/backends/quartus/quartus_backend.py @@ -11,6 +11,7 @@ from hls4ml.model.optimizer import get_backend_passes, layer_optimizer from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType from hls4ml.report import parse_quartus_report +from hls4ml.utils import attribute_descriptions as descriptions @contextmanager @@ -39,9 +40,9 @@ def _register_layer_attributes(self): for layer in rnn_layers: attrs = self.attribute_map.get(layer, []) - attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1)) - attrs.append(ConfigurableAttribute('table_size', default=1024)) - attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8))) + attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor)) + attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size)) + attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type)) self.attribute_map[layer] = attrs def _register_flows(self): diff --git a/hls4ml/backends/vivado/vivado_backend.py b/hls4ml/backends/vivado/vivado_backend.py index 365690881..117805dd8 100644 --- a/hls4ml/backends/vivado/vivado_backend.py +++ b/hls4ml/backends/vivado/vivado_backend.py @@ -31,6 +31,7 @@ from hls4ml.model.optimizer import get_backend_passes, layer_optimizer from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType, PackedType from hls4ml.report import parse_vivado_report +from hls4ml.utils import attribute_descriptions as descriptions class VivadoBackend(FPGABackend): @@ -49,10 +50,12 @@ def _register_layer_attributes(self): for layer in rnn_layers: attrs = self.attribute_map.get(layer, []) - attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1)) - attrs.append(ConfigurableAttribute('static', value_type=bool, default=True)) - attrs.append(ConfigurableAttribute('table_size', default=1024)) - attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8))) + 
attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1, description=descriptions.reuse_factor)) + attrs.append( + ConfigurableAttribute('static', value_type=bool, default=True, description=descriptions.recurrent_static) + ) + attrs.append(ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size)) + attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8), description=descriptions.table_type)) self.attribute_map[layer] = attrs # Add ParallelizationFactor to Conv1D/2D @@ -63,14 +66,21 @@ def _register_layer_attributes(self): for layer in pf_layers: attrs = self.attribute_map.get(layer, []) - attrs.append(ConfigurableAttribute('parallelization_factor', default=1)) + attrs.append(ConfigurableAttribute('parallelization_factor', default=1, description=descriptions.conv_pf)) self.attribute_map[layer] = attrs # Add ConvImplementation to Convolution+Pooling layers cnn_layers = [Conv1D, Conv2D, SeparableConv1D, SeparableConv2D, DepthwiseConv2D, Pooling1D, Pooling2D] for layer in cnn_layers: attrs = self.attribute_map.get(layer, []) - attrs.append(ChoiceAttribute('conv_implementation', choices=['LineBuffer', 'Encoded'], default='LineBuffer')) + attrs.append( + ChoiceAttribute( + 'conv_implementation', + choices=['LineBuffer', 'Encoded'], + default='LineBuffer', + description=descriptions.conv_implementation, + ) + ) self.attribute_map[layer] = attrs def _register_flows(self): diff --git a/hls4ml/model/attributes.py b/hls4ml/model/attributes.py index 0e8df6e10..d03d2bd10 100644 --- a/hls4ml/model/attributes.py +++ b/hls4ml/model/attributes.py @@ -36,11 +36,12 @@ class Attribute: """ - def __init__(self, name, value_type=Integral, default=None, configurable=False): + def __init__(self, name, value_type=Integral, default=None, configurable=False, description=None): self.name = name self.value_type = value_type self.default = default self.configurable = configurable + self.description = description def validate_value(self, value): if self.value_type is not None: @@ -59,6 +60,20 @@ def config_name(self): """ return convert_to_pascal_case(self.name) + def __eq__(self, other: object) -> bool: + if not isinstance(other, Attribute): + return NotImplemented + return ( + self.name == other.name + and self.value_type == other.value_type + and self.default == other.default + and self.configurable == other.configurable + and self.description == other.description + ) + + def __hash__(self) -> int: + return hash((self.name, self.value_type, self.default, self.configurable, self.description)) + class ConfigurableAttribute(Attribute): """ @@ -68,8 +83,8 @@ class ConfigurableAttribute(Attribute): when defining the expected attributes of layer classes. """ - def __init__(self, name, value_type=int, default=None): - super().__init__(name, value_type, default, configurable=True) + def __init__(self, name, value_type=Integral, default=None, description=None): + super().__init__(name, value_type, default, configurable=True, description=description) class TypeAttribute(Attribute): @@ -79,10 +94,10 @@ class TypeAttribute(Attribute): As a convention, the name of the attribute storing a type will end in ``_t``. 
""" - def __init__(self, name, default=None, configurable=True): + def __init__(self, name, default=None, configurable=True, description=None): if not name.endswith('_t'): name += '_t' - super().__init__(name, value_type=NamedType, default=default, configurable=configurable) + super().__init__(name, value_type=NamedType, default=default, configurable=configurable, description=description) class ChoiceAttribute(Attribute): @@ -90,25 +105,31 @@ class ChoiceAttribute(Attribute): Represents an attribute whose value can be one of several predefined values. """ - def __init__(self, name, choices, default=None, configurable=True): - super().__init__(name, value_type=list, default=default, configurable=configurable) + def __init__(self, name, choices, default=None, configurable=True, description=None): + super().__init__(name, value_type=list, default=default, configurable=configurable, description=description) assert len(choices) > 0 if default is not None: assert default in choices self.choices = choices - self.value_type = str(self.choices) def validate_value(self, value): return value in self.choices + def __eq__(self, other: object) -> bool: + base_eq = super().__eq__(other) + return base_eq and hasattr(other, 'choices') and set(self.choices) == set(other.choices) + + def __hash__(self) -> int: + return super().__hash__() ^ hash(tuple(sorted(self.choices))) + class WeightAttribute(Attribute): """ Represents an attribute that will store a weight variable. """ - def __init__(self, name): - super().__init__(name, value_type=WeightVariable, default=None, configurable=False) + def __init__(self, name, description=None): + super().__init__(name, value_type=WeightVariable, default=None, configurable=False, description=description) class CodeAttrubute(Attribute): @@ -116,8 +137,8 @@ class CodeAttrubute(Attribute): Represents an attribute that will store generated source code block. """ - def __init__(self, name): - super(WeightAttribute, self).__init__(name, value_type=Source, default=None, configurable=False) + def __init__(self, name, description=None): + super().__init__(name, value_type=Source, default=None, configurable=False, description=description) # endregion diff --git a/hls4ml/model/layers.py b/hls4ml/model/layers.py index 891f187ea..c276e2814 100644 --- a/hls4ml/model/layers.py +++ b/hls4ml/model/layers.py @@ -26,6 +26,7 @@ WeightVariable, find_minimum_width, ) +from hls4ml.utils import attribute_descriptions as descriptions from hls4ml.utils.string_utils import convert_to_snake_case @@ -53,9 +54,9 @@ class Layer: """ _expected_attributes = [ - Attribute('index'), - ConfigurableAttribute('trace', default=False), - TypeAttribute('result'), + Attribute('index', description=descriptions.index), + ConfigurableAttribute('trace', default=False, description=descriptions.trace), + TypeAttribute('result', description=descriptions.result_type), ] @classproperty diff --git a/hls4ml/model/types.py b/hls4ml/model/types.py index 9fb257a1e..9d0a97440 100644 --- a/hls4ml/model/types.py +++ b/hls4ml/model/types.py @@ -64,12 +64,15 @@ def __init__(self, width, signed): self.width = width self.signed = signed - def __eq__(self, other): + def __eq__(self, other: object) -> bool: eq = self.width == other.width eq = eq and self.signed == other.signed return eq + def __hash__(self) -> int: + return hash((self.width, self.signed)) + class IntegerPrecisionType(PrecisionType): """Arbitrary precision integer data type. 
@@ -89,12 +92,15 @@ def __str__(self): return typestring # Does this need to make sure other is also an IntegerPrecisionType? I could see a match between Fixed and Integer - def __eq__(self, other): + def __eq__(self, other: object) -> bool: if isinstance(other, IntegerPrecisionType): return super().__eq__(other) return False + def __hash__(self) -> int: + return super().__hash__() + @property def integer(self): return self.width @@ -186,7 +192,7 @@ def __str__(self): typestring = '{signed}fixed<{args}>'.format(signed='u' if not self.signed else '', args=args) return typestring - def __eq__(self, other): + def __eq__(self, other: object) -> bool: if isinstance(other, FixedPrecisionType): eq = super().__eq__(other) eq = eq and self.integer == other.integer @@ -197,6 +203,9 @@ def __eq__(self, other): return False + def __hash__(self) -> int: + return super().__hash__() ^ hash((self.integer, self.rounding_mode, self.saturation_mode, self.saturation_bits)) + class XnorPrecisionType(PrecisionType): """ diff --git a/hls4ml/utils/attribute_descriptions.py b/hls4ml/utils/attribute_descriptions.py new file mode 100644 index 000000000..756f276fa --- /dev/null +++ b/hls4ml/utils/attribute_descriptions.py @@ -0,0 +1,51 @@ +"""Strings holding attribute descriptions.""" + +# Common attributes + +reuse_factor = ( + 'The number of times each multiplier is used by controlling the amount of pipelining/unrolling. ' + 'A lower number results in more parallelism and lower latency at the expense of the resources used. ' + 'Reuse factor = 1 corresponds to all multiplications executed in parallel, and hence the lowest possible latency.' +) + +index = 'Internal node counter used for bookkeeping and variable/tensor naming.' +trace = 'Enables saving of layer output (tracing) when using hls_model.predict(...) or hls_model.trace(...).' + +result_type = 'The datatype (precision) of the output tensor.' +accum_type = 'The datatype (precision) used to store intermediate results of the computation within the layer.' + +# Activation-related attributes + +table_size = 'The size of the lookup table used to approximate the function.' +table_type = 'The datatype (precision) used for the values of the lookup table.' + +softmax_implementation = ( + 'Choice of implementation of the softmax function. ' + '"latency" provides good latency at the expense of extra resources and performs well for a small number of classes. ' + '"stable" may require extra clock cycles but has better accuracy. ' + '"legacy" is an older implementation with poor accuracy, but it is fast and has low resource use. ' + 'It is superseded by the "latency" implementation for most applications. ' + '"argmax" is a special implementation that can be used if only the output with the highest probability is important. ' + 'Using this implementation will save resources and clock cycles.' +) +softmax_skip = 'If enabled, skips the softmax node and returns the raw outputs.' + +# Convolution-related attributes + +conv_pf = ( + 'The number of outputs computed in parallel. Essentially, the number of multiplications of the input window with the ' + 'convolution kernel occurring in parallel. ' + 'A higher number results in more parallelism (lower latency and II) at the expense of more resources used. ' + 'Currently only supported in io_parallel.' +) +conv_implementation = ( + 'The "LineBuffer" implementation is preferred over "Encoded" for most use cases. ' + 'This attribute only applies to io_stream.' 
+) + +# Recurrent-related attributes + +recurrent_static = ( + 'If set to True, will reuse the same recurrent block for computation, resulting in lower resource ' + 'usage at the expense of serialized computation and higher latency/II.' +)
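
For context (not part of the diff): the description strings in hls4ml/utils/attribute_descriptions.py travel with the attribute objects, and the new Attribute.__eq__/__hash__ methods are what allow docs/attr_doc_gen.py to deduplicate backend-specific attributes and record where each one is available. Below is a minimal sketch of that interaction, assuming the changes above are applied; the 'Vivado'/'Vitis' labels are illustrative stand-ins for whichever backends register the attribute.

```python
# Sketch only -- assumes this PR is applied; backend labels below are illustrative.
from hls4ml.model.attributes import ConfigurableAttribute
from hls4ml.utils import attribute_descriptions as descriptions

# Two backends registering the "same" attribute now produce equal, hashable objects,
# thanks to the __eq__/__hash__ added to Attribute in hls4ml/model/attributes.py.
a = ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size)
b = ConfigurableAttribute('table_size', default=1024, description=descriptions.table_size)
assert a == b and hash(a) == hash(b)

# This mirrors what AttrList.sift_backend_attrs() in docs/attr_doc_gen.py relies on:
# equal attributes collapse to one dictionary key, collecting the backends they appear in.
grouped = {}
for attr, backend in [(a, 'Vivado'), (b, 'Vitis')]:
    grouped.setdefault(attr, []).append(backend)

for attr, available_in in grouped.items():
    print(f'{attr.name}: available in {", ".join(available_in)}')
    # -> table_size: available in Vivado, Vitis
```

Running python docs/attr_doc_gen.py (the __main__ block above) then writes the generated attributes.rst into the current working directory.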