From d2d907bcdc038cb7c4c70fc96f8a845af5bfd5ff Mon Sep 17 00:00:00 2001 From: Chang Sun Date: Sun, 10 Nov 2024 21:49:57 +0000 Subject: [PATCH] cosmetic --- hls4ml/backends/fpga/passes/clone.py | 3 ++- .../fpga/passes/inplace_parallel_reshape.py | 6 ++++-- .../fpga/passes/inplace_stream_flatten.py | 3 +-- hls4ml/model/graph.py | 16 ++++++++------- test/pytest/test_multiout_network.py | 20 ++++++++++++------- 5 files changed, 29 insertions(+), 19 deletions(-) diff --git a/hls4ml/backends/fpga/passes/clone.py b/hls4ml/backends/fpga/passes/clone.py index c640d6f37..6971dc55c 100644 --- a/hls4ml/backends/fpga/passes/clone.py +++ b/hls4ml/backends/fpga/passes/clone.py @@ -80,7 +80,8 @@ def transform(self, model, node): if n_outputs == 1: continue if n_outputs > 3: - msg = f'ERROR: Cloning output {output} of {node.__class__.__name__} ({node.name}) more than 3 times not currently supported' # noqa: E501 + msg = f'ERROR: Cloning output {output} of {node.class_name}\ + ({node.name}) more than 3 times not currently supported' raise ValueError(msg) out_var = node.get_output_variable(output) diff --git a/hls4ml/backends/fpga/passes/inplace_parallel_reshape.py b/hls4ml/backends/fpga/passes/inplace_parallel_reshape.py index a56531a31..82efe6710 100644 --- a/hls4ml/backends/fpga/passes/inplace_parallel_reshape.py +++ b/hls4ml/backends/fpga/passes/inplace_parallel_reshape.py @@ -12,7 +12,7 @@ class InplaceParallelReshape(OptimizerPass): def match(self, node): if not isinstance(node, Reshape): - return + return False return node.model.config.get_config_value('IOType') == 'io_parallel' def transform(self, model, node): @@ -24,6 +24,8 @@ def transform(self, model, node): prev_node = node.get_input_node() assert ( prev_node.name not in model.outputs - ), f"Cannot output node {prev_node.name}: reshape is a no-op in io_parallel. As a result, the previous node {prev_node.name}'s output will be used as the output. However, this node is already an output." 
# noqa: E501 + ), f"Cannot output node {prev_node.name}: reshape is a no-op in io_parallel.\ + As a result, the previous node {prev_node.name}'s output will be used as the\ + output. However, this node is already an output." model.outputs = [name if name != node.name else prev_node.name for name in model.outputs] return False diff --git a/hls4ml/backends/fpga/passes/inplace_stream_flatten.py b/hls4ml/backends/fpga/passes/inplace_stream_flatten.py index a41efe6fd..69720632b 100644 --- a/hls4ml/backends/fpga/passes/inplace_stream_flatten.py +++ b/hls4ml/backends/fpga/passes/inplace_stream_flatten.py @@ -15,8 +15,7 @@ def match(self, node): if not (isinstance(node, Reshape) and len(node.get_output_variable().shape)) == 1: # Reshape with multiple outputs will be kept as is, or repack cannot handle different shapes return False - io_type = node.model.config.get_config_value('IOType') - return io_type == 'io_stream' + return node.model.config.get_config_value('IOType') == 'io_stream' def transform(self, model, node): outvar = node.get_output_variable() diff --git a/hls4ml/model/graph.py b/hls4ml/model/graph.py index ffd173b8f..fff970052 100644 --- a/hls4ml/model/graph.py +++ b/hls4ml/model/graph.py @@ -524,14 +524,16 @@ def remove_node(self, node, rewire=True): node to the input of next one. If the removed node has multiple inputs/outputs tensors, an exception is raised. - Args: - node (Layer): The node to remove rewire (bool, optional): - Deprecated, no effect + :param node: The node to remove. + :type node: Layer + :param rewire: Deprecated, no effect. + :type rewire: bool, optional - Raises: - Exception: If an attempt is made to rewire a node with - multiple inputs/outputs. - """ + :raises Exception: If an attempt is made to rewire a node with + multiple inputs/outputs. + + .. 
deprecated:: 1.0 + The `rewire` parameter is deprecated and has no effect.""" inputs = [inp for inp in node.inputs if inp] outputs = [outp for outp in node.outputs if outp] diff --git a/test/pytest/test_multiout_network.py b/test/pytest/test_multiout_network.py index 88e884f5c..1cf90d614 100644 --- a/test/pytest/test_multiout_network.py +++ b/test/pytest/test_multiout_network.py @@ -20,7 +20,7 @@ def model(): @pytest.fixture(scope='module') -def model2(): +def model_corner_cases(): in1 = keras.layers.Input(shape=(24, 8)) in2 = keras.layers.Input(shape=(16)) out1 = keras.layers.Conv1D(1, 3)(in1) @@ -40,7 +40,7 @@ def data(): @pytest.fixture(scope='module') -def data2(): +def data_corner_cases(): X1 = np.random.normal(0, 1, (1000, 24, 8)) X2 = np.random.normal(0, 1, (1000, 16)) X1 = np.clip(X1, -16, 15) @@ -70,18 +70,24 @@ def test_multi_output_nn(model, data, backend: str, io_type: str): @pytest.mark.parametrize('backend', ['Vivado', 'Quartus', 'Vitis', 'Catapult', 'OneAPI']) @pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) @pytest.mark.parametrize('strategy', ['latency', 'resource']) -def test_multi_output_nn_2(model2, data2, backend: str, io_type: str, strategy: str): - """Cover corner case where a flatten layer is cloned multiple times, and used as model output""" +def test_multi_output_nn_corner_cases(model_corner_cases, data_corner_cases, backend: str, io_type: str, strategy: str): + """Cover corner cases, when: + - a layer outputs both to the next layer(s) and to the model output + - when an node removal/insertion is triggered internally + - a reshape in io_parallel, or flatten in io_stream layer's output is used multiple times + - and as layer output + - and by layer taking multiple inputs + """ output_dir = str(test_root_path / f'hls4mlprj_multiout_network_2_{backend}_{io_type}_{strategy}') hls_config = {'Model': {'Precision': 'fixed<32,5>', 'ReuseFactor': 1}, 'Strategy': strategy} model_hls = convert_from_keras_model( - model2, 
backend=backend, output_dir=output_dir, hls_config=hls_config, io_type=io_type + model_corner_cases, backend=backend, output_dir=output_dir, hls_config=hls_config, io_type=io_type ) model_hls.compile() - r_hls = model_hls.predict(data2) - r_keras = model2.predict(data2, verbose=0, batch_size=1000) + r_hls = model_hls.predict(data_corner_cases) + r_keras = model_corner_cases.predict(data_corner_cases, verbose=0, batch_size=1000) assert np.allclose(r_hls[0], r_keras[0], atol=1e-5, rtol=0) assert np.allclose(r_hls[1], r_keras[1], atol=1e-5, rtol=0)