From af4a5b7b2912b88d1d5ccac019715522d500e228 Mon Sep 17 00:00:00 2001
From: Nikola Vukobrat
Date: Fri, 30 Aug 2024 13:26:32 +0000
Subject: [PATCH] [Bug] Solve issue where certain inputs/constants aren't
 properly declared during MLIR emit

Previously, MLIR emit was hitting edge cases when declaring constant
inputs; more precisely, they were mostly skipped. This fix redefines how
inputs are recognized (using the kInput node type) and properly
distinguishes regular and constant inputs from model parameters.

The issue was uncovered during op bringup for #112 (reciprocal), and the
PR related to #112 tests this case. Additionally, the MNIST inference and
training tests also cover this functionality.

Fixes #201
---
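Note on the assertion style in the new test below: asserting on a bare list
comprehension can never fail, because any non-empty list is truthy in
Python, so the test wraps the element-wise PCC comparisons in all(...). A
minimal, pybuda-independent sketch of the distinction:

    # A non-empty list is truthy even when every element is False, so
    # asserting on the list itself would always pass; all(...) actually
    # inspects the elements.
    results = [False, False]

    print(bool(results))  # True  -> `assert results` would mask failures
    print(all(results))   # False -> `assert all(results)` fails as intended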
 pybuda/csrc/passes/lower_to_mlir.cpp | 31 +++++++++++++++++++++++++++++--
 pybuda/test/mlir/test_features.py    | 30 ++++++++++++++++++++++++++++++
 pytest.ini                           |  3 +++
 3 files changed, 62 insertions(+), 2 deletions(-)
 create mode 100644 pybuda/test/mlir/test_features.py

diff --git a/pybuda/csrc/passes/lower_to_mlir.cpp b/pybuda/csrc/passes/lower_to_mlir.cpp
index 50b99f020..0914dca6f 100644
--- a/pybuda/csrc/passes/lower_to_mlir.cpp
+++ b/pybuda/csrc/passes/lower_to_mlir.cpp
@@ -4,6 +4,7 @@
 #include "lower_to_mlir.hpp"

 // Standard headers
+#include
 #include
 #include
 #include
@@ -132,6 +133,8 @@ class MLIRGenerator
             throw std::runtime_error("Variable " + node->name() + " already declared in the current scope.");
         }

+        log_trace(LogMLIRCompiler, "Declaring {} in the current scope.", node->name());
+
         symbolTable_[node->name()] = {value, node};
     }
@@ -173,10 +176,21 @@ class MLIRGenerator
         // Add the graph inputs to the argument list
         for (auto *input: graph->ordered_module_inputs()) //for (auto *input : graph->nodes_by_type(tt::graphlib::kInput))
         {
+            log_trace(LogMLIRCompiler, "Adding input {} to the argument list.", input->name());
+
             argument_nodes.push_back(input);
             argument_types.push_back(get_node_type(input));
         }

+        // Add the graph constants to the argument list
+        for (auto *constant : graph->get_constant_nodes())
+        {
+            log_trace(LogMLIRCompiler, "Adding constant {} to the argument list.", constant->name());
+
+            argument_nodes.push_back(constant);
+            argument_types.push_back(get_node_type(constant));
+        }
+
         // Add the graph parameters to the argument list
         for(auto *parameter: graph->get_parameter_nodes())
         {
@@ -185,8 +199,10 @@ class MLIRGenerator
             // for forward and backward subgraphs (via GraphTraversalContext).
             if (graph->data_users(parameter).empty())
             {
+                log_trace(LogMLIRCompiler, "Skipping parameter {} as it is not used in the current graph context.", parameter->name());
                 continue;
             }
+            log_trace(LogMLIRCompiler, "Adding parameter {} to the argument list.", parameter->name());

             argument_nodes.push_back(parameter);
             argument_types.push_back(get_node_type(parameter));
@@ -201,6 +217,7 @@ class MLIRGenerator

         for (auto *output : output_nodes)
         {
+            log_trace(LogMLIRCompiler, "Adding output {} to the return list.", output->name());
             returns.push_back(get_node_type(output));
         }
@@ -215,6 +232,7 @@ class MLIRGenerator
             llvm::SmallVector<mlir::NamedAttribute> named_attributes;
             named_attributes.push_back(builder_.getNamedAttr("ttir.name", builder_.getStringAttr(argument_node->name())));
             func.setArgAttrs(i, named_attributes);
+            log_trace(LogMLIRCompiler, "Set argument name {} for function argument {}.", argument_node->name(), i);
         }

         // Start the body of the function by creating an entry block.
@@ -241,9 +259,9 @@ class MLIRGenerator
             // Skip if the node isn't a TTForge operation
             if (node->node_type() != tt::graphlib::NodeType::kPyOp)
             {
+                log_trace(LogMLIRCompiler, "Skipping node {} as it is not a TTForge operation.", node->name());
                 continue;
             }
-            log_trace(LogMLIRCompiler, "Emitting MLIR for node {}", node->name());

             tt::graphlib::OpNode *op_node = node->as<tt::graphlib::OpNode>();
@@ -353,9 +371,18 @@ class MLIRGenerator
     {
         llvm::SmallVector<mlir::Value> operands;

+#ifdef DEBUG
+        // Log all keys from symbolTable_
+        log_trace(LogMLIRCompiler, "Logging all keys from symbolTable_");
+        for (const auto& entry : symbolTable_)
+        {
+            log_trace(LogMLIRCompiler, "Key: {}", entry.first);
+        }
+#endif
+
         for (auto operand : graph->data_operands(op_node))
         {
-            TT_ASSERT(symbolTable_.find(operand->name()) != symbolTable_.end(), "Operand " + operand->name() + "not found in symbol table.");
+            TT_ASSERT(symbolTable_.find(operand->name()) != symbolTable_.end(), "Operand " + operand->name() + " not found in symbol table.");

             operands.push_back(symbolTable_.at(operand->name()).first);
         }
diff --git a/pybuda/test/mlir/test_features.py b/pybuda/test/mlir/test_features.py
new file mode 100644
index 000000000..1d6251305
--- /dev/null
+++ b/pybuda/test/mlir/test_features.py
@@ -0,0 +1,30 @@
+# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
+
+# SPDX-License-Identifier: Apache-2.0
+
+import torch
+from torch import nn
+
+import pybuda
+from pybuda.op.eval.common import compare_with_golden_pcc
+
+
+def test_multiple_inputs():
+    class MultipleInputs(nn.Module):
+        def __init__(self):
+            super().__init__()
+
+        def forward(self, a, b, c):
+            return a + b + c
+
+    inputs = [torch.rand(1, 32, 32), torch.rand(1, 32, 32), torch.rand(1, 32, 32)]
+
+    framework_model = MultipleInputs()
+    fw_out = framework_model(*inputs)
+
+    compiled_model = pybuda.compile(framework_model, sample_inputs=inputs)
+    co_out = compiled_model(*inputs)
+
+    co_out = [co.to("cpu") for co in co_out]
+    fw_out = [fw_out] if isinstance(fw_out, torch.Tensor) else fw_out
+    assert all(compare_with_golden_pcc(golden=fo, calculated=co, pcc=0.99) for fo, co in zip(fw_out, co_out))
diff --git a/pytest.ini b/pytest.ini
index 118f863c4..501084d52 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -9,6 +9,9 @@ testpaths =
     # Ops
     pybuda/test/mlir/test_ops.py

+    # Features
+    pybuda/test/mlir/test_features.py
+
     # API
     pybuda/test/test_api.py
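
A short usage sketch of the constant-input path this change fixes: a module
that folds a non-trainable tensor into its graph. The register_buffer route
is an assumption about how such a constant reaches the compiler's
get_constant_nodes() path; pybuda.compile and compare_with_golden_pcc are
the same APIs used in test_features.py above.

    import torch
    from torch import nn

    import pybuda
    from pybuda.op.eval.common import compare_with_golden_pcc


    class ConstantInput(nn.Module):
        def __init__(self):
            super().__init__()
            # A buffer is not a parameter, so it is expected to be lowered
            # as a constant input rather than a model parameter.
            self.register_buffer("constant", torch.ones(1, 32, 32))

        def forward(self, a):
            return a + self.constant


    inputs = [torch.rand(1, 32, 32)]
    framework_model = ConstantInput()
    fw_out = framework_model(*inputs)

    compiled_model = pybuda.compile(framework_model, sample_inputs=inputs)
    co_out = [co.to("cpu") for co in compiled_model(*inputs)]

    assert all(compare_with_golden_pcc(golden=fw_out, calculated=co, pcc=0.99) for co in co_out)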