[Bug] Solves issue when certain inputs/constants aren't properly declared during MLIR emit

Previously, MLIR emit was hitting edge cases when declaring constant inputs; in most cases they
were simply skipped. This fix redefines how inputs are recognized (using the kInput node type)
and properly distinguishes regular and constant inputs from model parameters.

The issue was uncovered during the #112 op bringup (reciprocal), and the PR related to #112
exercises this case. Additionally, the MNIST inference and training tests also cover this
functionality.

Fixes #201
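
In essence, the pass now gathers function arguments from three explicit sources (inputs,
constants, used parameters) rather than a single input query. Below is a minimal standalone
sketch of that classification; the NodeKind enum, Node struct, and collect_arguments helper
are illustrative assumptions, not the actual TTForge API.

```cpp
#include <string>
#include <vector>

// Illustrative stand-ins for tt::graphlib node types (e.g. kInput).
enum class NodeKind { kInput, kConstant, kParameter, kOp };

struct Node {
    NodeKind kind;
    std::string name;
    bool has_users;  // stand-in for !graph->data_users(node).empty()
};

// Gather function arguments the way the fix does: inputs first, then
// constants, then parameters that are used in the current graph context,
// so constant inputs can no longer fall through the cracks.
std::vector<const Node*> collect_arguments(const std::vector<Node>& nodes)
{
    std::vector<const Node*> args;
    for (const auto& n : nodes)
        if (n.kind == NodeKind::kInput) args.push_back(&n);
    for (const auto& n : nodes)
        if (n.kind == NodeKind::kConstant) args.push_back(&n);
    for (const auto& n : nodes)
        if (n.kind == NodeKind::kParameter && n.has_users) args.push_back(&n);
    return args;
}
```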
nvukobratTT committed Sep 2, 2024
1 parent 831b1b5 commit af4a5b7
Showing 3 changed files with 64 additions and 2 deletions.
31 changes: 29 additions & 2 deletions pybuda/csrc/passes/lower_to_mlir.cpp
@@ -4,6 +4,7 @@
#include "lower_to_mlir.hpp"

// Standard headers
#include <iostream>
#include <cstdint>
#include <stdexcept>
#include <string>
@@ -132,6 +133,8 @@ class MLIRGenerator
throw std::runtime_error("Variable " + node->name() + " already declared in the current scope.");
}

log_trace(LogMLIRCompiler, "Declaring {} in the current scope.", node->name());

symbolTable_[node->name()] = {value, node};
}

@@ -173,10 +176,21 @@ class MLIRGenerator
// Add the graph inputs to the argument list
for (auto *input: graph->ordered_module_inputs()) //for (auto *input : graph->nodes_by_type(tt::graphlib::kInput))
{
log_info(LogMLIRCompiler, "Adding input {} to the argument list.", input->name());

argument_nodes.push_back(input);
argument_types.push_back(get_node_type(input));
}

// Add the graph constants to the argument list
for (auto *constant : graph->get_constant_nodes())
{
log_trace(LogMLIRCompiler, "Adding constant {} to the argument list.", constant->name());

argument_nodes.push_back(constant);
argument_types.push_back(get_node_type(constant));
}

// Add the graph parameters to the argument list
for(auto *parameter: graph->get_parameter_nodes())
{
@@ -185,8 +199,10 @@
// for forward and backward subgraphs (via GraphTraversalContext).
if (graph->data_users(parameter).empty())
{
log_trace(LogMLIRCompiler, "Skipping parameter {} as it is not used in the current graph context.", parameter->name());
continue;
}
log_trace(LogMLIRCompiler, "Adding parameter {} to the argument list.", parameter->name());

argument_nodes.push_back(parameter);
argument_types.push_back(get_node_type(parameter));
@@ -201,6 +217,7 @@

for (auto *output : output_nodes)
{
log_trace(LogMLIRCompiler, "Adding output {} to the return list.", output->name());
returns.push_back(get_node_type(output));
}

@@ -215,6 +232,7 @@
llvm::SmallVector<mlir::NamedAttribute, 1> named_attributes;
named_attributes.push_back(builder_.getNamedAttr("ttir.name", builder_.getStringAttr(argument_node->name())));
func.setArgAttrs(i, named_attributes);
log_trace(LogMLIRCompiler, "Set argument name {} for function argument {}.", argument_node->name(), i);
}

// Start the body of the function by creating an entry block.
Expand All @@ -241,9 +259,9 @@ class MLIRGenerator
// Skip if the node isn't TTForge operation
if (node->node_type() != tt::graphlib::NodeType::kPyOp)
{
log_trace(LogMLIRCompiler, "Skipping node {} as it is not a TTForge operation.", node->name());
continue;
}

log_trace(LogMLIRCompiler, "Emitting MLIR for node {}", node->name());

tt::graphlib::OpNode *op_node = node->as<tt::graphlib::OpNode>();
@@ -353,9 +371,18 @@ class MLIRGenerator
{
llvm::SmallVector<mlir::Value> operands;

#ifdef DEBUG
// Log all values from symbolTable_
log_trace(LogMLIRCompiler, "Logging all keys from symbolTable_");
for (const auto& entry : symbolTable_)
{
log_trace(LogMLIRCompiler, "Key: {}", entry.first);
}
#endif

for (auto operand : graph->data_operands(op_node))
{
TT_ASSERT(symbolTable_.find(operand->name()) != symbolTable_.end(), "Operand " + operand->name() + "not found in symbol table.");
TT_ASSERT(symbolTable_.find(operand->name()) != symbolTable_.end(), "Operand " + operand->name() + " not found in symbol table.");
operands.push_back(symbolTable_.at(operand->name()).first);
}

32 changes: 32 additions & 0 deletions pybuda/test/mlir/test_features.py
@@ -0,0 +1,32 @@
# SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
#
# SPDX-License-Identifier: Apache-2.0

import pytest
import torch
from torch import nn

import pybuda
from pybuda.op.eval.common import compare_with_golden_pcc

def test_multiple_inputs():
    class MultipleInputs(nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, a, b, c):
            return a + b + c

    inputs = [torch.rand(1, 32, 32), torch.rand(1, 32, 32), torch.rand(1, 32, 32)]

    framework_model = MultipleInputs()
    fw_out = framework_model(*inputs)

    compiled_model = pybuda.compile(framework_model, sample_inputs=inputs)
    co_out = compiled_model(*inputs)

    co_out = [co.to("cpu") for co in co_out]
    assert all(compare_with_golden_pcc(golden=fo, calculated=co, pcc=0.99) for fo, co in zip(fw_out, co_out))
3 changes: 3 additions & 0 deletions pytest.ini
@@ -9,6 +9,9 @@ testpaths =
# Ops
pybuda/test/mlir/test_ops.py

# Features
pybuda/test/mlir/test_features.py

# API
pybuda/test/test_api.py
