Commit: Adding tests

LPanosTT committed Dec 23, 2024
1 parent e5b0317 commit f806dde
Showing 10 changed files with 107 additions and 1 deletion.
2 changes: 1 addition & 1 deletion lib/Dialect/TTIR/Transforms/Fusion.cpp
@@ -84,9 +84,9 @@ class FuseReduceKeepDims : public OpRewritePattern<ReduceOpTy> {
     if (std::find(reduceDims.begin(), reduceDims.end(), i) ==
         reduceDims.end()) {
       outputShapeIfKeepDimTrue.push_back(inputShape[i]);
+      outputShapeIfKeepDimFalse.push_back(inputShape[i]);
     } else {
       outputShapeIfKeepDimTrue.push_back(1);
-      outputShapeIfKeepDimFalse.push_back(inputShape[i]);
     }
   }
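Reading the hunk: the push into outputShapeIfKeepDimFalse moves to the non-reduced branch, because with keep_dim = false a reduced dimension is dropped entirely rather than kept at its input extent. As a worked example using the shapes from the tests below: with inputShape = [1, 32, 128, 128] and reduceDims = {1, 3}, the loop now yields outputShapeIfKeepDimTrue = [1, 1, 128, 1] and outputShapeIfKeepDimFalse = [1, 128].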
11 changes: 11 additions & 0 deletions test/ttmlir/Dialect/TTIR/Fusion/broadcast/fuse_nop_broadcast.mlir
@@ -0,0 +1,11 @@
// RUN: ttmlir-opt --ttir-fusion %s | FileCheck %s
module attributes {} {
  func.func @forward(%arg0: tensor<1x32x128x128xf32>, %arg1: tensor<1x32x128x128xf32>) -> tensor<1x32x128x128xf32> {
    // CHECK-NOT: "ttir.broadcast"
    %dps0 = tensor.empty() : tensor<1x32x128x128xf32>
    %0 = "ttir.broadcast"(%arg1, %dps0) {dimension = [3 : i64]} : (tensor<1x32x128x128xf32>, tensor<1x32x128x128xf32>) -> tensor<1x32x128x128xf32>
    %dps1 = tensor.empty() : tensor<1x32x128x128xf32>
    %1 = "ttir.add"(%arg0, %0, %dps1) {operandSegmentSizes = array<i32: 2, 1>} : (tensor<1x32x128x128xf32>, tensor<1x32x128x128xf32>, tensor<1x32x128x128xf32>) -> tensor<1x32x128x128xf32>
    return %1 : tensor<1x32x128x128xf32>
  }
}
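Here the broadcast's input and output types are identical (tensor<1x32x128x128xf32>), so the op is a no-op and the CHECK-NOT expects --ttir-fusion to erase it. A sketch of the IR one might expect afterwards, assuming the add is simply rewired to %arg1 (not part of the commit):

  // hypothetical post-fusion IR, not from the commit
  %dps1 = tensor.empty() : tensor<1x32x128x128xf32>
  %1 = "ttir.add"(%arg0, %arg1, %dps1) {operandSegmentSizes = array<i32: 2, 1>} : (tensor<1x32x128x128xf32>, tensor<1x32x128x128xf32>, tensor<1x32x128x128xf32>) -> tensor<1x32x128x128xf32>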
10 changes: 10 additions & 0 deletions test/ttmlir/Dialect/TTIR/Fusion/maximum/test_fuse_max_to_relu.mlir
@@ -0,0 +1,10 @@
// RUN: ttmlir-opt --ttir-fusion %s | FileCheck %s
module attributes {} {
  func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
    %zero = "ttir.constant"() {value = dense<0.000000e+00> : tensor<64x128xf32>} : () -> tensor<64x128xf32>
    %0 = tensor.empty() : tensor<64x128xf32>
    // CHECK: "ttir.relu"
    %1 = "ttir.maximum"(%arg0, %zero, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
    return %1 : tensor<64x128xf32>
  }
}
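Since %zero is an all-zero splat, maximum(%arg0, %zero) computes exactly relu(%arg0), and the CHECK expects the binary op to be rewritten to the unary one. A sketch of the fused form; the exact attribute layout is an assumption, not taken from the commit:

  // hypothetical post-fusion IR, not from the commit
  %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>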
@@ -0,0 +1,23 @@
// RUN: ttmlir-opt --ttir-fusion %s | FileCheck %s
module attributes {} {
  func.func @keep_dim_3(%arg0: tensor<1x32x128x128xf32>) -> tensor<1x1x128x1xf32> {
    // CHECK-NOT: "ttir.reshape"
    %dps1 = tensor.empty() : tensor<1x128xf32>
    %1 = "ttir.sum"(%arg0, %dps1) {keep_dim = false, dim_arg = [3 : i32, 1 : i32]} : (tensor<1x32x128x128xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>
    %dps2 = tensor.empty() : tensor<1x1x128x1xf32>
    %2 = "ttir.reshape"(%1, %dps2) <{shape = [1: i32, 1: i32, 128: i32, 1: i32]}> : (tensor<1x128xf32>, tensor<1x1x128x1xf32>) -> tensor<1x1x128x1xf32>
    return %2 : tensor<1x1x128x1xf32>
  }
}

module attributes {} {
  func.func @lose_dim_3(%arg0: tensor<1x32x128x128xf32>) -> tensor<1x128xf32> {
    // CHECK-NOT: "ttir.reshape"
    %dps1 = tensor.empty() : tensor<1x1x128x1xf32>
    %1 = "ttir.sum"(%arg0, %dps1) {keep_dim = true, dim_arg = [1 : i32, 3 : i32]} : (tensor<1x32x128x128xf32>, tensor<1x1x128x1xf32>) -> tensor<1x1x128x1xf32>
    %dps2 = tensor.empty() : tensor<1x128xf32>
    %2 = "ttir.reshape"(%1, %dps2) <{shape = [1: i32, 128: i32]}> : (tensor<1x1x128x1xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>
    return %2 : tensor<1x128xf32>
  }
}
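Both functions exercise the FuseReduceKeepDims pattern touched in Fusion.cpp: a sum with keep_dim = false followed by a reshape that re-inserts the unit dimensions is equivalent to the same sum with keep_dim = true, and vice versa, so the trailing reshape should fold into the reduce; hence the CHECK-NOT lines. A sketch of the expected result for @keep_dim_3, assuming the pattern flips keep_dim and retargets the reduce at the reshape's output type:

  // hypothetical post-fusion IR, not from the commit
  %1 = "ttir.sum"(%arg0, %dps2) {keep_dim = true, dim_arg = [3 : i32, 1 : i32]} : (tensor<1x32x128x128xf32>, tensor<1x1x128x1xf32>) -> tensor<1x1x128x1xf32>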
@@ -0,0 +1,47 @@
// RUN: ttmlir-opt --ttir-fusion %s | FileCheck %s
module attributes {} {
  func.func @keep_dim_3(%arg0: tensor<1x32x128x128xf32>) -> tensor<1x32x128x1xf32> {
    // CHECK-NOT: "ttir.reshape"
    %dps1 = tensor.empty() : tensor<1x32x128xf32>
    %1 = "ttir.sum"(%arg0, %dps1) {keep_dim = false, dim_arg = [3 : i32]} : (tensor<1x32x128x128xf32>, tensor<1x32x128xf32>) -> tensor<1x32x128xf32>
    %dps2 = tensor.empty() : tensor<1x32x128x1xf32>
    %2 = "ttir.reshape"(%1, %dps2) <{shape = [1: i32, 32: i32, 128: i32, 1: i32]}> : (tensor<1x32x128xf32>, tensor<1x32x128x1xf32>) -> tensor<1x32x128x1xf32>
    return %2 : tensor<1x32x128x1xf32>
  }
}

module attributes {} {
  func.func @keep_dim_1(%arg0: tensor<1x32x128x128xf32>) -> tensor<1x1x128x128xf32> {
    // CHECK-NOT: "ttir.reshape"
    %dps1 = tensor.empty() : tensor<1x128x128xf32>
    %1 = "ttir.sum"(%arg0, %dps1) {keep_dim = false, dim_arg = [1 : i32]} : (tensor<1x32x128x128xf32>, tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    %dps2 = tensor.empty() : tensor<1x1x128x128xf32>
    %2 = "ttir.reshape"(%1, %dps2) <{shape = [1: i32, 1: i32, 128: i32, 128: i32]}> : (tensor<1x128x128xf32>, tensor<1x1x128x128xf32>) -> tensor<1x1x128x128xf32>
    return %2 : tensor<1x1x128x128xf32>
  }
}

module attributes {} {
  func.func @lose_dim_3(%arg0: tensor<1x32x128x128xf32>) -> tensor<1x32x128xf32> {
    // CHECK-NOT: "ttir.reshape"
    %dps1 = tensor.empty() : tensor<1x32x128x1xf32>
    %1 = "ttir.sum"(%arg0, %dps1) {keep_dim = true, dim_arg = [3 : i32]} : (tensor<1x32x128x128xf32>, tensor<1x32x128x1xf32>) -> tensor<1x32x128x1xf32>
    %dps2 = tensor.empty() : tensor<1x32x128xf32>
    %2 = "ttir.reshape"(%1, %dps2) <{shape = [1: i32, 32: i32, 128: i32]}> : (tensor<1x32x128x1xf32>, tensor<1x32x128xf32>) -> tensor<1x32x128xf32>
    return %2 : tensor<1x32x128xf32>
  }
}

module attributes {} {
  func.func @lose_dim_1(%arg0: tensor<1x32x128x128xf32>) -> tensor<1x128x128xf32> {
    // CHECK-NOT: "ttir.reshape"
    %dps1 = tensor.empty() : tensor<1x1x128x128xf32>
    %1 = "ttir.sum"(%arg0, %dps1) {keep_dim = true, dim_arg = [1 : i32]} : (tensor<1x32x128x128xf32>, tensor<1x1x128x128xf32>) -> tensor<1x1x128x128xf32>
    %dps2 = tensor.empty() : tensor<1x128x128xf32>
    %2 = "ttir.reshape"(%1, %dps2) <{shape = [1: i32, 128: i32, 128: i32]}> : (tensor<1x1x128x128xf32>, tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
    return %2 : tensor<1x128x128xf32>
  }
}
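These four functions cover the same two rewrite directions as the multi-dimension file above, but for a single reduced axis: once for the trailing dimension (dim 3) and once for an interior one (dim 1).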
15 changes: 15 additions & 0 deletions test/ttmlir/Dialect/TTIR/Fusion/softmax/test_fuse_softmax.mlir
@@ -0,0 +1,15 @@
// RUN: ttmlir-opt --ttir-fusion %s | FileCheck %s
module attributes {} {
  func.func @forward(%arg0: tensor<1x32x128x128xf32>) -> tensor<1x32x128x128xf32> {
    // CHECK: %[[C:.*]] = "ttir.softmax"[[C:.*]]
    %dps1 = tensor.empty() : tensor<1x32x128x128xf32>
    %1 = "ttir.exp"(%arg0, %dps1) {operandSegmentSizes = array<i32: 1, 1>} : (tensor<1x32x128x128xf32>, tensor<1x32x128x128xf32>) -> tensor<1x32x128x128xf32>
    %dps2 = tensor.empty() : tensor<1x32x128x1xf32>
    %2 = "ttir.sum"(%1, %dps2) {keep_dim = true, dim_arg = [3 : i32]} : (tensor<1x32x128x128xf32>, tensor<1x32x128x1xf32>) -> tensor<1x32x128x1xf32>
    %dps3 = tensor.empty() : tensor<1x32x128x128xf32>
    %3 = "ttir.broadcast"(%2, %dps3) {dimension = [3 : i64]} : (tensor<1x32x128x1xf32>, tensor<1x32x128x128xf32>) -> tensor<1x32x128x128xf32>
    %dps4 = tensor.empty() : tensor<1x32x128x128xf32>
    %4 = "ttir.div"(%1, %3, %dps4) {operandSegmentSizes = array<i32: 2, 1>} : (tensor<1x32x128x128xf32>, tensor<1x32x128x128xf32>, tensor<1x32x128x128xf32>) -> tensor<1x32x128x128xf32>
    return %4 : tensor<1x32x128x128xf32>
  }
}
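The body is the textbook softmax decomposition along dim 3: exponentiate, row-sum with keep_dim = true, broadcast the sums back to the input shape, and divide. The CHECK line expects the whole chain to collapse into a single op; a sketch of the expected form, assuming ttir.softmax carries the reduction axis as a dimension attribute:

  // hypothetical post-fusion IR, not from the commit
  %dps = tensor.empty() : tensor<1x32x128x128xf32>
  %0 = "ttir.softmax"(%arg0, %dps) <{dimension = 3 : si32}> : (tensor<1x32x128x128xf32>, tensor<1x32x128x128xf32>) -> tensor<1x32x128x128xf32>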
