[AutoBump] Merge with fixes of 37263b6c (Sep 04) (21) #374

Open · wants to merge 3 commits into base: bump_to_f4b9839d
9 changes: 1 addition & 8 deletions mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
@@ -1579,7 +1579,7 @@ def Tosa_ConcatOp : Tosa_InferTensorTypeOp<"concat"> {
 //===----------------------------------------------------------------------===//
 // Operator: pad
 //===----------------------------------------------------------------------===//
-def Tosa_PadOp : Tosa_InferShapedTypeOp<"pad", [InferTensorType]> {
+def Tosa_PadOp : Tosa_InferShapedTypeOp<"pad"> {
   let summary = "Pads a tensor with value specified.";
 
   let description = [{
@@ -1619,13 +1619,6 @@ def Tosa_PadOp : Tosa_InferShapedTypeOp<"pad", [InferTensorType]> {
   let hasCanonicalizer = 1;
   let hasFolder = 1;
   let hasVerifier = 1;
-
-  let extraClassDeclaration = [{
-    /// Returns true when two result types are compatible for this op;
-    /// Method used by InferTypeOpInterface.
-    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
-  }];
-
 }
 
 //===----------------------------------------------------------------------===//
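Note on this hunk: with [InferTensorType] dropped, tosa.pad keeps InferShapedTypeOp shape inference but no longer forces the result type to equal the inferred one, so isCompatibleReturnTypes becomes dead code. A minimal sketch of the observable difference, assuming the rewritten verifier below is the only remaining check (function name is illustrative, the IR is adapted from a test this PR deletes):

// Before this change, verification failed with "inferred type(s)
// 'tensor<15x23x5xf32>' are incompatible with return type(s) of operation
// 'tensor<13x21x3xf32>'"; afterwards only rank agreement is enforced, so
// this now round-trips.
func.func @pad_static_shape_mismatch(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
  %padding = "tosa.const"() {value = dense<1> : tensor<3x2xi32>} : () -> tensor<3x2xi32>
  %0 = tosa.pad %arg0, %padding : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<13x21x3xf32>
  return %0 : tensor<13x21x3xf32>
}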
47 changes: 12 additions & 35 deletions mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -800,8 +800,6 @@ LogicalResult tosa::PadOp::inferReturnTypeComponents(
     MLIRContext *context, ::std::optional<Location> location,
     PadOp::Adaptor adaptor,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
-
-  Type inputType = getElementTypeOrSelf(adaptor.getInput1());
   ShapeAdaptor inputShape(adaptor.getInput1().getType());
   ShapeAdaptor paddingShape(adaptor.getPadding().getType());
   SmallVector<int64_t> outputShape;
@@ -822,17 +820,15 @@ LogicalResult tosa::PadOp::inferReturnTypeComponents(
     }
 
     outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamic);
-    inferredReturnShapes.push_back(
-        ShapedTypeComponents(outputShape, inputType));
+    inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
     return success();
   }
 
   DenseIntElementsAttr paddings;
   // If the paddings value is not a constant, all dimensions must be dynamic.
   if (!matchPattern(adaptor.getPadding(), m_Constant(&paddings))) {
     outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
-    inferredReturnShapes.push_back(
-        ShapedTypeComponents(outputShape, inputType));
+    inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
     return success();
   }

@@ -852,39 +848,21 @@ LogicalResult tosa::PadOp::inferReturnTypeComponents(
                           paddingValues[i * 2 + 1]);
   }
 
-  inferredReturnShapes.push_back(ShapedTypeComponents(outputShape, inputType));
+  inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
   return success();
 }
 
-LogicalResult PadOp::verify() {
-  ShapedType inputType = llvm::cast<ShapedType>(getInput1().getType());
-  if (inputType.hasRank() && inputType.getRank() == 0) {
-    return emitOpError() << "input tensor rank must not be 0";
-  }
-
-  ShapedType paddingType = llvm::cast<ShapedType>(getPadding().getType());
-  if (paddingType.hasRank()) {
-    if (paddingType.getRank() != 2) {
-      return emitOpError() << "paddings must be a tensor of rank 2";
-    }
-    if (inputType.hasRank() && !paddingType.isDynamicDim(0) &&
-        inputType.getRank() != paddingType.getDimSize(0)) {
-      return emitOpError() << "paddings must be a tensor of shape ["
-                           << inputType.getRank() << ", 2]";
-    }
-    if (!paddingType.isDynamicDim(1) && paddingType.getDimSize(1) != 2) {
-      return emitOpError() << "paddings must be a tensor of shape ["
-                           << inputType.getRank() << ", 2]";
-    }
-
-    DenseIntElementsAttr paddings;
-    if (matchPattern(getPadding(), m_Constant(&paddings))) {
-      if (llvm::any_of(paddings,
-                       [](auto val) { return val.getSExtValue() < 0; })) {
-        return emitOpError() << "number of pad elements must be positive";
-      }
-    }
-  }
+LogicalResult tosa::PadOp::verify() {
+  RankedTensorType inputType = getInput1().getType();
+  RankedTensorType outputType = getOutput().getType();
+  TensorType paddingType = getPadding().getType();
+
+  if (inputType.getRank() != outputType.getRank())
+    return emitOpError() << "expect same input and output tensor rank.";
+
+  if (paddingType.hasRank() && paddingType.getRank() != 2)
+    return emitOpError() << "expect 'padding' tensor rank equal to 2.";
+
   return success();
 }

@@ -1441,7 +1419,6 @@ REDUCE_SHAPE_INFER(tosa::ReduceProdOp)
 REDUCE_SHAPE_INFER(tosa::ReduceSumOp)
 #undef REDUCE_SHAPE_INFER
 COMPATIBLE_RETURN_TYPES(tosa::ConcatOp)
-COMPATIBLE_RETURN_TYPES(tosa::PadOp)
 #undef COMPATIBLE_RETURN_TYPES
 
 template <typename T>
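The rewritten verifier is deliberately narrower: it relies on ODS already constraining input1 and output to ranked tensors (hence the RankedTensorType accessors) and checks only input/output rank equality plus, when ranked, a rank-2 padding operand. The per-dimension [rank, 2] shape match and the non-negativity of pad values are no longer verified here. A sketch of the behavioral edge, based on the tests this PR deletes below (op syntax as in the diff):

// Still rejected by verify(): ranks differ.
// error: 'tosa.pad' op expect same input and output tensor rank.
%0 = tosa.pad %arg0, %arg1 : (tensor<13x21xf32>, tensor<2x2xi32>) -> tensor<13x21x3xf32>

// No longer a verifier error: negative pad entries now pass verify().
%neg = "tosa.const"() {value = dense<[[0, 0], [0, -1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
%1 = tosa.pad %arg0, %neg : (tensor<13x21xf32>, tensor<2x2xi32>) -> tensor<?x?xf32>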
79 changes: 16 additions & 63 deletions mlir/test/Dialect/Tosa/invalid.mlir
@@ -105,83 +105,36 @@ func.func @test_pad_non_const(%arg0: tensor<13x21x3xf32>, %arg1: tensor<3x2xi32>

 // -----
 
-func.func @test_pad_non_const(%arg0: tensor<13x21x3xi8>, %arg1: tensor<i8>) -> tensor<?x?x?xi8> {
+func.func @test_pad_non_const(%arg0: tensor<13x21x3xi8>, %arg1: tensor<i8>) -> tensor<13x21x3xi8> {
   %0 = "tosa.const"() {value = dense<[[0, 0], [0, 1], [0, 1]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32>
   // expected-error@+1 {{'tosa.pad' op pad_const of pad is not constant}}
-  %1 = tosa.pad %arg0, %0, %arg1 : (tensor<13x21x3xi8>, tensor<3x2xi32>, tensor<i8>) -> tensor<?x?x?xi8>
-  return %1 : tensor<?x?x?xi8>
+  %1 = tosa.pad %arg0, %0, %arg1 : (tensor<13x21x3xi8>, tensor<3x2xi32>, tensor<i8>) -> tensor<13x21x3xi8>
+  return %1 : tensor<13x21x3xi8>
 }
 
 // -----
 
-func.func @test_pad_output_shape_mismatch(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
-  %0 = "tosa.const"() {value = dense<[[1, 1], [1, 1], [1, 1]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32>
-  // expected-error@+2 {{'tosa.pad' op failed to infer returned types}}
-  // expected-error@+1 {{'tosa.pad' op inferred type(s) 'tensor<15x23x5xf32>' are incompatible with return type(s) of operation 'tensor<13x21x3xf32>}}
-  %1 = tosa.pad %arg0, %0 : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<13x21x3xf32>
-  return %1 : tensor<13x21x3xf32>
-}
-
-// -----
-
-func.func @test_pad_type_mismatch(%arg0: tensor<13x21x3xf32>) -> tensor<15x23x5xi32> {
-  %0 = "tosa.const"() {value = dense<[[1, 1], [1, 1], [1, 1]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32>
-  // expected-error@+2 {{'tosa.pad' op failed to infer returned types}}
-  // expected-error@+1 {{'tosa.pad' op inferred type(s) 'tensor<15x23x5xf32>' are incompatible with return type(s) of operation 'tensor<15x23x5xi32>}}
-  %1 = tosa.pad %arg0, %0 : (tensor<13x21x3xf32>, tensor<3x2xi32>) -> tensor<15x23x5xi32>
-  return %1 : tensor<15x23x5xi32>
-}
-
-// -----
-
-func.func @test_pad_incorret_padding_rank(%arg0: tensor<13x21xf32>) -> tensor<13x21xf32> {
-  %0 = "tosa.const"() {value = dense<[0, 1]> : tensor<2xi32>} : () -> tensor<2xi32>
-  // expected-error@+1 {{'tosa.pad' op paddings must be a tensor of rank 2}}
-  %1 = tosa.pad %arg0, %0 : (tensor<13x21xf32>, tensor<2xi32>) -> tensor<13x21xf32>
-  return %1 : tensor<13x21xf32>
-}
-
-// -----
-
-func.func @test_pad_incorret_padding_shape(%arg0: tensor<13x21xf32>) -> tensor<13x21xf32> {
-  %0 = "tosa.const"() {value = dense<[[0, 0], [0, 1], [0, 1], [1, 1]]> : tensor<4x2xi32>} : () -> tensor<4x2xi32>
-  // expected-error@+1 {{'tosa.pad' op paddings must be a tensor of shape [2, 2]}}
-  %1 = tosa.pad %arg0, %0 : (tensor<13x21xf32>, tensor<4x2xi32>) -> tensor<13x21xf32>
-  return %1 : tensor<13x21xf32>
-}
-
-// -----
-
-func.func @test_pad_incorret_padding_shape(%arg0: tensor<13x21xf32>) -> tensor<13x21xf32> {
-  %0 = "tosa.const"() {value = dense<[[0, 0, 0, 1], [0, 1, 1, 1]]> : tensor<2x4xi32>} : () -> tensor<2x4xi32>
-  // expected-error@+1 {{'tosa.pad' op paddings must be a tensor of shape [2, 2]}}
-  %1 = tosa.pad %arg0, %0 : (tensor<13x21xf32>, tensor<2x4xi32>) -> tensor<13x21xf32>
-  return %1 : tensor<13x21xf32>
-}
-
-// -----
-
-func.func @test_pad_negative_padding(%arg0: tensor<13x21xf32>) -> tensor<?x?xf32> {
-  %0 = "tosa.const"() {value = dense<[[0, 0], [0, -1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
-  // expected-error@+1 {{'tosa.pad' op number of pad elements must be positive}}
-  %1 = tosa.pad %arg0, %0 : (tensor<13x21xf32>, tensor<2x2xi32>) -> tensor<?x?xf32>
-  return %1 : tensor<?x?xf32>
+func.func @test_pad_io_rank_mismatch(%arg0: tensor<13x21xf32>, %arg1: tensor<2x2xi32>) {
+  // expected-error@+1 {{'tosa.pad' op expect same input and output tensor rank.}}
+  %1 = tosa.pad %arg0, %arg1 : (tensor<13x21xf32>, tensor<2x2xi32>) -> tensor<13x21x3xf32>
+  return
 }
 
 // -----
 
-func.func @test_pad_incorrect_input(%arg0: f32, %arg1: i32) -> f32 {
-  // expected-error@+1 {{'tosa.pad' op operand #0 must be ranked tensor of number values, but got 'f32'}}
-  %1 = tosa.pad %arg0, %arg1 : (f32, i32) -> f32
-  return %1 : f32
+func.func @test_pad_invalid_padding_rank(%arg0: tensor<13x21xf32>, %arg1: tensor<2xi32>) {
+  // expected-error@+1 {{'tosa.pad' op expect 'padding' tensor rank equal to 2.}}
+  %1 = tosa.pad %arg0, %arg1 : (tensor<13x21xf32>, tensor<2xi32>) -> tensor<13x21xf32>
+  return
 }
 
 // -----
 
-func.func @test_pad_zero_rank_input(%arg0: tensor<f32>, %arg1: tensor<i32>) -> tensor<f32> {
-  // expected-error@+1 {{'tosa.pad' op input tensor rank must not be 0}}
-  %1 = tosa.pad %arg0, %arg1 : (tensor<f32>, tensor<i32>) -> tensor<f32>
-  return %1 : tensor<f32>
+func.func @test_pad_invalid_padConst_rank(%arg0: tensor<13x21xf32>, %arg1: tensor<2x2xi32>) {
+  %0 = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+  // expected-error@+1 {{'tosa.pad' op operand #2 must be 0D tensor of number values, but got 'tensor<1xf32>'}}
+  %1 = tosa.pad %arg0, %arg1, %0 : (tensor<13x21xf32>, tensor<2x2xi32>, tensor<1xf32>) -> tensor<13x21xf32>
+  return
 }
 
 // -----
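Usage note: cases in invalid.mlir are exercised through a RUN line of the conventional form below; the actual line sits above the hunk shown here, so treat this as the standard invocation for split-input diagnostic tests rather than a quote from this diff.

// RUN: mlir-opt %s -split-input-file -verify-diagnostics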