
add logical_not unittest (#1534)
ccsuzzh authored Jun 26, 2023
1 parent 225afa0 commit 0f31731
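
The new test checks elementwise logical negation: for every dtype covered below, each zero (or False) input element maps to True and each nonzero (or True) element maps to False, with a bool-typed output. A minimal NumPy sketch of that reference semantics (illustrative only, not part of the committed file; the test itself compares Paddle output against CINN output rather than against NumPy):

import numpy as np

# Reference semantics sketched with NumPy: logical_not yields a bool
# array that is True exactly where the input is zero / False.
for dtype in ["bool", "int8", "int16", "int32", "int64", "float32", "float64"]:
    x = np.array([0, 1, -3, 7], dtype=dtype)
    y = np.logical_not(x)
    assert y.dtype == np.bool_
    assert (y == (x == 0)).all()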
Showing 1 changed file with 156 additions and 0 deletions.
python/tests/ops/test_logical_not_op.py
# Copyright (c) 2023 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper
import paddle
import cinn
from cinn.frontend import *
from cinn.common import *


@OpTestTool.skip_if(not is_compiled_with_cuda(),
                    "x86 test will be skipped due to timeout.")
class TestLogicalNotOp(OpTest):
    def setUp(self):
        print(f"\nRunning {self.__class__.__name__}: {self.case}")
        self.prepare_inputs()

    def prepare_inputs(self):
        self.x_np = self.random(
            shape=self.case["x_shape"],
            dtype=self.case["x_dtype"],
            low=-10,
            high=100)

    def build_paddle_program(self, target):
        x = paddle.to_tensor(self.x_np, stop_gradient=False)
        out = paddle.logical_not(x)
        self.paddle_outputs = [out]

    def build_cinn_program(self, target):
        builder = NetBuilder("logical_not")
        x = builder.create_input(
            self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
            "x")
        out = builder.logical_not(x)

        prog = builder.build()
        res = self.get_cinn_output(prog, target, [x], [self.x_np], [out])

        self.cinn_outputs = res

    def test_check_results(self):
        self.check_outputs_and_grads(all_equal=True)


class TestLogicalNotCase1(TestCaseHelper):
    def init_attrs(self):
        self.class_name = "TestLogicalNotCase1"
        self.cls = TestLogicalNotOp
        self.inputs = [{"x_shape": [512, 256]}]
        self.dtypes = [{
            "x_dtype": "bool"
        }, {
            "x_dtype": "int8"
        }, {
            "x_dtype": "int16"
        }, {
            "x_dtype": "int32"
        }, {
            "x_dtype": "int64"
        }, {
            "x_dtype": "float32"
        }, {
            "x_dtype": "float64"
        }]
        self.attrs = []


class TestLogicalNotCase2(TestCaseHelper):
    def init_attrs(self):
        self.class_name = "TestLogicalNotCase2"
        self.cls = TestLogicalNotOp
        self.inputs = [{
            "x_shape": [1]
        }, {
            "x_shape": [1024]
        }, {
            "x_shape": [512, 256]
        }, {
            "x_shape": [128, 64, 32]
        }, {
            "x_shape": [128, 2048, 32]
        }, {
            "x_shape": [16, 8, 4, 2]
        }, {
            "x_shape": [1, 1, 1, 1]
        }, {
            "x_shape": [16, 8, 4, 2, 1]
        }]
        self.dtypes = [{"x_dtype": "bool"}]
        self.attrs = []


class TestLogicalNotCaseWithBroadcast1(TestCaseHelper):
    def init_attrs(self):
        self.class_name = "TestLogicalNotCaseWithBroadcast1"
        self.cls = TestLogicalNotOp
        self.inputs = [{"x_shape": [56]}]
        self.dtypes = [{
            "x_dtype": "bool"
        }, {
            "x_dtype": "int8"
        }, {
            "x_dtype": "int16"
        }, {
            "x_dtype": "int32"
        }, {
            "x_dtype": "int64"
        }, {
            "x_dtype": "float32"
        }, {
            "x_dtype": "float64"
        }]
        self.attrs = []


class TestLogicalNotCaseWithBroadcast2(TestCaseHelper):
    def init_attrs(self):
        self.class_name = "TestLogicalNotCaseWithBroadcast2"
        self.cls = TestLogicalNotOp
        self.inputs = [{
            "x_shape": [56]
        }, {
            "x_shape": [1024]
        }, {
            "x_shape": [512, 256]
        }, {
            "x_shape": [128, 64, 32]
        }, {
            "x_shape": [16, 1, 1, 2]
        }, {
            "x_shape": [16, 1, 1, 2, 1]
        }]
        self.dtypes = [{"x_dtype": "bool"}]
        self.attrs = []


if __name__ == "__main__":
TestLogicalNotCase1().run()
TestLogicalNotCase2().run()
TestLogicalNotCaseWithBroadcast1().run()
TestLogicalNotCaseWithBroadcast2().run()
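
Further coverage would presumably follow the same pattern as the classes above: one more TestCaseHelper subclass listing the extra shapes or dtypes, plus a .run() call in the main block. A hypothetical sketch (the class name and case below are illustrative, not part of this commit, and assume the same op_test_helper environment this file already imports):

# Hypothetical extra case, mirroring the pattern used in this file.
class TestLogicalNotCaseLargeFloat(TestCaseHelper):
    def init_attrs(self):
        self.class_name = "TestLogicalNotCaseLargeFloat"
        self.cls = TestLogicalNotOp
        self.inputs = [{"x_shape": [1024, 1024]}]
        self.dtypes = [{"x_dtype": "float32"}]
        self.attrs = []


# Then, in the __main__ block:
# TestLogicalNotCaseLargeFloat().run()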
