Skip to content

Commit

Permalink
Refactor GPU single layer tests (openvinotoolkit#21527)
Browse files Browse the repository at this point in the history
* Refactor GPU single layer tests
  • Loading branch information
olpipi authored Dec 12, 2023
1 parent 5a4f632 commit 358cd4b
Show file tree
Hide file tree
Showing 40 changed files with 2,041 additions and 2,447 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -86,10 +86,10 @@ std::vector<std::string> disabledTestPatterns() {
// unsupported metrics
R"(.*nightly_HeteroAutoBatchOVGetMetricPropsTest.*OVGetMetricPropsTest.*(FULL_DEVICE_NAME_with_DEVICE_ID|AVAILABLE_DEVICES|DEVICE_UUID|OPTIMIZATION_CAPABILITIES|MAX_BATCH_SIZE|DEVICE_GOPS|DEVICE_TYPE|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)",
// Issue: 111437
R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.CompareWithRefs.*)",
R"(.*smoke_GroupDeconv_2D_Dynamic_.*FP32/GroupDeconvolutionLayerGPUTest.CompareWithRefs.*)",
R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.Inference.*)",
R"(.*smoke_GroupDeconv_2D_Dynamic_.*FP32/GroupDeconvolutionLayerGPUTest.Inference.*)",
// Issue: 111440
R"(.*smoke_set1/GatherElementsGPUTest.CompareWithRefs.*)",
R"(.*smoke_set1/GatherElementsGPUTest.Inference.*)",
// New plugin work with tensors, so it means that blob in old API can have different pointers
R"(.*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)",
R"(.*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,16 +2,17 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/batch_to_space.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ov_models/builders.hpp"
#include "common_test_utils/test_constants.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/test_enums.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/batch_to_space.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;

struct BatchToSpaceParams {
std::vector<int64_t> block;
Expand All @@ -22,30 +23,29 @@ struct BatchToSpaceParams {
typedef std::tuple<
InputShape, // Input shapes
BatchToSpaceParams,
ElementType, // Element type
ngraph::helpers::InputLayerType, // block/begin/end input type
ov::element::Type, // Element type
ov::test::utils::InputLayerType, // block/begin/end input type
std::map<std::string, std::string> // Additional network configuration
> BatchToSpaceParamsLayerParamSet;

class BatchToSpaceLayerGPUTest : public testing::WithParamInterface<BatchToSpaceParamsLayerParamSet>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<BatchToSpaceParamsLayerParamSet>& obj) {
InputShape shapes;
BatchToSpaceParams params;
ElementType elementType;
ngraph::helpers::InputLayerType restInputType;
TargetDevice targetDevice;
ov::element::Type model_type;
ov::test::utils::InputLayerType restInputType;
std::map<std::string, std::string> additionalConfig;
std::tie(shapes, params, elementType, restInputType, additionalConfig) = obj.param;
std::tie(shapes, params, model_type, restInputType, additionalConfig) = obj.param;

std::ostringstream results;
results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_";
results << "TS=";
for (const auto& item : shapes.second) {
results << ov::test::utils::vec2str(item) << "_";
}
results << "netPRC=" << elementType << "_";
results << "netPRC=" << model_type << "_";
results << "block=" << ov::test::utils::vec2str(params.block) << "_";
results << "begin=" << ov::test::utils::vec2str(params.begin) << "_";
results << "end=" << ov::test::utils::vec2str(params.end) << "_";
Expand All @@ -59,7 +59,7 @@ class BatchToSpaceLayerGPUTest : public testing::WithParamInterface<BatchToSpace
return results.str();
}

void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
inputs.clear();
const auto& funcInputs = function->inputs();
for (size_t i = 0; i < funcInputs.size(); ++i) {
Expand Down Expand Up @@ -100,7 +100,7 @@ class BatchToSpaceLayerGPUTest : public testing::WithParamInterface<BatchToSpace
void SetUp() override {
InputShape shapes;
BatchToSpaceParams ssParams;
ngraph::helpers::InputLayerType restInputType;
ov::test::utils::InputLayerType restInputType;
std::map<std::string, std::string> additionalConfig;
std::tie(shapes, ssParams, inType, restInputType, additionalConfig) = this->GetParam();

Expand All @@ -112,7 +112,7 @@ class BatchToSpaceLayerGPUTest : public testing::WithParamInterface<BatchToSpace

std::vector<InputShape> inputShapes;
inputShapes.push_back(shapes);
if (restInputType == ngraph::helpers::InputLayerType::PARAMETER) {
if (restInputType == ov::test::utils::InputLayerType::PARAMETER) {
inputShapes.push_back(InputShape({static_cast<int64_t>(block.size())}, std::vector<ov::Shape>(shapes.second.size(), {block.size()})));
inputShapes.push_back(InputShape({static_cast<int64_t>(begin.size())}, std::vector<ov::Shape>(shapes.second.size(), {begin.size()})));
inputShapes.push_back(InputShape({static_cast<int64_t>(end.size())}, std::vector<ov::Shape>(shapes.second.size(), {end.size()})));
Expand All @@ -122,10 +122,10 @@ class BatchToSpaceLayerGPUTest : public testing::WithParamInterface<BatchToSpace

ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(inType, inputDynamicShapes.front())};
std::shared_ptr<ov::Node> blockInput, beginInput, endInput;
if (restInputType == ngraph::helpers::InputLayerType::PARAMETER) {
auto blockNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{block.size()});
auto beginNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{begin.size()});
auto endNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{end.size()});
if (restInputType == ov::test::utils::InputLayerType::PARAMETER) {
auto blockNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{block.size()});
auto beginNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{begin.size()});
auto endNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{end.size()});

params.push_back(blockNode);
params.push_back(beginNode);
Expand All @@ -135,38 +135,34 @@ class BatchToSpaceLayerGPUTest : public testing::WithParamInterface<BatchToSpace
beginInput = beginNode;
endInput = endNode;
} else {
blockInput = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ov::Shape{block.size()}, block);
beginInput = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ov::Shape{begin.size()}, begin);
endInput = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ov::Shape{end.size()}, end);
blockInput = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{block.size()}, block);
beginInput = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{begin.size()}, begin);
endInput = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{end.size()}, end);
}
auto ss = std::make_shared<ngraph::op::v1::BatchToSpace>(params[0], blockInput, beginInput, endInput);
auto ss = std::make_shared<ov::op::v1::BatchToSpace>(params[0], blockInput, beginInput, endInput);

ngraph::ResultVector results;
ov::ResultVector results;
for (size_t i = 0; i < ss->get_output_size(); i++) {
results.push_back(std::make_shared<ngraph::opset1::Result>(ss->output(i)));
results.push_back(std::make_shared<ov::op::v0::Result>(ss->output(i)));
}

function = std::make_shared<ngraph::Function>(results, params, "BatchToSpaceFuncTest");
function = std::make_shared<ov::Model>(results, params, "BatchToSpaceFuncTest");
}
};

TEST_P(BatchToSpaceLayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()

// Parameterized test entry point: SetUp() (from the fixture) builds the
// BatchToSpace model for the current parameter set, and run() — inherited from
// ov::test::SubgraphBaseTest — executes inference on the target device.
TEST_P(BatchToSpaceLayerGPUTest, Inference) {
run();
}

namespace {

std::map<std::string, std::string> emptyAdditionalConfig;

const std::vector<ElementType> inputPrecisions = {
ElementType::f32
const std::vector<ov::element::Type> inputPrecisions = {
ov::element::f32
};

const std::vector<ngraph::helpers::InputLayerType> restInputTypes = {
ngraph::helpers::InputLayerType::CONSTANT,
ngraph::helpers::InputLayerType::PARAMETER
const std::vector<ov::test::utils::InputLayerType> restInputTypes = {
ov::test::utils::InputLayerType::CONSTANT,
ov::test::utils::InputLayerType::PARAMETER
};

const std::vector<InputShape> inputShapesDynamic3D = {
Expand Down Expand Up @@ -224,4 +220,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Dynamic_5D, BatchToSpaceLay
BatchToSpaceLayerGPUTest::getTestCaseName);

} // namespace
} // namespace GPULayerTestsDefinitions
Original file line number Diff line number Diff line change
Expand Up @@ -2,56 +2,54 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_layer/broadcast.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ie_precision.hpp"
#include "ov_models/builders.hpp"
#include <common_test_utils/ov_tensor_utils.hpp>
#include <string>

using namespace ngraph;
using namespace InferenceEngine;
using namespace ov::test;
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/broadcast.hpp"

namespace GPULayerTestsDefinitions {
namespace {
using ov::test::InputShape;

typedef std::tuple<
std::vector<InputShape>, // Shapes
std::vector<int64_t>, // Target shapes
std::vector<int64_t>, // Axes mapping
ov::op::BroadcastType, // Broadcast mode
ov::element::Type_t, // Network precision
ov::element::Type, // Network precision
std::vector<bool>, // Const inputs
std::string // Device name
> BroadcastLayerTestParamsSet;

class BroadcastLayerGPUTest : public testing::WithParamInterface<BroadcastLayerTestParamsSet>,
virtual public SubgraphBaseTest {
virtual public ov::test::SubgraphBaseTest {
public:
static std::string getTestCaseName(testing::TestParamInfo<BroadcastLayerTestParamsSet> obj) {
std::vector<ov::test::InputShape> inputShapes;
std::vector<ov::test::InputShape> shapes;
std::vector<int64_t> targetShapes, axesMapping;
ov::op::BroadcastType mode;
ov::element::Type_t netPrecision;
ov::element::Type model_type;
std::vector<bool> isConstInputs;
std::string deviceName;
std::tie(inputShapes, targetShapes, axesMapping, mode, netPrecision, isConstInputs, deviceName) = obj.param;
std::tie(shapes, targetShapes, axesMapping, mode, model_type, isConstInputs, deviceName) = obj.param;

std::ostringstream result;
result << "IS=(";
for (const auto& shape : inputShapes) {
for (const auto& shape : shapes) {
result << ov::test::utils::partialShape2str({shape.first}) << "_";
}
result << ")_TS=(";
for (const auto& shape : inputShapes) {
for (const auto& shape : shapes) {
for (const auto& item : shape.second) {
result << ov::test::utils::vec2str(item) << "_";
}
}
result << "targetShape=" << ov::test::utils::vec2str(targetShapes) << "_";
result << "axesMapping=" << ov::test::utils::vec2str(axesMapping) << "_";
result << "mode=" << mode << "_";
result << "netPrec=" << netPrecision << "_";
result << "netPrec=" << model_type << "_";
result << "constIn=(" << (isConstInputs[0] ? "True" : "False") << "." << (isConstInputs[1] ? "True" : "False") << ")_";
result << "trgDevice=" << deviceName;

Expand All @@ -62,31 +60,31 @@ class BroadcastLayerGPUTest : public testing::WithParamInterface<BroadcastLayerT
std::vector<int64_t> targetShape, axesMapping;

void SetUp() override {
std::vector<InputShape> inputShapes;
std::vector<InputShape> shapes;
ov::op::BroadcastType mode;
ov::element::Type_t netPrecision;
ov::element::Type model_type;
std::vector<bool> isConstInput;
std::tie(inputShapes, targetShape, axesMapping, mode, netPrecision, isConstInput, targetDevice) = this->GetParam();
std::tie(shapes, targetShape, axesMapping, mode, model_type, isConstInput, targetDevice) = this->GetParam();

bool isTargetShapeConst = isConstInput[0];
bool isAxesMapConst = isConstInput[1];

const auto targetShapeRank = targetShape.size();
const auto axesMappingRank = axesMapping.size();

if (inputShapes.front().first.rank() != 0) {
inputDynamicShapes.push_back(inputShapes.front().first);
if (shapes.front().first.rank() != 0) {
inputDynamicShapes.push_back(shapes.front().first);
if (!isTargetShapeConst) {
inputDynamicShapes.push_back({ static_cast<int64_t>(targetShape.size()) });
}
if (!isAxesMapConst) {
inputDynamicShapes.push_back({ static_cast<int64_t>(axesMapping.size()) });
}
}
const size_t targetStaticShapeSize = inputShapes.front().second.size();
const size_t targetStaticShapeSize = shapes.front().second.size();
targetStaticShapes.resize(targetStaticShapeSize);
for (size_t i = 0lu; i < targetStaticShapeSize; ++i) {
targetStaticShapes[i].push_back(inputShapes.front().second[i]);
targetStaticShapes[i].push_back(shapes.front().second[i]);
if (!isTargetShapeConst)
targetStaticShapes[i].push_back({ targetShape.size() });
if (!isAxesMapConst)
Expand All @@ -95,9 +93,9 @@ class BroadcastLayerGPUTest : public testing::WithParamInterface<BroadcastLayerT

ov::ParameterVector functionParams;
if (inputDynamicShapes.empty()) {
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, targetStaticShapes.front().front()));
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, targetStaticShapes.front().front()));
} else {
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes.front()));
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front()));
if (!isTargetShapeConst) {
functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(ov::element::i64, inputDynamicShapes[1]));
functionParams.back()->set_friendly_name("targetShape");
Expand Down Expand Up @@ -140,19 +138,19 @@ class BroadcastLayerGPUTest : public testing::WithParamInterface<BroadcastLayerT
}
}

auto makeFunction = [](ParameterVector &params, const std::shared_ptr<Node> &lastNode) {
ResultVector results;
auto makeFunction = [](ov::ParameterVector &params, const std::shared_ptr<ov::Node> &lastNode) {
ov::ResultVector results;

for (size_t i = 0; i < lastNode->get_output_size(); i++)
results.push_back(std::make_shared<opset1::Result>(lastNode->output(i)));
results.push_back(std::make_shared<ov::op::v0::Result>(lastNode->output(i)));

return std::make_shared<Function>(results, params, "BroadcastLayerGPUTest");
return std::make_shared<ov::Model>(results, params, "BroadcastLayerGPUTest");
};

function = makeFunction(functionParams, broadcastOp);
}

void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
inputs.clear();
const auto& funcInputs = function->inputs();
for (size_t i = 0lu; i < funcInputs.size(); i++) {
Expand Down Expand Up @@ -183,19 +181,15 @@ class BroadcastLayerGPUTest : public testing::WithParamInterface<BroadcastLayerT
}
};

TEST_P(BroadcastLayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()

// Parameterized test entry point: SetUp() (from the fixture) builds the
// Broadcast model for the current parameter set, and run() — inherited from
// ov::test::SubgraphBaseTest — executes inference on the target device.
TEST_P(BroadcastLayerGPUTest, Inference) {
run();
}

namespace {

const std::vector<ov::element::Type_t> inputPrecisionsFloat = {
const std::vector<ov::element::Type> inputPrecisionsFloat = {
ov::element::f32,
};

const std::vector<ov::element::Type_t> inputPrecisionsInt = {
const std::vector<ov::element::Type> inputPrecisionsInt = {
ov::element::i32,
};

Expand Down Expand Up @@ -407,5 +401,3 @@ INSTANTIATE_TEST_CASE_P(smoke_broadcast_6d_numpy_compareWithRefs_dynamic,
BroadcastLayerGPUTest::getTestCaseName);

} // namespace

} // namespace GPULayerTestsDefinitions
Loading

0 comments on commit 358cd4b

Please sign in to comment.