From ebca03d28e90c4992ad063f28517eba19ae81e3d Mon Sep 17 00:00:00 2001 From: "Kim, Eddy" Date: Mon, 4 Nov 2024 21:39:39 +0900 Subject: [PATCH] added unit tests --- .../activations_scaling.hpp | 29 ++-- .../activations_scaling.cpp | 101 +++++++---- .../activations_scaling_test.cpp | 162 +++++++++++++++++- .../src/plugin/transformations_pipeline.cpp | 2 +- 4 files changed, 242 insertions(+), 52 deletions(-) diff --git a/src/common/transformations/include/transformations/common_optimizations/activations_scaling.hpp b/src/common/transformations/include/transformations/common_optimizations/activations_scaling.hpp index 050d0e4d58142f..f71a98725c1066 100644 --- a/src/common/transformations/include/transformations/common_optimizations/activations_scaling.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/activations_scaling.hpp @@ -17,9 +17,9 @@ class TRANSFORMATIONS_API ActivationsScaling; namespace activations_scaling { class TRANSFORMATIONS_API ScaleDownSingleLayer; -class TRANSFORMATIONS_API MulGroupNormFusion; -class TRANSFORMATIONS_API MulMulAddFusion; -class TRANSFORMATIONS_API CropTransformation; +class TRANSFORMATIONS_API MulGroupNormTransformation; +class TRANSFORMATIONS_API MulMulAddTransformation; +class TRANSFORMATIONS_API SplitTransformation; class TRANSFORMATIONS_API ReshapeTransformation; class TRANSFORMATIONS_API MulMulMulTransformation; class TRANSFORMATIONS_API MulMVNTransformation; @@ -29,7 +29,10 @@ class TRANSFORMATIONS_API ConcatTransformation; } // namespace pass } // namespace ov -// ActivationsScaling scales down activations to prevent overflow due to the limited range of FP16 +// ActivationsScaling makes activation values smaller to prevent overflow due to the limited range of FP16 +// This feature is controlled by ov::hint::activations_scale_factor. +// For example, when this property is set as 16, activations are divided by 16. +// If ov::hint::activations_scale_factor is less than zero, it is disabled. 
class ov::pass::ActivationsScaling : public ov::pass::ModelPass { public: OPENVINO_RTTI("ActivationsScaling", "0"); @@ -46,22 +49,22 @@ class ov::pass::activations_scaling::ScaleDownSingleLayer : public ov::pass::Mat ScaleDownSingleLayer(float scale_factor); }; -class ov::pass::activations_scaling::MulGroupNormFusion : public ov::pass::MatcherPass { +class ov::pass::activations_scaling::MulGroupNormTransformation : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MulGroupNormFusion", "0"); - MulGroupNormFusion(); + OPENVINO_RTTI("MulGroupNormTransformation", "0"); + MulGroupNormTransformation(); }; -class ov::pass::activations_scaling::MulMulAddFusion : public ov::pass::MatcherPass { +class ov::pass::activations_scaling::MulMulAddTransformation : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MulMulAddFusion", "0"); - MulMulAddFusion(); + OPENVINO_RTTI("MulMulAddTransformation", "0"); + MulMulAddTransformation(); }; -class ov::pass::activations_scaling::CropTransformation : public ov::pass::MatcherPass { +class ov::pass::activations_scaling::SplitTransformation : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("CropTransformation", "0"); - CropTransformation(); + OPENVINO_RTTI("SplitTransformation", "0"); + SplitTransformation(); }; class ov::pass::activations_scaling::ReshapeTransformation : public ov::pass::MatcherPass { diff --git a/src/common/transformations/src/transformations/common_optimizations/activations_scaling.cpp b/src/common/transformations/src/transformations/common_optimizations/activations_scaling.cpp index c89dbddbe5c163..f3ae3d58f40001 100644 --- a/src/common/transformations/src/transformations/common_optimizations/activations_scaling.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/activations_scaling.cpp @@ -53,7 +53,9 @@ using namespace ov::pass::pattern; using ov::pass::pattern::op::Or; // Add scale_down and scale_up layers around Convolution and MatMul nodes -// Conv/MatMul ==> 
Multiply(scale_down) --> Conv/MatMul --> Multiply(scale_up) +// Conv/MatMul +// ==> +// Multiply(scale_down by scale_factor) --> Conv/MatMul --> Multiply(scale_up by scale_factor) ov::pass::activations_scaling::ScaleDownSingleLayer::ScaleDownSingleLayer(float scale_factor) { MATCHER_SCOPE(ScaleDownSingleLayer); @@ -137,7 +139,7 @@ ov::pass::activations_scaling::ScaleDownSingleLayer::ScaleDownSingleLayer(float this->register_matcher(m, callback); } -// MulMulAddFusion makes the target pattern to be easy to be merged with other nodes. +// MulMulAddTransformation makes the target pattern to be easy to be merged with following nodes. // // input_a const_a input_b const_b input_a (const_a/const_b) // \ / \ / \ / // Mul_a Mul_b Mul_a' // \ / | // Add ==> Add // | | // Multiply_b_mma // // (input_a * const_a) + (input_b * const_b) ==> ((input_a * (const_a / const_b)) + input_b) * const_b -ov::pass::activations_scaling::MulMulAddFusion::MulMulAddFusion() { - MATCHER_SCOPE(MulMulAddFusion); +ov::pass::activations_scaling::MulMulAddTransformation::MulMulAddTransformation() { + MATCHER_SCOPE(MulMulAddTransformation); auto activation0_m = any_input(is_non_const_node); auto scale_const0_m = ov::pass::pattern::wrap_type(is_scalar_node); @@ -204,7 +206,7 @@ ov::pass::activations_scaling::MulMulAddFusion::MulMulAddFusion() { return true; }; - auto m = std::make_shared(add_m, "MulMulAddFusion"); + auto m = std::make_shared(add_m, "MulMulAddTransformation"); this->register_matcher(m, callback); } @@ -214,9 +216,11 @@ ov::pass::activations_scaling::MulMulAddFusion::MulMulAddFusion() { // // So, we can skip Multiply that is connected to GroupNormalization.
// -// input --> Multiply --> GroupNormalization ==> input --> GroupNormalization -ov::pass::activations_scaling::MulGroupNormFusion::MulGroupNormFusion() { - MATCHER_SCOPE(MulGroupNormFusion); +// input --> Multiply --> GroupNormalization +// ==> +// input --> GroupNormalization +ov::pass::activations_scaling::MulGroupNormTransformation::MulGroupNormTransformation() { + MATCHER_SCOPE(MulGroupNormTransformation); auto activation_m = any_input(is_non_const_node); auto scale_const_m = ov::pass::pattern::wrap_type(is_scalar_node); @@ -239,13 +243,14 @@ ov::pass::activations_scaling::MulGroupNormFusion::MulGroupNormFusion() { } if (mul && norm) { - norm->input(0).replace_source_output(mul->get_input_source_output(0)); + size_t activation_index = ov::is_type(mul->get_input_source_output(1).get_node()) ? 0 : 1; + norm->input(0).replace_source_output(mul->get_input_source_output(activation_index)); return true; } return false; }; - auto m = std::make_shared(norm_m, "MulGroupNormFusion"); + auto m = std::make_shared(norm_m, "MulGroupNormTransformation"); this->register_matcher(m, callback); } @@ -255,7 +260,9 @@ ov::pass::activations_scaling::MulGroupNormFusion::MulGroupNormFusion() { // // So, we can skip Multiply that is connected to MVN. // -// input --> Multiply --> MVN ==> input --> MVN +// input --> Multiply --> MVN +// ==> +// input --> MVN ov::pass::activations_scaling::MulMVNTransformation::MulMVNTransformation() { MATCHER_SCOPE(MulMVNTransformation); @@ -279,7 +286,8 @@ ov::pass::activations_scaling::MulMVNTransformation::MulMVNTransformation() { } if (mul && norm) { - norm->input(0).replace_source_output(mul->get_input_source_output(0)); + size_t activation_index = ov::is_type(mul->get_input_source_output(1).get_node()) ? 
0 : 1; + norm->input(0).replace_source_output(mul->get_input_source_output(activation_index)); return true; } return false; @@ -289,8 +297,16 @@ ov::pass::activations_scaling::MulMVNTransformation::MulMVNTransformation() { this->register_matcher(m, callback); } -ov::pass::activations_scaling::CropTransformation::CropTransformation() { - MATCHER_SCOPE(CropTransformation); +// input const input +// \ / | +// Multiply ==> VariadicSplit +// | const / | const \ const +// VariadicSplit | / | / \ / +// / | \ Multiply_a Multiply_b Multiply_c +// output_a output_b output_c | | | +// output_a output_b output_c +ov::pass::activations_scaling::SplitTransformation::SplitTransformation() { + MATCHER_SCOPE(SplitTransformation); auto activation_m = any_input(is_non_const_node); auto scale_const_m = ov::pass::pattern::wrap_type(is_scalar_node); @@ -321,12 +337,14 @@ ov::pass::activations_scaling::CropTransformation::CropTransformation() { target_inputs[i] = split->get_output_target_inputs(i); } - split->input(0).replace_source_output(mul->input(0).get_source_output()); + size_t activation_index = ov::is_type(mul->get_input_source_output(1).get_node()) ? 0 : 1; + size_t const_index = (activation_index == 1) ? 
0 : 1; + split->input(0).replace_source_output(mul->input(activation_index).get_source_output()); for (size_t i = 0; i < num_split_outputs; i++) { auto new_mul = register_new_node( split->output(i), - mul->input(1).get_source_output()); + mul->input(const_index).get_source_output()); new_mul->set_friendly_name(mul->get_friendly_name() + "_" + std::to_string(i)); ov::copy_runtime_info(mul, new_mul); @@ -340,10 +358,15 @@ ov::pass::activations_scaling::CropTransformation::CropTransformation() { return false; }; - auto m = std::make_shared(split_m, "CropTransformation"); + auto m = std::make_shared(split_m, "SplitTransformation"); this->register_matcher(m, callback); } +// input const input +// \ / | +// Multiply ==> Reshape const +// | | / +// Reshape Multiply ov::pass::activations_scaling::ReshapeTransformation::ReshapeTransformation() { MATCHER_SCOPE(ReshapeTransformation); @@ -389,7 +412,7 @@ ov::pass::activations_scaling::ReshapeTransformation::ReshapeTransformation() { this->register_matcher(m, callback); } -// MulMulAddFusion makes the target pattern to be easy to be merged with other nodes. +// MulMulAddTransformation makes the target pattern to be easy to be merged with other nodes. 
// // input_a const_a input_b const_b input_a input_b // \ / \ / \ / @@ -456,6 +479,23 @@ ov::pass::activations_scaling::MulMulMulTransformation::MulMulMulTransformation( this->register_matcher(m, callback); } +// input_a const_a input_b const_b input_c const_c +// \ / \ / \ / +// Multiply_a Multiply_b Multiply_c +// \ | / +// \ | / +// ---------- Concat ------------ +// ==> +// (const_a (const_b (const_c +// input_a /const_c) input_b /const_c) input_c /const_c) +// \ / \ / \ / +// Multiply_a Multiply_b Multiply_c +// \ | / +// \ | / +// ---------- Concat ------------ +// | const_c +// | / +// Multiply ov::pass::activations_scaling::ConcatTransformation::ConcatTransformation() { MATCHER_SCOPE(ConcatTransformation); @@ -473,30 +513,23 @@ ov::pass::activations_scaling::ConcatTransformation::ConcatTransformation() { } // check if all inputs are Multiply with scalar operand - bool can_be_transformed = true; ov::Output last_dep_const; for (auto &input : concat->inputs()) { auto dep_node = std::dynamic_pointer_cast(input.get_source_output().get_node_shared_ptr()); if (!dep_node) { - can_be_transformed = false; - break; + return false; } auto dep_const0 = std::dynamic_pointer_cast(dep_node->input(0).get_source_output().get_node_shared_ptr()); auto dep_const1 = std::dynamic_pointer_cast(dep_node->input(1).get_source_output().get_node_shared_ptr()); if (!dep_const0 && !dep_const1) { - can_be_transformed = false; - break; + return false; } last_dep_const = dep_const0 ? 
dep_node->input(0).get_source_output() : dep_node->input(1).get_source_output(); if (!is_scalar_node(last_dep_const)) { - can_be_transformed = false; - break; + return false; } } - if (!can_be_transformed) - return false; - auto target_inputs = concat->get_output_target_inputs(0); for (auto &input : concat->inputs()) { @@ -540,17 +573,17 @@ bool ov::pass::ActivationsScaling::run_on_model(const std::shared_ptr manager.register_pass(m_scale_factor); manager.register_pass(); - manager.register_pass(); + manager.register_pass(); manager.register_pass(); - manager.register_pass(); - manager.register_pass(); - manager.register_pass(); + manager.register_pass(); + manager.register_pass(); + manager.register_pass(); manager.register_pass(); manager.register_pass(); - manager.register_pass(); + manager.register_pass(); manager.register_pass(); manager.register_pass(); - manager.register_pass(); + manager.register_pass(); manager.register_pass(); manager.run_passes(f); diff --git a/src/common/transformations/tests/common_optimizations/activations_scaling_test.cpp b/src/common/transformations/tests/common_optimizations/activations_scaling_test.cpp index 9dd34ff27dbce6..8664dbffdfc6ca 100644 --- a/src/common/transformations/tests/common_optimizations/activations_scaling_test.cpp +++ b/src/common/transformations/tests/common_optimizations/activations_scaling_test.cpp @@ -12,11 +12,15 @@ #include "common_test_utils/graph_comparator.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "openvino/op/add.hpp" +#include "openvino/op/concat.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/convolution.hpp" #include "openvino/op/group_normalization.hpp" #include "openvino/op/multiply.hpp" +#include "openvino/op/mvn.hpp" #include "openvino/op/parameter.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/variadic_split.hpp" #include "openvino/pass/manager.hpp" #include "transformations/utils/utils.hpp" @@ -60,7 +64,7 @@ TEST_F(TransformationTestsF, 
ScaleDownSingleLayerTest) { } } -TEST_F(TransformationTestsF, MulMulAddFusionTest) { +TEST_F(TransformationTestsF, MulMulAddTransformationTest) { { auto input0 = std::make_shared(ov::element::f16, ov::PartialShape{1, 3, 16, 16}); auto scale_const_0 = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); @@ -73,7 +77,7 @@ TEST_F(TransformationTestsF, MulMulAddFusionTest) { auto result = std::make_shared(convert); model = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{input0, input1}); - manager.register_pass(); + manager.register_pass(); } { auto input0 = std::make_shared(ov::element::f16, ov::PartialShape{1, 3, 16, 16}); @@ -90,7 +94,7 @@ TEST_F(TransformationTestsF, MulMulAddFusionTest) { } } -TEST_F(TransformationTestsF, MulGroupNormFusionTest) { +TEST_F(TransformationTestsF, MulGroupNormTransformationTest) { { auto input = std::make_shared(ov::element::f16, ov::PartialShape{1, 3, 16, 16}); auto scale_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); @@ -103,7 +107,7 @@ TEST_F(TransformationTestsF, MulGroupNormFusionTest) { auto result = std::make_shared(convert); model = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{input}); - manager.register_pass(); + manager.register_pass(); } { auto input = std::make_shared(ov::element::f16, ov::PartialShape{1, 3, 16, 16}); @@ -117,3 +121,153 @@ TEST_F(TransformationTestsF, MulGroupNormFusionTest) { model_ref = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{input}); } } + +TEST_F(TransformationTestsF, MulMVNTransformationTest) { + { + auto input = std::make_shared(ov::element::f16, ov::PartialShape{1, 3, 224, 224}); + auto scale_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); + auto mul = std::make_shared(input, scale_const); + auto norm_axes_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {1, 2, 3}); + auto mvn = + std::make_shared(mul, norm_axes_const, true, 0.01f, 
ov::op::MVNEpsMode::INSIDE_SQRT); + auto convert = std::make_shared(mvn, ov::element::f32); + auto result = std::make_shared(convert); + + model = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{input}); + manager.register_pass(); + } + { + auto input = std::make_shared(ov::element::f16, ov::PartialShape{1, 3, 224, 224}); + auto norm_axes_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {1, 2, 3}); + auto mvn = + std::make_shared(input, norm_axes_const, true, 0.01f, ov::op::MVNEpsMode::INSIDE_SQRT); + auto convert = std::make_shared(mvn, ov::element::f32); + auto result = std::make_shared(convert); + + model_ref = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{input}); + } +} + +TEST_F(TransformationTestsF, SplitTransformationTest) { + { + auto input = std::make_shared(ov::element::f16, ov::PartialShape{6, 12, 10, 24}); + auto scale_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); + auto mul = std::make_shared(input, scale_const); + auto axis = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {0}); + auto split_length = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {1, 2, 3}); + auto split = std::make_shared(mul, axis, split_length); + auto convert0 = std::make_shared(split->output(0), ov::element::f32); + auto result0 = std::make_shared(convert0); + auto convert1 = std::make_shared(split->output(1), ov::element::f32); + auto result1 = std::make_shared(convert1); + auto convert2 = std::make_shared(split->output(2), ov::element::f32); + auto result2 = std::make_shared(convert2); + + model = std::make_shared(ov::ResultVector{result0, result1, result2}, ov::ParameterVector{input}); + manager.register_pass(); + } + { + auto input = std::make_shared(ov::element::f16, ov::PartialShape{6, 12, 10, 24}); + auto axis = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {0}); + auto split_length = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {1, 
2, 3}); + auto split = std::make_shared(input, axis, split_length); + auto scale_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); + auto mul0 = std::make_shared(split->output(0), scale_const); + auto convert0 = std::make_shared(mul0, ov::element::f32); + auto result0 = std::make_shared(convert0); + auto mul1 = std::make_shared(split->output(1), scale_const); + auto convert1 = std::make_shared(mul1, ov::element::f32); + auto result1 = std::make_shared(convert1); + auto mul2 = std::make_shared(split->output(2), scale_const); + auto convert2 = std::make_shared(mul2, ov::element::f32); + auto result2 = std::make_shared(convert2); + + model_ref = std::make_shared(ov::ResultVector{result0, result1, result2}, ov::ParameterVector{input}); + } +} + +TEST_F(TransformationTestsF, ReshapeTransformationTest) { + { + auto input = std::make_shared(ov::element::f16, ov::PartialShape{6, 12, 10, 24}); + auto scale_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); + auto mul = std::make_shared(input, scale_const); + auto shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 0, 1, -1}); + auto reshape = std::make_shared(mul, shape, true); + auto convert = std::make_shared(reshape, ov::element::f32); + auto result = std::make_shared(convert); + + model = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{input}); + manager.register_pass(); + } + { + auto input = std::make_shared(ov::element::f16, ov::PartialShape{6, 12, 10, 24}); + auto shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 0, 1, -1}); + auto reshape = std::make_shared(input, shape, true); + auto scale_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); + auto mul = std::make_shared(reshape, scale_const); + auto convert = std::make_shared(mul, ov::element::f32); + auto result = std::make_shared(convert); + + model_ref = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{input}); + } 
+} + +TEST_F(TransformationTestsF, MulMulMulTransformationTest) { + { + auto input0 = std::make_shared(ov::element::f16, ov::PartialShape{6, 12, 10, 24}); + auto scale_const0 = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); + auto mul0 = std::make_shared(input0, scale_const0); + auto input1 = std::make_shared(ov::element::f16, ov::PartialShape{6, 12, 10, 24}); + auto scale_const1 = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); + auto mul1 = std::make_shared(input1, scale_const1); + auto mul2 = std::make_shared(mul0, mul1); + auto convert = std::make_shared(mul2, ov::element::f32); + auto result = std::make_shared(convert); + + model = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{input0, input1}); + manager.register_pass(); + } + { + auto input0 = std::make_shared(ov::element::f16, ov::PartialShape{6, 12, 10, 24}); + auto input1 = std::make_shared(ov::element::f16, ov::PartialShape{6, 12, 10, 24}); + auto mul = std::make_shared(input0, input1); + auto new_scale_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); + auto new_mul = std::make_shared(mul, new_scale_const); + auto convert = std::make_shared(new_mul, ov::element::f32); + auto result = std::make_shared(convert); + + model_ref = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{input0, input1}); + } +} + +TEST_F(TransformationTestsF, ConcatTransformationTest) { + { + auto input0 = std::make_shared(ov::element::f16, ov::PartialShape{6, 12, 10, 24}); + auto scale_const0 = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); + auto mul0 = std::make_shared(input0, scale_const0); + auto input1 = std::make_shared(ov::element::f16, ov::PartialShape{6, 12, 10, 24}); + auto scale_const1 = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); + auto mul1 = std::make_shared(input1, scale_const1); + auto concat = std::make_shared(OutputVector{mul0, mul1}, 0); + auto convert = 
std::make_shared(concat, ov::element::f32); + auto result = std::make_shared(convert); + + model = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{input0, input1}); + manager.register_pass(); + } + { + auto input0 = std::make_shared(ov::element::f16, ov::PartialShape{6, 12, 10, 24}); + auto scale_const0 = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); + auto mul0 = std::make_shared(input0, scale_const0); + auto input1 = std::make_shared(ov::element::f16, ov::PartialShape{6, 12, 10, 24}); + auto scale_const1 = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); + auto mul1 = std::make_shared(input1, scale_const1); + auto concat = std::make_shared(OutputVector{mul0, mul1}, 0); + auto new_scale_const = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{1}, {10}); + auto new_mul = std::make_shared(concat, new_scale_const); + auto convert = std::make_shared(new_mul, ov::element::f32); + auto result = std::make_shared(convert); + + model_ref = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{input0, input1}); + } +} diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index a815bf600d6995..db70922b48aa0e 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -847,7 +847,7 @@ void TransformationsPipeline::apply(std::shared_ptr func) { return static_cast((gamma_shape.back() / vec_size)) > static_cast(device_info.max_work_group_size); }); - manager.register_pass(); + // manager.register_pass(); manager.register_pass(); manager.register_pass(); manager.register_pass(device_info.supports_immad);