Support OpExtension in frontends (openvinotoolkit#22961)
### Details:
 - *Support OpExtension in frontends*
 - *Support OPENVINO_FRAMEWORK_MAP for PyTorch FE* (a usage sketch follows the changed-files summary below)

### Tickets:
 - *CVS-132471*

---------

Co-authored-by: Sergey Lyalin <[email protected]>
mvafin and slyalin authored Feb 23, 2024
1 parent 05b0fca commit 5f64019
Showing 11 changed files with 291 additions and 28 deletions.
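
For orientation, here is a minimal sketch of what the change enables, assembled from the snippets and tests in the diff below (the `ReluCustom` class and the `core.add_extension(ov::OpExtension<...>())` call are taken from the changed files; the include set and the `main` wrapper are added here only to make the example self-contained):

```cpp
#include <openvino/core/op_extension.hpp>      // ov::OpExtension
#include <openvino/frontend/extension/op.hpp>  // OPENVINO_FRAMEWORK_MAP
#include <openvino/op/relu.hpp>
#include <openvino/runtime/core.hpp>

// A custom op that behaves like Relu and declares which PyTorch op it maps to.
class ReluCustom : public ov::op::v0::Relu {
public:
    OPENVINO_OP("ReluCustom");
    OPENVINO_FRAMEWORK_MAP(pytorch, "aten::relu");
};

int main() {
    ov::Core core;
    // The generic ov::OpExtension collects the frontend extensions attached via
    // OPENVINO_FRAMEWORK_MAP (ONNX, Paddle, TensorFlow and, with this commit,
    // PyTorch), and each frontend now forwards them in FrontEnd::add_extension.
    core.add_extension(ov::OpExtension<ReluCustom>());
    return 0;
}
```

In the updated C++ test further down, the equivalent registration is the `std::make_shared<ov::OpExtension<ReluCustom>>()` entry added to the `PT_EXT` list.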
@@ -50,7 +50,7 @@ class that works well if all the following conditions are satisfied:

.. note::

``OpExtension`` class is currently available for ONNX and TensorFlow frontends.
``OpExtension`` class is currently available for ONNX, TensorFlow and PyTorch frontends.
PaddlePaddle frontend has named inputs and outputs for operations (not indexed);
therefore, OpExtension mapping is not applicable for this case.

2 changes: 1 addition & 1 deletion docs/snippets/ov_extensions.cpp
@@ -261,7 +261,7 @@ core.add_extension("openvino_template_extension.so");
{
//! [frontend_extension_framework_map_macro_add_extension]
ov::Core core;
core.add_extension(ov::frontend::OpExtension<CustomOp>());
core.add_extension(ov::OpExtension<CustomOp>());
//! [frontend_extension_framework_map_macro_add_extension]
}
return 0;
2 changes: 2 additions & 0 deletions src/core/include/openvino/core/op_extension.hpp
@@ -62,6 +62,7 @@ namespace detail {
OV_COLLECT_ATTACHED_EXTENSIONS(onnx)
OV_COLLECT_ATTACHED_EXTENSIONS(paddle)
OV_COLLECT_ATTACHED_EXTENSIONS(tensorflow)
OV_COLLECT_ATTACHED_EXTENSIONS(pytorch)
} // namespace detail

/**
@@ -98,6 +99,7 @@ class OpExtension : public BaseOpExtension {
detail::collect_attached_extensions_onnx<T>(res);
detail::collect_attached_extensions_paddle<T>(res);
detail::collect_attached_extensions_tensorflow<T>(res);
detail::collect_attached_extensions_pytorch<T>(res);
return res;
}
};
4 changes: 4 additions & 0 deletions src/frontends/onnx/frontend/src/frontend.cpp
@@ -207,5 +207,9 @@ void FrontEnd::add_extension(const std::shared_ptr<ov::Extension>& extension) {
m_extensions.conversions.push_back(onnx_conv_ext);
} else if (auto progress_reporter = std::dynamic_pointer_cast<ProgressReporterExtension>(extension)) {
m_extensions.progress_reporter = progress_reporter;
} else if (auto op_base_ext = std::dynamic_pointer_cast<ov::BaseOpExtension>(extension)) {
for (const auto& attached_ext : op_base_ext->get_attached_extensions()) {
add_extension(attached_ext);
}
}
}
4 changes: 4 additions & 0 deletions src/frontends/paddle/src/frontend.cpp
@@ -559,6 +559,10 @@ void FrontEnd::add_extension(const std::shared_ptr<ov::Extension>& extension) {
m_op_translators[paddle_conv_ext->get_op_type()] = [=](const NodeContext& context) {
return paddle_conv_ext->get_converter()(context);
};
} else if (auto op_base_ext = std::dynamic_pointer_cast<ov::BaseOpExtension>(extension)) {
for (const auto& attached_ext : op_base_ext->get_attached_extensions()) {
add_extension(attached_ext);
}
}
}

4 changes: 4 additions & 0 deletions src/frontends/pytorch/src/frontend.cpp
@@ -258,6 +258,10 @@ void FrontEnd::add_extension(const std::shared_ptr<ov::Extension>& extension) {
m_extensions.push_back(so_ext);
} else if (const auto& telemetry = std::dynamic_pointer_cast<TelemetryExtension>(extension)) {
m_telemetry = telemetry;
} else if (auto op_base_ext = std::dynamic_pointer_cast<ov::BaseOpExtension>(extension)) {
for (const auto& attached_ext : op_base_ext->get_attached_extensions()) {
add_extension(attached_ext);
}
}
}

4 changes: 4 additions & 0 deletions src/frontends/tensorflow/src/frontend.cpp
@@ -559,5 +559,9 @@ void FrontEnd::add_extension(const std::shared_ptr<ov::Extension>& extension) {
std::dynamic_pointer_cast<ov::frontend::tensorflow::ConversionExtension>(extension)) {
m_conversion_extensions.push_back(tensorflow_conv_ext);
m_op_translators[tensorflow_conv_ext->get_op_type()] = tensorflow_conv_ext->get_converter();
} else if (auto op_base_ext = std::dynamic_pointer_cast<ov::BaseOpExtension>(extension)) {
for (const auto& attached_ext : op_base_ext->get_attached_extensions()) {
add_extension(attached_ext);
}
}
}
4 changes: 4 additions & 0 deletions src/frontends/tensorflow_lite/src/frontend.cpp
@@ -315,5 +315,9 @@ void FrontEnd::add_extension(const std::shared_ptr<ov::Extension>& extension) {
m_op_translators[tensorflow_conv_ext->get_op_type()] = [=](const NodeContext& context) {
return tensorflow_conv_ext->get_converter()(context);
};
} else if (auto op_base_ext = std::dynamic_pointer_cast<ov::BaseOpExtension>(extension)) {
for (const auto& attached_ext : op_base_ext->get_attached_extensions()) {
add_extension(attached_ext);
}
}
}
@@ -147,14 +147,23 @@ class CustomElu : public ov::op::Op {
};

#ifdef ENABLE_OV_PYTORCH_FRONTEND
# include <openvino/frontend/extension/op.hpp>
# include <openvino/frontend/pytorch/extension/conversion.hpp>
# include <openvino/frontend/pytorch/extension/op.hpp>
# define PT_EXT \
std::make_shared<ov::frontend::pytorch::OpExtension<CustomElu>>( \
"aten::elu", \
std::map<std::string, size_t>{{"m_alpha", 1}}, \
std::map<std::string, ov::Any>{{"m_beta", 1.0f}}), \
std::make_shared<ov::frontend::pytorch::ConversionExtension>("Relu", ReluToSwishTranslator),
# include <openvino/op/relu.hpp>
class ReluCustom : public ov::op::v0::Relu {
public:
OPENVINO_OP("ReluCustom");
OPENVINO_FRAMEWORK_MAP(pytorch, "aten::relu");
};
# define PT_EXT \
std::make_shared<ov::frontend::pytorch::OpExtension<CustomElu>>( \
"aten::elu", \
std::map<std::string, size_t>{{"m_alpha", 1}}, \
std::map<std::string, ov::Any>{{"m_beta", 1.0f}}), \
std::make_shared<ov::frontend::pytorch::ConversionExtension>("Relu", ReluToSwishTranslator), \
std::make_shared<ov::OpExtension<ReluCustom>>(),

#else
# define PT_EXT
#endif
159 changes: 146 additions & 13 deletions tests/layer_tests/mo_python_api_tests/test_mo_convert_extensions.py
@@ -11,7 +11,7 @@
from openvino.runtime import PartialShape, Model


class TestExtensions(CommonMOConvertTest):
class TestONNXExtensions(CommonMOConvertTest):
def create_onnx_model(self, tmp_dir):
#
# Create ONNX model
@@ -23,8 +23,10 @@ def create_onnx_model(self, tmp_dir):

shape = [2, 3, 4]

input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)
input = helper.make_tensor_value_info(
'input', TensorProto.FLOAT, shape)
output = helper.make_tensor_value_info(
'output', TensorProto.FLOAT, shape)

node_def = onnx.helper.make_node(
'LeakyRelu',
@@ -57,7 +59,7 @@ def create_custom_extension_leaky_relu_to_relu():
# replaces LeakyRelu with Relu
from openvino.frontend import ConversionExtension
from openvino.frontend import NodeContext
import openvino.runtime.opset8 as ops
import openvino.runtime.opset14 as ops

def custom_converter(node: NodeContext):
input = node.get_input(0)
@@ -66,11 +68,17 @@ def custom_converter(node: NodeContext):

return ConversionExtension("LeakyRelu", custom_converter)

def create_custom_op_extension_leaky_relu_to_relu():
# replaces LeakyRelu with Relu
from openvino.frontend import OpExtension

return OpExtension("Relu", "LeakyRelu")

def create_custom_extension_elu_to_sigmoid():
# replaces Elu with Sigmoid
from openvino.frontend import ConversionExtension
from openvino.frontend import NodeContext
import openvino.runtime.opset8 as ops
import openvino.runtime.opset14 as ops

def custom_converter(node: NodeContext):
input = node.get_input(0)
@@ -81,29 +89,31 @@ def custom_converter(node: NodeContext):

def create_ref_graph1():
shape = PartialShape([2, 3, 4])
param = ov.opset8.parameter(shape, dtype=np.float32)
param = ov.opset14.parameter(shape, dtype=np.float32)
param.get_output_tensor(0).set_names({"input"})
relu = ov.opset8.relu(param)
relu = ov.opset14.relu(param)
relu.get_output_tensor(0).set_names({"LeakyRelu_data"})
elu = ov.opset8.elu(relu, alpha=0.1)
elu = ov.opset14.elu(relu, alpha=0.1)
elu.get_output_tensor(0).set_names({"output"})

return Model([elu], [param], "test")

def create_ref_graph2():
shape = PartialShape([2, 3, 4])
param = ov.opset8.parameter(shape, dtype=np.float32)
param = ov.opset14.parameter(shape, dtype=np.float32)
param.get_output_tensor(0).set_names({"input"})
relu = ov.opset8.relu(param)
relu = ov.opset14.relu(param)
relu.get_output_tensor(0).set_names({"LeakyRelu_data"})
sigmoid = ov.opset8.sigmoid(relu)
sigmoid = ov.opset14.sigmoid(relu)
sigmoid.get_output_tensor(0).set_names({"output"})

return Model([sigmoid], [param], "test")

test_data = [
{'params_test': {'extensions': create_custom_extension_leaky_relu_to_relu()},
'ref_graph': create_ref_graph1()},
{'params_test': {'extensions': create_custom_op_extension_leaky_relu_to_relu()},
'ref_graph': create_ref_graph1()},
{'params_test': {'extensions': [create_custom_extension_leaky_relu_to_relu(),
create_custom_extension_elu_to_sigmoid()]},
'ref_graph': create_ref_graph2()}
@@ -112,11 +122,134 @@ def create_ref_graph2():
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
@pytest.mark.precommit
def test_mo_convert_extensions(self, params, ie_device, precision, ir_version,
temp_dir, use_legacy_frontend):
def test_onnx_mo_convert_extensions(self, params, ie_device, precision, ir_version,
temp_dir, use_legacy_frontend):
onnx_net_path = self.create_onnx_model(temp_dir)

test_params = params['params_test']
test_params.update({'input_model': onnx_net_path})
test_params.update({'use_convert_model_from_mo': True})
self._test_by_ref_graph(temp_dir, test_params, params['ref_graph'])


class TestPyTorchExtensions(CommonMOConvertTest):
def create_model(self, tmp_dir):
import torch

class CosModel(torch.nn.Module):
def __init__(self):
super(CosModel, self).__init__()

def forward(self, x):
return torch.cos(x.to(torch.float32))

return CosModel()

def create_custom_extension_cos_to_sin():
from openvino.frontend import ConversionExtension
from openvino.frontend import NodeContext
import openvino.runtime.opset14 as ops

def custom_converter(node: NodeContext):
input = node.get_input(0)
sin = ops.sin(input)
return sin.outputs()

return ConversionExtension("aten::cos", custom_converter)

def create_custom_op_extension_cos_to_sin():
from openvino.frontend import OpExtension

return OpExtension("Sin", "aten::cos")

def create_ref_graph():
shape = PartialShape.dynamic()
param = ov.opset14.parameter(shape, dtype=ov.Type.dynamic)
param.get_output_tensor(0).set_names({"x"})
convert = ov.opset14.convert(param, ov.Type.f32)
convert.get_output_tensor(0).set_names({"5"})
sin = ov.opset14.sin(convert)

return Model([sin], [param], "test")

test_data = [
{'params_test': {'extension': create_custom_extension_cos_to_sin()},
'ref_graph': create_ref_graph()},
{'params_test': {'extension': create_custom_op_extension_cos_to_sin()},
'ref_graph': create_ref_graph()},
]

@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
@pytest.mark.precommit
def test_pt_mo_convert_extensions(self, params, ie_device, precision, ir_version,
temp_dir, use_legacy_frontend):
model = self.create_model(temp_dir)

test_params = params['params_test']
test_params.update({'input_model': model})
self._test_by_ref_graph(temp_dir, test_params, params['ref_graph'])


class TestTfExtensions(CommonMOConvertTest):
def create_keras_model(self, temp_dir):
import tensorflow as tf

tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()

input_name = "Input1"
input_shape = [1, 2, 3]

x = tf.keras.Input(shape=input_shape, name=input_name)
y = tf.cos(x)
keras_net = tf.keras.Model(inputs=[x], outputs=[y])
tf.keras.backend.clear_session()

return keras_net

def create_custom_extension_cos_to_sin():
from openvino.frontend import ConversionExtension
from openvino.frontend import NodeContext
import openvino.runtime.opset14 as ops

def custom_converter(node: NodeContext):
input = node.get_input(0)
sin = ops.sin(input)
return sin.outputs()

return ConversionExtension("Cos", custom_converter)

def create_custom_op_extension_cos_to_sin():
from openvino.frontend import OpExtension

return OpExtension("Sin", "Cos")

def create_ref_graph():
shape = PartialShape([-1, 1, 2, 3])
param = ov.opset14.parameter(shape, dtype=np.float32)
param.get_output_tensor(0).set_names({"Input1"})
y = ov.opset14.sin(param)
y.get_output_tensor(0).set_names({"tf.math.cos/Cos:0"})

parameter_list = [param]

return Model([y], parameter_list, "test")

test_data = [
{'params_test': {'extension': create_custom_extension_cos_to_sin()},
'ref_graph': create_ref_graph()},
{'params_test': {'extension': create_custom_op_extension_cos_to_sin()},
'ref_graph': create_ref_graph()},
]

@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
@pytest.mark.precommit
def test_tf_mo_convert_extensions(self, params, ie_device, precision, ir_version,
temp_dir, use_legacy_frontend):
model = self.create_keras_model(temp_dir)

test_params = params['params_test']
test_params.update({'input_model': model})
self._test_by_ref_graph(temp_dir, test_params, params['ref_graph'])