diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml index b488f0a63730bf..e39d9ad860aabc 100644 --- a/.github/workflows/job_python_unit_tests.yml +++ b/.github/workflows/job_python_unit_tests.yml @@ -259,8 +259,6 @@ jobs: - name: TensorFlow 1 Layer Tests - TF FE if: fromJSON(inputs.affected-components).TF_FE.test run: | - # requires 'unit_tests' from 'mo' - export PYTHONPATH=${INSTALL_TEST_DIR}/mo python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/ -m precommit -n logical --junitxml=${INSTALL_TEST_DIR}/TEST-tf_fe.xml env: TEST_DEVICE: CPU @@ -269,9 +267,7 @@ jobs: - name: TensorFlow 2 Layer Tests - TF FE if: fromJSON(inputs.affected-components).TF_FE.test && runner.os != 'macOS' # Ticket: 123322 run: | - # requires 'unit_tests' from 'mo' - export PYTHONPATH=${INSTALL_TEST_DIR}/mo - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow2_keras_tests/ -n logical -m precommit_tf_fe --junitxml=${INSTALL_TEST_DIR}/TEST-tf2_fe.xml + python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow2_keras_tests/ -n logical -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-tf2_fe.xml env: TEST_DEVICE: CPU TEST_PRECISION: FP16 diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index becd5bf6087f53..a457a43558f8db 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -617,8 +617,6 @@ jobs: if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test shell: cmd run: | - :: requires 'unit_tests' from 'tools/mo' - set PYTHONPATH=${{ env.INSTALL_TEST_DIR }}\mo;%PYTHONPATH% python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/ -n logical -m precommit --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_fe.xml env: TEST_DEVICE: CPU @@ -628,18 +626,19 @@ jobs: if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test shell: cmd run: | - :: requires 'unit_tests' from 'tools/mo' - set PYTHONPATH=${{ env.INSTALL_TEST_DIR }}\mo;%PYTHONPATH% - - python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow2_keras_tests/ -m precommit_tf_fe --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf2_fe.xml + python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow2_keras_tests/ -m precommit --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf2_fe.xml env: TEST_DEVICE: CPU + TEST_PRECISION: FP16 - name: TensorFlow 1 Layer Tests - Legacy FE if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test shell: cmd run: | python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/test_tf_Roll.py --use_legacy_frontend --ir_version=10 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_Roll.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP16 - name: TensorFlow 2 Layer Tests - Legacy FE if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test diff --git a/cmake/packaging/debian.cmake b/cmake/packaging/debian.cmake index 3fb78283d76773..49b489da23799d 100644 --- a/cmake/packaging/debian.cmake +++ b/cmake/packaging/debian.cmake @@ -53,6 +53,8 @@ macro(ov_cpack_settings) (NOT item MATCHES "^${OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE}_python.*" OR ENABLE_PYTHON_PACKAGING) AND # temporary block nvidia NOT item STREQUAL "nvidia" AND + # don't install node_addon + NOT item MATCHES "node_addon" AND # don't install Intel OpenMP NOT item STREQUAL "omp" AND # the same for pugixml diff --git a/cmake/packaging/rpm.cmake b/cmake/packaging/rpm.cmake index b3863f68aa173f..3f88d22ad7616c 100644 --- a/cmake/packaging/rpm.cmake +++ b/cmake/packaging/rpm.cmake @@ -39,6 +39,8 @@ macro(ov_cpack_settings) (NOT item MATCHES "^${OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE}_python.*" OR ENABLE_PYTHON_PACKAGING) AND # temporary block nvidia NOT item STREQUAL "nvidia" AND + # don't install node_addon + NOT item MATCHES "node_addon" AND # temporary block npu NOT item STREQUAL "npu" AND # don't install Intel OpenMP diff --git a/docs/sphinx_setup/_static/css/homepage_style.css b/docs/sphinx_setup/_static/css/homepage_style.css index 773833257ab8a4..395bbd27dddc6a 100644 --- a/docs/sphinx_setup/_static/css/homepage_style.css +++ b/docs/sphinx_setup/_static/css/homepage_style.css @@ -6,10 +6,6 @@ h1 { font-size: 60px !important; } -.homepage-begin-container { - padding: 0px; -} - #ov-homepage-banner { border-bottom: 0px; } @@ -21,7 +17,7 @@ h1 { .ov-homepage-slide-title { color: white !important; - font-size: 22px !important; + font-size: 27px !important; font-weight: lighter !important; } @@ -36,31 +32,26 @@ h1 { } #ov-homepage-banner #splide01-slide01 { - background-color: #070862; background-image: linear-gradient(350deg, #004CA9 0%, #381965 50%, #070862 100%); padding: 32px 48px !important; } #ov-homepage-banner #splide01-slide02 { - background-color: #034CAA; background-image: linear-gradient(270deg, #034CAA 00%, #4B9D77 50%, #034CAA 100%); padding: 32px 48px !important; } #ov-homepage-banner #splide01-slide03 { - background-color: #030B5E; background-image: linear-gradient(230deg, #030B5E 0%, #285455 40%, #030B5E 100%); padding: 32px 48px !important; } #ov-homepage-banner #splide01-slide04 { - background-color: #214DA4; background-image: linear-gradient(110deg, #214DA4 0%, #03aadd 100%); padding: 32px 48px !important; } #ov-homepage-banner #splide01-slide05 { - background-color: #034CAA; background-image: linear-gradient(350deg, #034CAA 20%, #034CAA 30%, #4B9D77 100%); padding: 32px 48px !important; } @@ -70,6 +61,15 @@ h1 { margin: 0.4rem 0 1.2rem 0; } } +.splide__pagination>li { + padding-right: 6px; +} +.is-active{ + color: white !important; +} +.homepage-begin-container { + padding: 0px; +} #ov-homepage-banner p:first-of-type { margin-top: 0; @@ -89,12 +89,8 @@ h1 { color: white; } -#splide01-track { - height: calc(180px) !important; -} - .ov-homepage-banner-btn { - font-size: 14px !important; + font-size: 12px !important; font-weight: bold !important; color: #ffffff !important; border: 1px solid white; @@ -172,7 +168,6 @@ h1 { .homepage-begin-tile { border-radius: 0; - /* margin: 20px !important; */ margin-bottom: 5px; position: relative; border-width: 0.2cqb; diff --git a/docs/sphinx_setup/_static/js/custom.js b/docs/sphinx_setup/_static/js/custom.js index e9368ebda7145c..404332dce91173 100644 --- a/docs/sphinx_setup/_static/js/custom.js +++ b/docs/sphinx_setup/_static/js/custom.js @@ -259,14 +259,15 @@ function addFooter() { function initSplide() { var splide = new Splide('.splide', { - type : 'loop', - height : `230px`, + type : 'fade', + autoHeight : true, perPage : 1, autoplay : true, arrows : false, waitForTransition : true, wheel : true, wheelSleep : 250, + interval : 3000, }); splide.mount(); } diff --git a/docs/sphinx_setup/_static/js/graphs_ov_tf.js b/docs/sphinx_setup/_static/js/graphs_ov_tf.js deleted file mode 100644 index bf16e9dacc5fe6..00000000000000 --- a/docs/sphinx_setup/_static/js/graphs_ov_tf.js +++ /dev/null @@ -1,109 +0,0 @@ -$(document).ready(function () { - var chartBlock = $('.chart-block-tf-ov'); - chartBlock.each(function () { - var url = $(this).data('loadcsv'); - Papa.parse(url, { - download: true, - complete: renderData($(this)) - }) - }); - - function getLabels(data) { - return data - .map((item) => item[1]); - } - - function getChartOptions(title, displayLabels) { - return { - responsive: false, - maintainAspectRatio: false, - legend: { display: true, position: 'bottom' }, - title: { - display: true, - text: title - }, - scales: { - xAxes: [{ - ticks: { - beginAtZero: true - } - }], - yAxes: [{ - ticks: { - display: displayLabels, //this will remove only the label - beginAtZero: true - } - }] - }, - plugins: { - datalabels: { - color: "#4A4A4A", - anchor: "end", - align: "end", - clamp: false, - offset: 0, - display: true, - font: { - size: 8, - family: 'Roboto' - } - } - } - } - } - - function getChartData(data) { - function getDataset(data, col, label, color) { - return { - label: label, - data: data.map(function (item) { - return item[col] - }), - backgroundColor: color, - borderColor: 'rgba(170,170,170,0)', - barThickness: 12 - } - } - return { - labels: getLabels(data), - datasets: [getDataset(data, 2, 'openvino', '#00C7FD'), getDataset(data, 3, 'TF', '#8F5DA2')] - }; - } - - function renderData(currentChart) { - return function (result) { - var data = result.data; - // remove col names - data.shift(0); - var chartName = data[1][0]; - var chartSlug = chartName.replace(')', '').replace(' (', '-'); - var graphContainer = $('
'); - var chartContainer = $('
'); - graphContainer.attr('id', 'ov-graph-container-' + chartSlug); - chartContainer.addClass('chart-container'); - chartContainer.addClass('container'); - var chartWrap = $('
'); - chartWrap.addClass('chart-wrap'); - chartWrap.addClass('container'); - chartContainer.append(chartWrap); - var chart = $('
'); - chart.addClass('chart'); - chart.addClass('col-md-12'); - var canvas = $(''); - chart.append(canvas); - var container = $('
'); - container.addClass('row'); - container.append(chart); - var context = canvas.get(0).getContext('2d'); - context.canvas.width = context.canvas.width * 2.5; - var chartTitle = chartName + ', Throughput (FPS) Precision: FP32 (Higher is better)'; - new Chart(context, { - type: 'horizontalBar', - data: getChartData(data), - options: getChartOptions(chartTitle, true) - }); - chartContainer.append(container); - currentChart.append(chartContainer); - } - } -}); diff --git a/docs/sphinx_setup/api/ie_python_api/api.rst b/docs/sphinx_setup/api/ie_python_api/api.rst index 9ef3598355c6ec..29665c35141bc3 100644 --- a/docs/sphinx_setup/api/ie_python_api/api.rst +++ b/docs/sphinx_setup/api/ie_python_api/api.rst @@ -113,6 +113,12 @@ OpenVINO Python API openvino.runtime.opset14 +.. autosummary:: + :toctree: _autosummary + :template: custom-module-template.rst + + openvino.runtime.opset15 + .. autosummary:: :toctree: _autosummary :template: custom-module-template.rst diff --git a/src/bindings/js/node/CMakeLists.txt b/src/bindings/js/node/CMakeLists.txt index 5081a215b3cd80..47686902b1620f 100644 --- a/src/bindings/js/node/CMakeLists.txt +++ b/src/bindings/js/node/CMakeLists.txt @@ -103,6 +103,8 @@ ov_set_install_rpath(${PROJECT_NAME} ov_add_clang_format_target(${PROJECT_NAME}_clang FOR_TARGETS ${PROJECT_NAME}) +ov_cpack_add_component(${OV_CPACK_COMP_NPM} HIDDEN) + install(TARGETS ${PROJECT_NAME} LIBRARY DESTINATION ${OV_CPACK_RUNTIMEDIR} COMPONENT ${PROJECT_NAME} ${OV_CPACK_COMP_NPM_EXCLUDE_ALL} RUNTIME DESTINATION ${OV_CPACK_RUNTIMEDIR} COMPONENT ${PROJECT_NAME} ${OV_CPACK_COMP_NPM_EXCLUDE_ALL} diff --git a/src/bindings/js/node/include/core_wrap.hpp b/src/bindings/js/node/include/core_wrap.hpp index f973a5fa751925..169812234f901a 100644 --- a/src/bindings/js/node/include/core_wrap.hpp +++ b/src/bindings/js/node/include/core_wrap.hpp @@ -68,6 +68,7 @@ class CoreWrap : public Napi::ObjectWrap { Napi::Value set_property(const Napi::CallbackInfo& info); Napi::Value get_property(const Napi::CallbackInfo& info); + void add_extension(const Napi::CallbackInfo& info); protected: Napi::Value compile_model_sync(const Napi::CallbackInfo& info, const Napi::Object& model, @@ -93,6 +94,9 @@ class CoreWrap : public Napi::ObjectWrap { /** @brief Returns devices available for inference. */ Napi::Value get_available_devices(const Napi::CallbackInfo& info); + /** @brief Returns versions of the specified device. */ + Napi::Value get_versions(const Napi::CallbackInfo& info); + private: ov::Core _core; }; diff --git a/src/bindings/js/node/lib/addon.ts b/src/bindings/js/node/lib/addon.ts index a5ed1598616a38..1909a15042e8da 100644 --- a/src/bindings/js/node/lib/addon.ts +++ b/src/bindings/js/node/lib/addon.ts @@ -38,6 +38,12 @@ interface Core { readModelSync(modelBuffer: Uint8Array, weightsBuffer?: Uint8Array): Model; importModelSync(modelStream: Buffer, device: string): CompiledModel; getAvailableDevices(): string[]; + getVersions(deviceName: string): { + [deviceName: string]: { + buildNumber: string, + description: string, + }, + }; setProperty(props: { [key: string]: string | number | boolean }): void; setProperty( deviceName: string, @@ -47,7 +53,8 @@ interface Core { getProperty( deviceName: string, propertyName: string, - ): string | number | boolean, + ): string | number | boolean; + addExtension(libraryPath: string): void; } interface CoreConstructor { new(): Core; diff --git a/src/bindings/js/node/src/core_wrap.cpp b/src/bindings/js/node/src/core_wrap.cpp index 17c14d037990f2..cbcf49281e248a 100644 --- a/src/bindings/js/node/src/core_wrap.cpp +++ b/src/bindings/js/node/src/core_wrap.cpp @@ -61,8 +61,10 @@ Napi::Function CoreWrap::get_class(Napi::Env env) { InstanceMethod("getAvailableDevices", &CoreWrap::get_available_devices), InstanceMethod("importModelSync", &CoreWrap::import_model), InstanceMethod("getAvailableDevices", &CoreWrap::get_available_devices), + InstanceMethod("getVersions", &CoreWrap::get_versions), InstanceMethod("setProperty", &CoreWrap::set_property), - InstanceMethod("getProperty", &CoreWrap::get_property)}); + InstanceMethod("getProperty", &CoreWrap::get_property), + InstanceMethod("addExtension", &CoreWrap::add_extension)}); } Napi::Value CoreWrap::read_model_sync(const Napi::CallbackInfo& info) { @@ -273,6 +275,31 @@ Napi::Value CoreWrap::get_available_devices(const Napi::CallbackInfo& info) { return js_devices; } +Napi::Value CoreWrap::get_versions(const Napi::CallbackInfo& info) { + if (info.Length() == 0) { + reportError(info.Env(), "getVersions() method expects 1 argument of string type."); + return info.Env().Undefined(); + } + auto device_arg = info[0]; + if (!device_arg.IsString()) { + reportError(info.Env(), "The argument in getVersions() method must be a string or convertible to a string."); + return info.Env().Undefined(); + } + const auto& devices_map = _core.get_versions(device_arg.ToString()); + Napi::Object versions_object = Napi::Object::New(info.Env()); + + for (const auto& dev : devices_map) { + Napi::Object device_properties = Napi::Object::New(info.Env()); + + device_properties.Set("buildNumber", Napi::String::New(info.Env(), dev.second.buildNumber)); + device_properties.Set("description", Napi::String::New(info.Env(), dev.second.description)); + + versions_object.Set(dev.first, device_properties); + } + + return versions_object; +} + Napi::Value CoreWrap::import_model(const Napi::CallbackInfo& info) { if (info.Length() != 2) { reportError(info.Env(), "Invalid number of arguments -> " + std::to_string(info.Length())); @@ -333,3 +360,15 @@ Napi::Value CoreWrap::get_property(const Napi::CallbackInfo& info) { return any_to_js(info, value); } + +void CoreWrap::add_extension(const Napi::CallbackInfo& info) { + try { + if (!info[0].IsString()) + OPENVINO_THROW("addExtension method applies one argument of string type"); + + std::string library_path = info[0].ToString(); + _core.add_extension(library_path); + } catch (std::runtime_error& err) { + reportError(info.Env(), err.what()); + } +} diff --git a/src/bindings/js/node/tests/basic.test.js b/src/bindings/js/node/tests/basic.test.js index 07e0502053edd1..1236bd9c553520 100644 --- a/src/bindings/js/node/tests/basic.test.js +++ b/src/bindings/js/node/tests/basic.test.js @@ -20,6 +20,33 @@ it('Core.getAvailableDevices()', () => { assert.ok(devices.includes('CPU')); }); +describe('Core.getVersions()', () => { + + it('getVersions(validDeviceName: string)', () => { + const deviceVersion = core.getVersions('CPU'); + assert.strictEqual(typeof deviceVersion, 'object'); + assert.strictEqual(typeof deviceVersion.CPU, 'object'); + assert.strictEqual(typeof deviceVersion.CPU.buildNumber, 'string'); + assert.strictEqual(typeof deviceVersion.CPU.description, 'string'); + }); + + it('getVersions() throws if no arguments are passed into the function', () => { + assert.throws( + () => core.getVersions(), + {message: 'getVersions() method expects 1 argument of string type.'} + ); + }); + + it('getVersions() throws if non string coercable arguments are passed into the function', () => { + assert.throws( + () => core.getVersions({ deviceName: 'CPU' }), + {message: 'The argument in getVersions() method must be a string or convertible to a string.'} + ); + }); + +}); + + it('CompiledModel type', () => { assert.ok(compiledModel instanceof ov.CompiledModel); }); diff --git a/src/bindings/js/node/tests/core.test.js b/src/bindings/js/node/tests/core.test.js index ca45ae4dff91d9..e0fda3912575de 100644 --- a/src/bindings/js/node/tests/core.test.js +++ b/src/bindings/js/node/tests/core.test.js @@ -8,7 +8,7 @@ const { describe, it } = require('node:test'); const core = new ov.Core(); -it('Core.setProperty()', () => { +it('Core.setProperty()', () => { const tmpDir = '/tmp'; core.setProperty({ 'CACHE_DIR': tmpDir }); @@ -18,7 +18,7 @@ it('Core.setProperty()', () => { assert.equal(cacheDir, tmpDir); }); -it('Core.setProperty(\'CPU\')', () => { +it('Core.setProperty(\'CPU\')', () => { const tmpDir = '/tmp'; core.setProperty('CPU', { 'CACHE_DIR': tmpDir }); @@ -28,13 +28,13 @@ it('Core.setProperty(\'CPU\')', () => { assert.equal(cacheDir, tmpDir); }); -it('Core.getProperty(\'CPU\', \'SUPPORTED_PROPERTIES\') is Array', () => { +it('Core.getProperty(\'CPU\', \'SUPPORTED_PROPERTIES\') is Array', () => { const supportedPropertiesArray = core.getProperty('CPU', 'SUPPORTED_PROPERTIES'); assert.ok(Array.isArray(supportedPropertiesArray)); }); -it('Core.setProperty(\'CPU\', { \'NUM_STREAMS\': 5 })', () => { +it('Core.setProperty(\'CPU\', { \'NUM_STREAMS\': 5 })', () => { const streams = 5; core.setProperty('CPU', { 'NUM_STREAMS': streams }); @@ -43,7 +43,7 @@ it('Core.setProperty(\'CPU\', { \'NUM_STREAMS\': 5 })', () => { assert.equal(result, streams); }); -it('Core.setProperty(\'CPU\', { \'INFERENCE_NUM_THREADS\': 3 })', () => { +it('Core.setProperty(\'CPU\', { \'INFERENCE_NUM_THREADS\': 3 })', () => { const threads = 3; core.setProperty('CPU', { 'INFERENCE_NUM_THREADS': threads }); @@ -51,3 +51,19 @@ it('Core.setProperty(\'CPU\', { \'INFERENCE_NUM_THREADS\': 3 })', () => { assert.equal(result, threads); }); + +it('Core.addExtension() with empty parameters', () => { + assert.throws( + () => core.addExtension(), + /addExtension method applies one argument of string type/ + ); +}); + +it('Core.addExtension(\'not_exists\') with non-existed library', () => { + const notExistsExt = 'not_exists'; + + assert.throws( + () => core.addExtension(notExistsExt), + /Cannot load library 'not_exists'/ + ); +}); diff --git a/src/bindings/python/src/openvino/runtime/opset13/ops.py b/src/bindings/python/src/openvino/runtime/opset13/ops.py index 0c7ce39c5f3572..be5391d0d8ad8a 100644 --- a/src/bindings/python/src/openvino/runtime/opset13/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset13/ops.py @@ -15,7 +15,7 @@ from openvino.runtime.op import Constant, Result from openvino.runtime.opset1 import convert_like from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import apply_affix_on, binary_op, nameable_op, unary_op +from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op from openvino.runtime.utils.types import ( NumericData, NodeInput, @@ -350,7 +350,6 @@ def result(data: Union[Node, Output, NumericData], name: Optional[str] = None) - @nameable_op -@apply_affix_on("data", "input_low", "input_high", "output_low", "output_high") def fake_quantize( data: NodeInput, input_low: NodeInput, @@ -360,9 +359,6 @@ def fake_quantize( levels: int, auto_broadcast: str = "NUMPY", name: Optional[str] = None, - *, - prefix: Optional[str] = None, - suffix: Optional[str] = None, ) -> Node: r"""Perform an element-wise linear quantization on input data. @@ -375,10 +371,6 @@ def fake_quantize( :param auto_broadcast: The type of broadcasting specifies rules used for auto-broadcasting of input tensors. :param name: Optional name of the new node. - :param prefix: Optional keyword-only string to apply before original names of - all generated input nodes (for example: passed as numpy arrays). - :param suffix: Optional keyword-only string to apply after original names of - all generated input nodes (for example: passed as numpy arrays). :return: New node with quantized value. Input floating point values are quantized into a discrete set of floating point values. @@ -400,6 +392,6 @@ def fake_quantize( """ return _get_node_factory_opset13().create( "FakeQuantize", - as_nodes(data, input_low, input_high, output_low, output_high), + as_nodes(data, input_low, input_high, output_low, output_high, name=name), {"levels": levels, "auto_broadcast": auto_broadcast.upper()}, ) diff --git a/src/bindings/python/src/openvino/runtime/opset15/__init__.py b/src/bindings/python/src/openvino/runtime/opset15/__init__.py new file mode 100644 index 00000000000000..52af752e930342 --- /dev/null +++ b/src/bindings/python/src/openvino/runtime/opset15/__init__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# Inlcudes new operators added in Opset15 + +# TODO (ticket 138273): Add previous opset operators at the end of opset15 development diff --git a/src/bindings/python/src/openvino/runtime/opset15/ops.py b/src/bindings/python/src/openvino/runtime/opset15/ops.py new file mode 100644 index 00000000000000..24b5e9962138ac --- /dev/null +++ b/src/bindings/python/src/openvino/runtime/opset15/ops.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Factory functions for ops added to openvino opset15.""" +from functools import partial + +from openvino.runtime.opset_utils import _get_node_factory + +_get_node_factory_opset15 = partial(_get_node_factory, "opset15") + +# -------------------------------------------- ops ------------------------------------------------ diff --git a/src/bindings/python/src/openvino/runtime/utils/decorators.py b/src/bindings/python/src/openvino/runtime/utils/decorators.py index 991edc519dbe9c..5e446943ffa517 100644 --- a/src/bindings/python/src/openvino/runtime/utils/decorators.py +++ b/src/bindings/python/src/openvino/runtime/utils/decorators.py @@ -4,15 +4,21 @@ from functools import wraps from inspect import getfullargspec -from typing import Any, Callable, List +from typing import Any, Callable, List, Optional from openvino.runtime import Node, Output from openvino.runtime.utils.types import NodeInput, as_node, as_nodes -def _set_node_friendly_name(node: Node, /, **kwargs: Any) -> Node: +def _get_name(**kwargs: Any) -> Node: if "name" in kwargs: - node.friendly_name = kwargs["name"] + return kwargs["name"] + return None + + +def _set_node_friendly_name(node: Node, *, name: Optional[str] = None) -> Node: + if name is not None: + node.friendly_name = name return node @@ -22,47 +28,20 @@ def nameable_op(node_factory_function: Callable) -> Callable: @wraps(node_factory_function) def wrapper(*args: Any, **kwargs: Any) -> Node: node = node_factory_function(*args, **kwargs) - node = _set_node_friendly_name(node, **kwargs) + node = _set_node_friendly_name(node, name=_get_name(**kwargs)) return node return wrapper -def _apply_affix(node: Node, prefix: str = "", suffix: str = "") -> Node: - node.friendly_name = prefix + node.friendly_name + suffix - return node - - -def apply_affix_on(*node_names: Any) -> Callable: - """Add prefix and/or suffix to all openvino names of operators defined as arguments.""" - - def decorator(func: Callable) -> Callable: - @wraps(func) - def wrapper(*args: Any, **kwargs: Any) -> Node: - arg_names = getfullargspec(func).args - arg_mapping = dict(zip(arg_names, args)) - for node_name in node_names: - # Apply only on auto-generated nodes. Create such node and apply affixes. - # Any Node instance supplied by the user is keeping the name as-is. - if node_name in arg_mapping and not isinstance(arg_mapping[node_name], (Node, Output)): - arg_mapping[node_name] = _apply_affix(as_node(arg_mapping[node_name]), - prefix=kwargs.get("prefix", ""), - suffix=kwargs.get("suffix", ""), - ) - results = func(**arg_mapping, **kwargs) - return results - return wrapper - return decorator - - def unary_op(node_factory_function: Callable) -> Callable: """Convert the first input value to a Constant Node if a numeric value is detected.""" @wraps(node_factory_function) def wrapper(input_value: NodeInput, *args: Any, **kwargs: Any) -> Node: - input_node = as_node(input_value) + input_node = as_node(input_value, name=_get_name(**kwargs)) node = node_factory_function(input_node, *args, **kwargs) - node = _set_node_friendly_name(node, **kwargs) + node = _set_node_friendly_name(node, name=_get_name(**kwargs)) return node return wrapper @@ -73,9 +52,9 @@ def binary_op(node_factory_function: Callable) -> Callable: @wraps(node_factory_function) def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node: - left, right = as_nodes(left, right) + left, right = as_nodes(left, right, name=_get_name(**kwargs)) node = node_factory_function(left, right, *args, **kwargs) - node = _set_node_friendly_name(node, **kwargs) + node = _set_node_friendly_name(node, name=_get_name(**kwargs)) return node return wrapper diff --git a/src/bindings/python/src/openvino/runtime/utils/types.py b/src/bindings/python/src/openvino/runtime/utils/types.py index 61214a386d3738..52f1faf8e1e839 100644 --- a/src/bindings/python/src/openvino/runtime/utils/types.py +++ b/src/bindings/python/src/openvino/runtime/utils/types.py @@ -5,7 +5,7 @@ """Functions related to converting between Python and numpy types and openvino types.""" import logging -from typing import List, Union +from typing import List, Union, Optional import numpy as np @@ -145,7 +145,7 @@ def get_shape(data: NumericData) -> TensorShape: return [] -def make_constant_node(value: NumericData, dtype: Union[NumericType, Type] = None) -> Constant: +def make_constant_node(value: NumericData, dtype: Union[NumericType, Type] = None, *, name: Optional[str] = None) -> Constant: """Return an openvino Constant node with the specified value.""" ndarray = get_ndarray(value) if dtype is not None: @@ -153,18 +153,23 @@ def make_constant_node(value: NumericData, dtype: Union[NumericType, Type] = Non else: element_type = get_element_type(ndarray.dtype) - return Constant(element_type, Shape(ndarray.shape), ndarray.flatten().tolist()) + const = Constant(element_type, Shape(ndarray.shape), ndarray.flatten().tolist()) + if name: + const.friendly_name = name + "/" + const.friendly_name -def as_node(input_value: NodeInput) -> Node: + return const + + +def as_node(input_value: NodeInput, name: Optional[str] = None) -> Node: """Return input values as nodes. Scalars will be converted to Constant nodes.""" if issubclass(type(input_value), Node): return input_value if issubclass(type(input_value), Output): return input_value - return make_constant_node(input_value) + return make_constant_node(input_value, name=name) -def as_nodes(*input_values: NodeInput) -> List[Node]: +def as_nodes(*input_values: NodeInput, name: Optional[str] = None) -> List[Node]: """Return input values as nodes. Scalars will be converted to Constant nodes.""" - return [as_node(input_value) for input_value in input_values] + return [as_node(input_value, name=name) for input_value in input_values] diff --git a/src/bindings/python/tests/test_graph/test_affix_ops.py b/src/bindings/python/tests/test_graph/test_affix_ops.py index 7e42b38172cc0c..f9e196348a9851 100644 --- a/src/bindings/python/tests/test_graph/test_affix_ops.py +++ b/src/bindings/python/tests/test_graph/test_affix_ops.py @@ -10,15 +10,13 @@ from openvino import Type -@pytest.mark.parametrize("prefix_string", [ +@pytest.mark.parametrize("op_name", [ "ABC", - "custom_prefix_", + "Fakeee", + "123456", + "FakeQuantize", ]) -@pytest.mark.parametrize("suffix_string", [ - "XYZ", - "_custom_suffix", -]) -def test_affix_not_applied_on_nodes(prefix_string, suffix_string): +def test_affix_not_applied_on_nodes(op_name): levels = np.int32(4) data_shape = [1, 2, 3, 4] bound_shape = [] @@ -45,12 +43,12 @@ def test_affix_not_applied_on_nodes(prefix_string, suffix_string): parameter_output_low, parameter_output_high, levels, - prefix=prefix_string, - suffix=suffix_string, + name=op_name, ) # Check if node was created correctly assert model.get_type_name() == "FakeQuantize" + assert model.get_friendly_name() == op_name assert model.get_output_size() == 1 assert list(model.get_output_shape(0)) == [1, 2, 3, 4] @@ -61,15 +59,13 @@ def test_affix_not_applied_on_nodes(prefix_string, suffix_string): assert output_high_name == parameter_output_high.friendly_name -@pytest.mark.parametrize("prefix_string", [ +@pytest.mark.parametrize("op_name", [ "ABC", - "custom_prefix_", -]) -@pytest.mark.parametrize("suffix_string", [ - "XYZ", - "_custom_suffix", + "Fakeee", + "123456", + "FakeQuantize", ]) -def test_affix_not_applied_on_output(prefix_string, suffix_string): +def test_affix_not_applied_on_output(op_name): levels = np.int32(4) data_shape = [1, 2, 3, 4] bound_shape = [] @@ -100,12 +96,12 @@ def test_affix_not_applied_on_output(prefix_string, suffix_string): parameter_output_low, parameter_output_high, levels, - prefix=prefix_string, - suffix=suffix_string, + name=op_name, ) # Check if node was created correctly assert model.get_type_name() == "FakeQuantize" + assert model.get_friendly_name() == op_name assert model.get_output_size() == 1 assert list(model.get_output_shape(0)) == [1, 2, 3, 4] @@ -116,17 +112,13 @@ def test_affix_not_applied_on_output(prefix_string, suffix_string): assert output_high_name == parameter_output_high.friendly_name -@pytest.mark.parametrize("prefix_string", [ - "", +@pytest.mark.parametrize("op_name", [ "ABC", - "custom_prefix_", -]) -@pytest.mark.parametrize("suffix_string", [ - "", - "XYZ", - "_custom_suffix", + "Fakeee", + "123456", + "FakeQuantize", ]) -def test_fake_quantize_affix(prefix_string, suffix_string): +def test_fake_quantize_prefix(op_name): levels = np.int32(4) data_shape = [1, 2, 3, 4] bound_shape = [1] @@ -144,21 +136,15 @@ def test_fake_quantize_affix(prefix_string, suffix_string): d_arr, e_arr, levels, - prefix=prefix_string, - suffix=suffix_string, + name=op_name, ) # Check if node was created correctly assert model.get_type_name() == "FakeQuantize" + assert model.get_friendly_name() == op_name assert model.get_output_size() == 1 assert list(model.get_output_shape(0)) == [1, 2, 3, 4] - # Check that data parameter and node itself do not change: - if prefix_string != "": - assert prefix_string not in model.friendly_name - if suffix_string != "": - assert suffix_string not in model.friendly_name # Check that other parameters change: for node_input in model.inputs(): generated_node = node_input.get_source_output().get_node() - assert prefix_string in generated_node.friendly_name - assert suffix_string in generated_node.friendly_name + assert op_name + "/" in generated_node.friendly_name diff --git a/src/bindings/python/tests/test_transformations/test_pattern_ops.py b/src/bindings/python/tests/test_transformations/test_pattern_ops.py index fbfbc62fc2b173..390daef3254549 100644 --- a/src/bindings/python/tests/test_transformations/test_pattern_ops.py +++ b/src/bindings/python/tests/test_transformations/test_pattern_ops.py @@ -22,7 +22,7 @@ def test_wrap_type_pattern_type(): - last_opset_number = 14 + last_opset_number = 15 for i in range(1, last_opset_number + 1): WrapType(f"opset{i}.Parameter") WrapType(f"opset{i}::Parameter") diff --git a/src/bindings/python/wheel/setup.py b/src/bindings/python/wheel/setup.py index 1b7af69eceb230..9f6103302559ce 100644 --- a/src/bindings/python/wheel/setup.py +++ b/src/bindings/python/wheel/setup.py @@ -38,6 +38,8 @@ ARCH = "arm" elif machine == "aarch64" or machine == "arm64" or machine == "ARM64": ARCH = "arm64" +elif machine == "riscv64": + ARCH = "riscv64" # The following variables can be defined in environment or .env file SCRIPT_DIR = Path(__file__).resolve().parents[0] diff --git a/src/core/include/openvino/op/ops.hpp b/src/core/include/openvino/op/ops.hpp index f6c91269215f8f..7a17f120f735a5 100644 --- a/src/core/include/openvino/op/ops.hpp +++ b/src/core/include/openvino/op/ops.hpp @@ -153,6 +153,7 @@ #include "openvino/op/result.hpp" #include "openvino/op/reverse.hpp" #include "openvino/op/reverse_sequence.hpp" +#include "openvino/op/rms_norm.hpp" #include "openvino/op/rnn_cell.hpp" #include "openvino/op/rnn_sequence.hpp" #include "openvino/op/roi_align.hpp" diff --git a/src/core/include/openvino/op/rms_norm.hpp b/src/core/include/openvino/op/rms_norm.hpp new file mode 100644 index 00000000000000..43bfd7e213bab0 --- /dev/null +++ b/src/core/include/openvino/op/rms_norm.hpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace op { +namespace v14 { +/// \brief Operator performing Root Mean Square Normalization +/// \ingroup ov_ops_cpp_api +class OPENVINO_API RMSNorm : public ov::op::Op { +public: + OPENVINO_OP("RMSNorm", "opset14", ov::op::Op); + + RMSNorm() = default; + /// \brief Constructs an RMSNorm operation without scaling. + /// + /// \param data Input tensor with data + /// \param axes Axes for reduce mean calculation + /// \param eps Epsilon for not dividing by zero while normalizing the value + /// \param compute_type Precision for the internal computation, if undefined it's the same as the input type + RMSNorm(const Output& data, + const Output& axes, + double epsilson, + const ov::element::Type& compute_type = ov::element::undefined); + + /// \brief Constructs an RMSNorm operation with scaling. + /// + /// \param data Input tensor with data + /// \param axes Axes for reduce mean calculation + /// \param scale Scale values for weight + /// \param eps Epsilon for not dividing by zero while normalizing the value + /// \param compute_type Precision for the internal computation, if undefined it's the same as the input type + RMSNorm(const Output& data, + const Output& axes, + const Output& scale, + double epsilson, + const ov::element::Type& compute_type = ov::element::undefined); + + bool visit_attributes(ov::AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; + + double get_epsilon() const; + const ov::element::Type& get_compute_type() const; + +private: + double m_epsilon{0}; + ov::element::Type m_compute_type{ov::element::undefined}; +}; + +} // namespace v14 +} // namespace op +} // namespace ov diff --git a/src/core/include/openvino/opsets/opset.hpp b/src/core/include/openvino/opsets/opset.hpp index a3f88d0d308320..7ab8c43fcec6ab 100644 --- a/src/core/include/openvino/opsets/opset.hpp +++ b/src/core/include/openvino/opsets/opset.hpp @@ -167,6 +167,11 @@ const OPENVINO_API OpSet& get_opset13(); * @ingroup ov_opset_cpp_api */ const OPENVINO_API OpSet& get_opset14(); +/** + * @brief Returns map of available opsets + * @ingroup ov_opset_cpp_api + */ +const OPENVINO_API OpSet& get_opset15(); /** * @brief Returns map of available opsets * @ingroup ov_opset_cpp_api diff --git a/src/core/include/openvino/opsets/opset15.hpp b/src/core/include/openvino/opsets/opset15.hpp new file mode 100644 index 00000000000000..f80423014d9a31 --- /dev/null +++ b/src/core/include/openvino/opsets/opset15.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/ops.hpp" + +namespace ov { +namespace opset15 { +#define _OPENVINO_OP_REG(a, b) using b::a; +#include "openvino/opsets/opset15_tbl.hpp" +#undef _OPENVINO_OP_REG +} // namespace opset15 +} // namespace ov diff --git a/src/core/include/openvino/opsets/opset15_tbl.hpp b/src/core/include/openvino/opsets/opset15_tbl.hpp new file mode 100644 index 00000000000000..cd6274c757ece4 --- /dev/null +++ b/src/core/include/openvino/opsets/opset15_tbl.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifndef _OPENVINO_OP_REG +# warning "_OPENVINO_OP_REG not defined" +# define _OPENVINO_OP_REG(x, y) +#endif + +// Previous opsets operators +_OPENVINO_OP_REG(Parameter, ov::op::v0) +_OPENVINO_OP_REG(Convert, ov::op::v0) +_OPENVINO_OP_REG(ShapeOf, ov::op::v3) + +// New operations added in opset15 diff --git a/src/core/reference/include/openvino/reference/autobroadcast_binop.hpp b/src/core/reference/include/openvino/reference/autobroadcast_binop.hpp index cdae05772ea36d..9a90e4a9b14a0f 100644 --- a/src/core/reference/include/openvino/reference/autobroadcast_binop.hpp +++ b/src/core/reference/include/openvino/reference/autobroadcast_binop.hpp @@ -19,7 +19,7 @@ namespace internal { inline void row_major_strides(const Shape& shape, size_t* strides, size_t size) noexcept { size_t* st = strides + size - 1; size_t s = 1; - for (auto d = shape.rbegin(); d != shape.rend(); d++) { + for (auto d = shape.rbegin(), last = shape.rend(); d != last; ++d) { *st-- = s; s *= *d; } @@ -44,10 +44,11 @@ inline void numpy_autobroadcast_binop(const T* arg0, const Shape& output_shape, const size_t axis, const size_t stride, - Functor elementwise_functor) { + Functor&& elementwise_functor) { for (CoordinateIterator it(output_shape), ite = CoordinateIterator::end();;) { - for (size_t i = 0; i < stride; ++i) - *out++ = elementwise_functor(arg0[i * A0], arg1[i * A1]); + for (size_t i = 0; i < stride; ++i, ++out) { + *out = elementwise_functor(arg0[i * A0], arg1[i * A1]); + } arg0 += A0 ? stride : 1; arg1 += A1 ? stride : 1; @@ -72,23 +73,233 @@ inline size_t calculate_fixed_axis(size_t axis, const size_t* strides) { } } // namespace internal -/// \brief Helper function to implement autobroadcasting elementwise binop references. -/// -/// \tparam T Element type of the input tensors. -/// \tparam U Element type of the output tensor. -/// \tparam Functor Type of the functor for the elementwise operation. Must support -/// operator()(T,T), and operator()(T,T) must return a value of type -/// U. -/// -/// \param arg0 Pointer to the buffer for left operand input tensor. -/// \param arg1 Pointer to the buffer for right operand input tensor. -/// \param out Pointer to the buffer for output tensor. This must be pre-allocated by -/// the caller, and must be large enough to hold a tensor of the correct -/// shape. -/// \param broadcast_spec Specification of the auto-broadcasting scheme. -/// \param elementwise_functor Functor implementing the elementwise operation to be -/// applied across the input tensors. Must accept two -/// arguments of type T, and return a value of type U. +/** + * @brief Apply elementwise function for 2 inputs of same size. + * + * @param arg0 Pointer to input 0 data. + * @param arg1 Pointer to input 1 data. + * @param out Pointer to output data. + * @param count Number of elements in inputs + * @param f Binary elementwise functions. + */ +template +void no_broadcast_binop(const T* arg0, const T* arg1, U* out, const size_t count, Functor f) { + for (auto last = arg0 + count; arg0 != last; ++arg0, ++arg1, ++out) { + *out = f(*arg0, *arg1); + } +} + +/** + * @brief Apply elementwise function for 2 inputs and apply NUMPY broadcasting. + * + * @param arg0 Pointer to input 0 data. + * @param arg1 Pointer to input 1 data. + * @param out Pointer to output data. + * @param arg0_shape Shape of input 0. + * @param arg1_shape Shape of input 1. + * @param f Binary elementwise functions. + */ +template +void numpy_broadcast_binop(const T* arg0, + const T* arg1, + U* out, + const Shape& arg0_shape, + const Shape& arg1_shape, + Functor f) { + // We'll be using CoordinateTransformBasic to handle the broadcasting. The general procedure is as follows: + // + // (1) Left pad the shorter of the two shapes with ones. + // (2) Squeeze (remove ones from) both shapes, and record the squeezed axis indices. + // (3) Using CoordinateTransformBasic, broadcast both args to the final output shape. The "broadcasted axes" will be + // those that were squeezed in step 2. + // + // Example: + // + // Input shape->Padded shape->Squeezed Shape/Squeezed Axes + // ----------- ------------ ---------------------------- + // a: [ 3, 2, 1] [ 3, 2, 1] [ 3, 2 ] {2} + // b: [ 1, 6] [ 1, 1, 6] [ 6] {0,1} + // | | | + // v v v + // Output shape + // ------------ + // [ 3, 2, 6] + using namespace internal; + + size_t const shape_rank = std::max(arg0_shape.size(), arg1_shape.size()) + 1; + + // TODO: Use compiler-specific alloca() or variable-length array + std::vector tmp(shape_rank * 2); + + size_t* strides0 = tmp.data(); + size_t* strides1 = tmp.data() + shape_rank; + + row_major_strides(arg0_shape, strides0, shape_rank); + row_major_strides(arg1_shape, strides1, shape_rank); + + size_t const padding0 = shape_rank - arg0_shape.size(); + size_t const padding1 = shape_rank - arg1_shape.size(); + + Shape output_shape(shape_rank, 0); + + size_t axis = 0; + + for (size_t i = 0; i < shape_rank; ++i) { + auto const dim0 = value_with_padding_or(arg0_shape, padding0, i, 1); + auto const dim1 = value_with_padding_or(arg1_shape, padding1, i, 1); + + output_shape[i] = std::max(dim0, dim1); + + if (dim0 != dim1) + axis = std::max(axis, i); + } + + if (axis == 0) { + no_broadcast_binop(arg0, arg1, out, strides0[0], f); + } else if (strides0[axis] == 1 && value_with_padding_or(arg0_shape, padding0, axis, 1) == 1) { + axis = calculate_fixed_axis(axis, strides0); + + internal::numpy_autobroadcast_binop<0, 1>(arg0, + arg1, + out, + arg0_shape, + arg1_shape, + strides0, + strides1, + padding0, + padding1, + output_shape, + axis, + strides1[axis], + f); + } else if (strides1[axis] == 1 && value_with_padding_or(arg1_shape, padding1, axis, 1) == 1) { + axis = calculate_fixed_axis(axis, strides1); + + internal::numpy_autobroadcast_binop<1, 0>(arg0, + arg1, + out, + arg0_shape, + arg1_shape, + strides0, + strides1, + padding0, + padding1, + output_shape, + axis, + strides0[axis], + f); + } else + internal::numpy_autobroadcast_binop<1, 1>(arg0, + arg1, + out, + arg0_shape, + arg1_shape, + strides0, + strides1, + padding0, + padding1, + output_shape, + axis, + strides0[axis], + f); +} + +/** + * @brief Apply elementwise function for 2 inputs and apply PDPP broadcasting. + * + * @param arg0 Pointer to input 0 data. + * @param arg1 Pointer to input 1 data. + * @param out Pointer to output data. + * @param arg0_shape Shape of input 0. + * @param arg1_shape Shape of input 1. + * @param axis Start dimension index for broadcast arg1 shape into arg0. + * @param f Binary elementwise functions. + */ +template +void pdpd_broadcast_binop(const T* arg0, + const T* arg1, + U* out, + const Shape& arg0_shape, + const Shape& arg1_shape, + int64_t axis, + Functor f) { + // We'll be using CoordinateTransformBasic to handle the broadcasting. No need to process arg0 and output shape will + // be the same as arg0. We need to process arg1 and the general procedure is as follows: + // + // (1) Trim trailing ones from arg1 shape. + // (2) Left and right pad arg1 to match arg0 shape. Axis is the index start to align between arg0 and arg1. + // (3) Squeeze (remove ones from) arg1 shape, and record the squeezed axis indices. + // (3) Using CoordinateTransformBasic, broadcast arg1 to the final output shape. The "broadcasted axes" will be + // those that were squeezed in step 23. + // + // Example: + // + // Input shape-> Padded shape-> Squeezed Shape/Squeezed Axes + // ----------- ------------ ---------------------------- + // a: [ 3, 4, 5, 6] [ 3, 4, 5, 6] [ 3, 4, 5, 6] + // b: [ 4, 5, ] [ 1, 4, 5, 1] [ 4, 5 ] {0,3} + // | | | + // v v v + // Output shape + // ------------ + // [ 3, 4, 5, 6] + + if (axis == -1) { + axis = arg0_shape.size() - arg1_shape.size(); + } + + Shape arg1_padded_shape = arg1_shape; + // Trim trailing ones + while (arg1_padded_shape.size() > 0 && arg1_padded_shape.back() == 1) { + arg1_padded_shape.pop_back(); + } + + for (int64_t i = 0; i < axis; ++i) { + arg1_padded_shape.insert(arg1_padded_shape.begin(), 1); + } + + while (arg1_padded_shape.size() < arg0_shape.size()) { + arg1_padded_shape.insert(arg1_padded_shape.end(), 1); + } + + Shape arg1_squeezed_shape; + AxisSet arg1_squeezed_axes; + + for (size_t i = 0, size = arg0_shape.size(); i < size; i++) { + if (arg1_padded_shape[i] == 1) { + arg1_squeezed_axes.insert(i); + } else { + arg1_squeezed_shape.push_back(arg1_padded_shape[i]); + } + } + + const CoordinateTransformBasic output_transform{arg0_shape}; + + for (const Coordinate& output_coord : output_transform) { + const auto arg1_coord = util::reduce(output_coord, arg1_squeezed_axes); + const auto out_index = coordinate_index(output_coord, arg0_shape); + const auto arg0_index = coordinate_index(output_coord, arg0_shape); + const auto arg1_index = coordinate_index(arg1_coord, arg1_squeezed_shape); + out[out_index] = f(arg0[arg0_index], arg1[arg1_index]); + } +} + +/** + * @brief Helper function to implement auto broadcasting elementwise binop references. + * + * @tparam T Element type of the input tensors. + * @tparam U Element type of the output tensor. + * @tparam Functor Type of the functor for the elementwise operation. Must support operator()(T,T), and operator()(T,T) + * must return a value of type U. + * + * @param arg0 Pointer to the buffer for left operand input tensor. + * @param arg1 Pointer to the buffer for right operand input tensor. + * @param out Pointer to the buffer for output tensor. This must be pre-allocated by the caller, and must be large + * enough to hold a tensor of the correct shape. + * @param broadcast_spec Specification of the auto-broadcasting scheme. + * @param elementwise_functor Functor implementing the elementwise operation to be applied across the input tensors. + * Must accept two arguments of type T, and return a value of type U. + */ template void autobroadcast_binop(const T* arg0, const T* arg1, @@ -99,203 +310,36 @@ void autobroadcast_binop(const T* arg0, Functor elementwise_functor) { switch (broadcast_spec.m_type) { case op::AutoBroadcastType::NONE: - for (size_t i = 0; i < shape_size(arg0_shape); i++) { - out[i] = static_cast(elementwise_functor(arg0[i], arg1[i])); - } + no_broadcast_binop(arg0, arg1, out, shape_size(arg0_shape), elementwise_functor); break; case op::AutoBroadcastType::NUMPY: - // We'll be using CoordinateTransformBasic to handle the broadcasting. The general - // procedure is as follows: - // - // (1) Left pad the shorter of the two shapes with ones. - // (2) Squeeze (remove ones from) both shapes, and record the squeezed axis - // indices. - // (3) Using CoordinateTransformBasic, broadcast both args to the final output - // shape. The "broadcasted axes" will be those that were squeezed in step - // 2. - // - // Example: - // - // Input shape->Padded shape->Squeezed Shape/Squeezed Axes - // ----------- ------------ ---------------------------- - // a: [ 3, 2, 1] [ 3, 2, 1] [ 3, 2 ] {2} - // b: [ 1, 6] [ 1, 1, 6] [ 6] {0,1} - // | | | - // v v v - // Output shape - // ------------ - // [ 3, 2, 6] - { - using namespace internal; - - size_t const shape_rank = std::max(arg0_shape.size(), arg1_shape.size()) + 1; - - // TODO: Use compiler-specific alloca() or variable-length array - std::vector tmp(shape_rank * 2); - - size_t* strides0 = tmp.data(); - size_t* strides1 = tmp.data() + shape_rank; - - row_major_strides(arg0_shape, strides0, shape_rank); - row_major_strides(arg1_shape, strides1, shape_rank); - - size_t const padding0 = shape_rank - arg0_shape.size(); - size_t const padding1 = shape_rank - arg1_shape.size(); - - Shape output_shape(shape_rank, 0); - - size_t axis = 0; - - for (size_t i = 0; i < shape_rank; i++) { - auto const dim0 = value_with_padding_or(arg0_shape, padding0, i, 1); - auto const dim1 = value_with_padding_or(arg1_shape, padding1, i, 1); - - output_shape[i] = std::max(dim0, dim1); - - if (dim0 != dim1) - axis = std::max(axis, i); - } - - if (axis == 0) { - for (size_t i = 0, end = strides0[0]; i < end; ++i) - out[i] = elementwise_functor(arg0[i], arg1[i]); - } else if (strides0[axis] == 1 && value_with_padding_or(arg0_shape, padding0, axis, 1) == 1) { - axis = calculate_fixed_axis(axis, strides0); - - numpy_autobroadcast_binop<0, 1>(arg0, - arg1, - out, - arg0_shape, - arg1_shape, - strides0, - strides1, - padding0, - padding1, - output_shape, - axis, - strides1[axis], - elementwise_functor); - } else if (strides1[axis] == 1 && value_with_padding_or(arg1_shape, padding1, axis, 1) == 1) { - axis = calculate_fixed_axis(axis, strides1); - - numpy_autobroadcast_binop<1, 0>(arg0, - arg1, - out, - arg0_shape, - arg1_shape, - strides0, - strides1, - padding0, - padding1, - output_shape, - axis, - strides0[axis], - elementwise_functor); - } else - numpy_autobroadcast_binop<1, 1>(arg0, - arg1, - out, - arg0_shape, - arg1_shape, - strides0, - strides1, - padding0, - padding1, - output_shape, - axis, - strides0[axis], - elementwise_functor); - } + numpy_broadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, elementwise_functor); break; case op::AutoBroadcastType::PDPD: - // We'll be using CoordinateTransformBasic to handle the broadcasting. No need to - // process arg0 and output shape will be the same as arg0. We need to process - // arg1 and the general procedure is as follows: - // - // (1) Trim trailing ones from arg1 shape. - // (2) Left and right pad arg1 to match arg0 shape. Axis is the index start - // to align between arg0 and arg1. - // (3) Squeeze (remove ones from) arg1 shape, and record the squeezed axis - // indices. - // (3) Using CoordinateTransformBasic, broadcast arg1 to the final output - // shape. The "broadcasted axes" will be those that were squeezed in step - // 23. - // - // Example: - // - // Input shape-> Padded shape-> Squeezed Shape/Squeezed Axes - // ----------- ------------ ---------------------------- - // a: [ 3, 4, 5, 6] [ 3, 4, 5, 6] [ 3, 4, 5, 6] - // b: [ 4, 5, ] [ 1, 4, 5, 1] [ 4, 5 ] {0,3} - // | | | - // v v v - // Output shape - // ------------ - // [ 3, 4, 5, 6] - { - int64_t axis = broadcast_spec.m_axis; - if (axis == -1) { - axis = arg0_shape.size() - arg1_shape.size(); - } - - Shape arg1_padded_shape = arg1_shape; - // Trim trailing ones - while (arg1_padded_shape.size() > 0 && arg1_padded_shape.back() == 1) { - arg1_padded_shape.pop_back(); - } - - for (int64_t i = 0; i < axis; ++i) { - arg1_padded_shape.insert(arg1_padded_shape.begin(), 1); - } - - while (arg1_padded_shape.size() < arg0_shape.size()) { - arg1_padded_shape.insert(arg1_padded_shape.end(), 1); - } - - Shape arg1_squeezed_shape; - AxisSet arg1_squeezed_axes; - - for (size_t i = 0; i < arg0_shape.size(); i++) { - if (arg1_padded_shape[i] == 1) { - arg1_squeezed_axes.insert(i); - } else { - arg1_squeezed_shape.push_back(arg1_padded_shape[i]); - } - } - - const CoordinateTransformBasic output_transform{arg0_shape}; - - for (const Coordinate& output_coord : output_transform) { - const auto arg1_coord = util::reduce(output_coord, arg1_squeezed_axes); - const auto out_index = coordinate_index(output_coord, arg0_shape); - const auto arg0_index = coordinate_index(output_coord, arg0_shape); - const auto arg1_index = coordinate_index(arg1_coord, arg1_squeezed_shape); - out[out_index] = elementwise_functor(arg0[arg0_index], arg1[arg1_index]); - } - } + pdpd_broadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec.m_axis, elementwise_functor); + break; + default: + break; } } -/// \brief Helper function to implement autobroadcasting elementwise ternaryop -/// references. -/// -/// \tparam U Element type of the selector tensor. -/// \tparam T Element type of the input tensors. -/// \tparam Functor Type of the functor for the elementwise operation. Must support -/// operator()(U,T,T), and operator()(U,T,T) must return a value of type -/// T. -/// -/// \param arg0 Pointer to the buffer for selector tensor. -/// \param arg1 Pointer to the buffer for left operand input tensor. -/// \param arg2 Pointer to the buffer for right operand input tensor. -/// \param out Pointer to the buffer for output tensor. This must be pre-allocated by -/// the caller, and must be large enough to hold a tensor of the correct -/// shape. -/// \param broadcast_spec Specification of the auto-broadcasting scheme. -/// \param elementwise_functor Functor implementing the elementwise operation to be -/// applied across the input tensors. Must accept an argument -/// of -/// type U and two of type T, and return a value of type T. +/** + * + * \brief Helper function to implement auto broadcasting elementwise ternary op references. + * \tparam U Element type of the selector tensor. + * \tparam T Element type of the input tensors. + * \tparam Functor Type of the functor for the elementwise operation. Must support operator()(U,T,T), and + * operator()(U,T,T) must return a value of type T. + * + * \param arg0 Pointer to the buffer for selector tensor. + * \param arg1 Pointer to the buffer for left operand input tensor. + * \param arg2 Pointer to the buffer for right operand input tensor. + * \param out Pointer to the buffer for output tensor. This must be pre-allocated by the caller, and must be large + * enough to hold a tensor of the correct shape. + * \param broadcast_spec Specification of the auto-broadcasting scheme. + * \param elementwise_functor Functor implementing the elementwise operation to be applied across the input tensors Must + * accept an argument of type U and two of type T, and return a value of type T. + */ template void autobroadcast_select(const U* arg0, const T* arg1, @@ -308,8 +352,8 @@ void autobroadcast_select(const U* arg0, Functor elementwise_functor) { switch (broadcast_spec.m_type) { case op::AutoBroadcastType::NONE: - for (size_t i = 0; i < shape_size(arg0_shape); i++) { - out[i] = elementwise_functor(arg0[i], arg1[i], arg2[i]); + for (auto last = arg0 + shape_size(arg0_shape); arg0 != last; ++arg0, ++arg1, ++arg2, ++out) { + *out = elementwise_functor(*arg0, *arg1, *arg2); } break; case op::AutoBroadcastType::NUMPY: @@ -422,7 +466,7 @@ void autobroadcast_select(const U* arg0, Shape arg2_squeezed_shape; AxisSet arg2_squeezed_axes; - for (size_t i = 0; i < arg1_shape.size(); i++) { + for (size_t i = 0, size = arg1_shape.size(); i < size; ++i) { if (arg0_padded_shape[i] == 1) { arg0_squeezed_axes.insert(i); } else { diff --git a/src/core/shape_inference/include/rms_norm_shape_inference.hpp b/src/core/shape_inference/include/rms_norm_shape_inference.hpp new file mode 100644 index 00000000000000..bc03fe37f91f34 --- /dev/null +++ b/src/core/shape_inference/include/rms_norm_shape_inference.hpp @@ -0,0 +1,65 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/rms_norm.hpp" +#include "utils.hpp" + +namespace ov { +namespace op { +namespace v14 { +template > +std::vector shape_infer(const RMSNorm* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { + const auto inputs_count = input_shapes.size(); + const auto has_scale_input = inputs_count == 3; + NODE_SHAPE_INFER_CHECK(op, input_shapes, inputs_count == 2 || has_scale_input); + + const auto& data_shape = input_shapes[0]; + const auto& data_rank = data_shape.rank(); + const auto& axes_shape = input_shapes[1]; + const auto& axes_rank = axes_shape.rank(); + + NODE_SHAPE_INFER_CHECK(op, + input_shapes, + ov::util::is_rank_compatible_any_of(axes_rank, {0, 1}), + "Axes input must be a scalar or 1D input. Got: ", + axes_shape); + + // Further validation requires data rank to be static + if (data_rank.is_dynamic()) { + return {data_shape}; + } + + if (axes_shape.rank().is_static()) { + const bool has_axes_compatible = axes_shape.size() == 0 || axes_shape[0].is_dynamic() || + cmp::ge(data_rank.get_length(), axes_shape.get_shape()[0]); + NODE_SHAPE_INFER_CHECK(op, + input_shapes, + has_axes_compatible, + "Number of the axes can't be higher than the rank of the data shape."); + } + + if (has_scale_input) { // Validate scale input + TRShape scale_shape = input_shapes[2]; + const bool is_scale_shape_broadcastable = + TRShape::broadcast_merge_into(scale_shape, data_shape, ov::op::AutoBroadcastType::NUMPY); + NODE_SHAPE_INFER_CHECK(op, + input_shapes, + is_scale_shape_broadcastable, + "Scale input shape must be broadcastable to the shape of the data input."); + } + + // Axes values validation + if (const auto axes_val = ov::op::get_input_const_data_as(op, 1, tensor_accessor)) { + ov::util::normalize_axes(op, data_rank.get_length(), *axes_val); + } + + return {data_shape}; +} +} // namespace v14 +} // namespace op +} // namespace ov diff --git a/src/core/src/op/rms_norm.cpp b/src/core/src/op/rms_norm.cpp new file mode 100644 index 00000000000000..a249e86a6a207e --- /dev/null +++ b/src/core/src/op/rms_norm.cpp @@ -0,0 +1,97 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/rms_norm.hpp" + +#include "itt.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/op.hpp" +#include "rms_norm_shape_inference.hpp" + +namespace ov { +namespace op { +namespace v14 { + +RMSNorm::RMSNorm(const Output& data, + const Output& axes, + double epsilson, + const ov::element::Type& compute_type) + : Op({data, axes}), + m_epsilon(epsilson), + m_compute_type(compute_type) { + constructor_validate_and_infer_types(); +} + +RMSNorm::RMSNorm(const Output& data, + const Output& axes, + const Output& scale, + double epsilson, + const ov::element::Type& compute_type) + : Op({data, axes, scale}), + m_epsilon(epsilson), + m_compute_type(compute_type) { + constructor_validate_and_infer_types(); +} + +bool RMSNorm::visit_attributes(ov::AttributeVisitor& visitor) { + OV_OP_SCOPE(v14_RMSNorm_visit_attributes); + visitor.on_attribute("epsilon", m_epsilon); + visitor.on_attribute("compute_type", m_compute_type); + return true; +} + +void RMSNorm::validate_and_infer_types() { + OV_OP_SCOPE(v14_RMSNorm_validate_and_infer_types); + + const auto& data_element_type = get_input_element_type(0); + const bool is_valid_data_type = data_element_type.is_dynamic() || data_element_type.is_real(); + NODE_VALIDATION_CHECK(this, + is_valid_data_type, + "The element type of the data tensor must be a floating point type. Got: ", + data_element_type); + + const auto& axes_element_type = get_input_element_type(1); + const bool is_valid_axes_type = + data_element_type.is_dynamic() || axes_element_type == element::i32 || axes_element_type == element::i64; + NODE_VALIDATION_CHECK(this, + is_valid_axes_type, + "The element type of the axes tensor must be i32 or i64 type. Got: ", + axes_element_type); + + if (get_input_size() > 2) { // Validate scale input type + + // Validate input types + auto merged_et = element::dynamic; + const auto& scale_element_type = get_input_element_type(2); + const bool is_scale_type_compatible = element::Type::merge(merged_et, data_element_type, scale_element_type); + NODE_VALIDATION_CHECK(this, + is_scale_type_compatible, + "Element type of the scale input must be the same as the data input type."); + } + + const auto output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)); + // Output type and shape is the same as the first input + set_output_type(0, data_element_type, output_shapes[0]); +} + +std::shared_ptr RMSNorm::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OV_OP_SCOPE(v14_RMSNorm_clone_with_new_inputs); + check_new_args_count(this, new_args); + if (new_args.size() == 2) { + return std::make_shared(new_args.at(0), new_args.at(1), m_epsilon, m_compute_type); + } + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_epsilon, m_compute_type); +} + +double RMSNorm::get_epsilon() const { + return m_epsilon; +} + +const ov::element::Type& RMSNorm::get_compute_type() const { + return m_compute_type; +} + +} // namespace v14 +} // namespace op +} // namespace ov diff --git a/src/core/src/opsets/opset.cpp b/src/core/src/opsets/opset.cpp index 8bfc5cac1209ac..7a154779d735e6 100644 --- a/src/core/src/opsets/opset.cpp +++ b/src/core/src/opsets/opset.cpp @@ -110,7 +110,8 @@ const std::map>& ov::get_availabl _OPENVINO_REG_OPSET(opset11), _OPENVINO_REG_OPSET(opset12), _OPENVINO_REG_OPSET(opset13), - _OPENVINO_REG_OPSET(opset14)}; + _OPENVINO_REG_OPSET(opset14), + _OPENVINO_REG_OPSET(opset15)}; #undef _OPENVINO_REG_OPSET return opset_map; } @@ -268,3 +269,14 @@ const ov::OpSet& ov::get_opset14() { }); return opset; } + +const ov::OpSet& ov::get_opset15() { + static OpSet opset; + static std::once_flag flag; + std::call_once(flag, [&]() { +#define _OPENVINO_OP_REG(NAME, NAMESPACE) opset.insert(); +#include "openvino/opsets/opset15_tbl.hpp" +#undef _OPENVINO_OP_REG + }); + return opset; +} diff --git a/src/core/tests/op.cpp b/src/core/tests/op.cpp index e59247f0ee133d..cf65eecc16cd4c 100644 --- a/src/core/tests/op.cpp +++ b/src/core/tests/op.cpp @@ -66,4 +66,5 @@ TEST(op, opset_multi_thread) { doTest(ov::get_opset12); doTest(ov::get_opset13); doTest(ov::get_opset14); + doTest(ov::get_opset15); } diff --git a/src/core/tests/opset.cpp b/src/core/tests/opset.cpp index 4933543e58d8ea..9c2807cdf04edb 100644 --- a/src/core/tests/opset.cpp +++ b/src/core/tests/opset.cpp @@ -13,6 +13,7 @@ #include "openvino/opsets/opset12.hpp" #include "openvino/opsets/opset13.hpp" #include "openvino/opsets/opset14.hpp" +#include "openvino/opsets/opset15.hpp" #include "openvino/opsets/opset2.hpp" #include "openvino/opsets/opset3.hpp" #include "openvino/opsets/opset4.hpp" @@ -73,7 +74,8 @@ INSTANTIATE_TEST_SUITE_P(opset, OpsetTestParams{ov::get_opset11, 177}, OpsetTestParams{ov::get_opset12, 178}, OpsetTestParams{ov::get_opset13, 186}, - OpsetTestParams{ov::get_opset14, 188}), + OpsetTestParams{ov::get_opset14, 188}, + OpsetTestParams{ov::get_opset15, 3}), OpsetTestNameGenerator{}); class MyOpOld : public ov::op::Op { diff --git a/src/core/tests/type_prop/rms_norm.cpp b/src/core/tests/type_prop/rms_norm.cpp new file mode 100644 index 00000000000000..b24531a9c2cf23 --- /dev/null +++ b/src/core/tests/type_prop/rms_norm.cpp @@ -0,0 +1,273 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/rms_norm.hpp" + +#include + +#include "common_test_utils/test_assertions.hpp" +#include "common_test_utils/type_prop.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/subtract.hpp" + +namespace ov { +namespace test { + +using ov::op::v0::Constant; +using ov::op::v0::Parameter; +using testing::HasSubstr; + +class TypePropRMSNormTest : public TypePropOpTest { +public: + double eps = 1e-5; +}; + +TEST_F(TypePropRMSNormTest, default_ctor) { + const auto op = make_op(); + const auto data = std::make_shared(element::f16, PartialShape{2, 3, 8, 6}); + const auto axes = std::make_shared(element::i64, PartialShape{1}); + const auto scale = std::make_shared(element::f16, PartialShape{}); + + op->set_arguments(ov::OutputVector{data, axes, scale}); + op->validate_and_infer_types(); + + EXPECT_EQ(op->get_output_size(), 1); + EXPECT_EQ(op->get_input_size(), 3); + EXPECT_EQ(op->get_output_element_type(0), element::f16); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{2, 3, 8, 6})); +} + +TEST_F(TypePropRMSNormTest, no_scale_no_compute_type) { + const auto data = std::make_shared(element::f32, PartialShape{2, 3, 8, 6}); + const auto axes = std::make_shared(element::i32, PartialShape{1}); + + const auto op = make_op(data, axes, eps); + EXPECT_EQ(op->get_input_size(), 2); + EXPECT_EQ(op->get_output_size(), 1); + EXPECT_EQ(op->get_output_element_type(0), element::f32); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{2, 3, 8, 6})); + EXPECT_EQ(op->get_epsilon(), eps); +} + +TEST_F(TypePropRMSNormTest, scale_no_compute_type) { + const auto data = std::make_shared(element::f16, PartialShape{2, 3, 8, 6}); + const auto axes = std::make_shared(element::i32, PartialShape{1}); + const auto scale = std::make_shared(element::f16, PartialShape{}); + + const auto op = make_op(data, axes, scale, eps); + EXPECT_EQ(op->get_input_size(), 3); + EXPECT_EQ(op->get_output_size(), 1); + EXPECT_EQ(op->get_output_element_type(0), element::f16); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{2, 3, 8, 6})); + EXPECT_EQ(op->get_epsilon(), eps); +} + +TEST_F(TypePropRMSNormTest, scale_compute_type) { + const auto data = std::make_shared(element::f16, PartialShape{2, 3, 8, 6}); + const auto axes = std::make_shared(element::i32, PartialShape{1}); + const auto scale = std::make_shared(element::f16, PartialShape{}); + const auto compute_type = element::f32; + + const auto op = make_op(data, axes, scale, eps, compute_type); + EXPECT_EQ(op->get_input_size(), 3); + EXPECT_EQ(op->get_output_size(), 1); + EXPECT_EQ(op->get_output_element_type(0), element::f16); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{2, 3, 8, 6})); + EXPECT_EQ(op->get_epsilon(), eps); + EXPECT_EQ(op->get_compute_type(), compute_type); +} + +TEST_F(TypePropRMSNormTest, scale_compute_type_no_scale) { + const auto data = std::make_shared(element::f16, PartialShape{2, 3, 8, 6}); + const auto axes = std::make_shared(element::i32, PartialShape{1}); + const auto compute_type = element::f32; + + const auto op = make_op(data, axes, eps, compute_type); + EXPECT_EQ(op->get_output_size(), 1); + EXPECT_EQ(op->get_output_element_type(0), element::f16); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{2, 3, 8, 6})); +} + +TEST_F(TypePropRMSNormTest, dynamic_data_shape) { + const auto data = std::make_shared(element::f16, PartialShape{-1, {3, 4}, {8, -1}, 6}); + const auto axes = std::make_shared(element::i32, PartialShape{1}); + const auto scale = std::make_shared(element::f16, PartialShape{}); + const auto compute_type = element::f32; + + const auto op = make_op(data, axes, scale, eps, compute_type); + EXPECT_EQ(op->get_output_element_type(0), element::f16); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{-1, {3, 4}, {8, -1}, 6})); +} + +TEST_F(TypePropRMSNormTest, dynamic_data_shape_rank) { + const auto data = std::make_shared(element::f16, PartialShape::dynamic()); + const auto axes = std::make_shared(element::i32, PartialShape{1}); + const auto scale = std::make_shared(element::f16, PartialShape{}); + const auto compute_type = element::f32; + + const auto op = make_op(data, axes, scale, eps, compute_type); + EXPECT_EQ(op->get_output_element_type(0), element::f16); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape::dynamic())); +} + +TEST_F(TypePropRMSNormTest, propagate_symbols) { + auto data_shape = PartialShape{-1, {3, 4}, {8, -1}, 6}; + set_shape_symbols(data_shape); + const auto exp_symbols = get_shape_symbols(data_shape); + + const auto data = std::make_shared(element::f16, data_shape); + const auto axes = std::make_shared(element::i32, PartialShape{1}); + const auto scale = std::make_shared(element::f16, PartialShape{}); + const auto compute_type = element::f32; + + const auto op = make_op(data, axes, scale, eps, compute_type); + EXPECT_EQ(get_shape_symbols(op->get_output_partial_shape(0)), exp_symbols); +} + +TEST_F(TypePropRMSNormTest, incorrect_input_type) { + const auto data = std::make_shared(element::f16, PartialShape::dynamic()); + const auto axes = std::make_shared(element::i32, PartialShape{1}); + const auto scale = std::make_shared(element::f16, PartialShape{}); + const auto compute_type = element::f32; + { + const auto data_int = std::make_shared(element::i32, PartialShape::dynamic()); + OV_EXPECT_THROW(std::ignore = make_op(data_int, axes, scale, eps, compute_type), + ov::NodeValidationFailure, + HasSubstr("The element type of the data tensor must be a floating point type")); + } + { + const auto axes_float = std::make_shared(element::f32, PartialShape{1}); + OV_EXPECT_THROW(std::ignore = make_op(data, axes_float, scale, eps, compute_type), + ov::NodeValidationFailure, + HasSubstr("The element type of the axes tensor must be i32 or i64 type")); + } + { + const auto scale_incompatible = std::make_shared(element::f32, PartialShape{1}); + OV_EXPECT_THROW(std::ignore = make_op(data, axes, scale_incompatible, eps, compute_type), + ov::NodeValidationFailure, + HasSubstr("Element type of the scale input must be the same as the data input type")); + } +} + +TEST_F(TypePropRMSNormTest, incompatible_axes_shape) { + const auto data = std::make_shared(element::f16, PartialShape{2, 3, 8}); + const auto scale = std::make_shared(element::f16, PartialShape{}); + const auto compute_type = element::f32; + { + const auto axes = std::make_shared(element::i32, PartialShape{1, 2}); + OV_EXPECT_THROW(std::ignore = make_op(data, axes, scale, eps, compute_type), + ov::NodeValidationFailure, + HasSubstr("Axes input must be a scalar or 1D input. Got: [1,2]")); + } + { + const auto axes = std::make_shared(element::i32, PartialShape{4}); + OV_EXPECT_THROW(std::ignore = make_op(data, axes, scale, eps, compute_type), + ov::NodeValidationFailure, + HasSubstr("Number of the axes can't be higher than the rank of the data shape")); + } +} + +TEST_F(TypePropRMSNormTest, constant_axes_val_data_dyn_rank) { + const auto data = std::make_shared(element::f16, PartialShape::dynamic()); + const auto axes = std::make_shared(element::i32, Shape{}, 1); + const auto op = make_op(data, axes, eps); + + EXPECT_EQ(op->get_output_element_type(0), element::f16); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape::dynamic())); +} + +TEST_F(TypePropRMSNormTest, constant_axes_val_data_static_rank) { + const auto data = std::make_shared(element::f16, PartialShape{2, 3, 8}); + const auto axes = std::make_shared(element::i32, Shape{}, 1); + const auto op = make_op(data, axes, eps); + + EXPECT_EQ(op->get_output_element_type(0), element::f16); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{2, 3, 8})); +} + +TEST_F(TypePropRMSNormTest, axes_val_as_shape_of) { + const auto data = std::make_shared(element::f16, PartialShape{2, 3, 8}); + const auto data_rank = std::make_shared(std::make_shared(data)); + const auto axes = + std::make_shared(data_rank, std::make_shared(element::i64, Shape{}, 1)); + const auto op = make_op(data, axes, eps); + + EXPECT_EQ(op->get_output_element_type(0), element::f16); + EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{2, 3, 8})); +} + +TEST_F(TypePropRMSNormTest, incorrect_axes_val) { + const auto data = std::make_shared(element::f16, PartialShape{2, 3, 8}); + { + const auto axes = std::make_shared(element::i32, Shape{}, 3); + OV_EXPECT_THROW(std::ignore = make_op(data, axes, eps), + ov::NodeValidationFailure, + HasSubstr("Parameter axis 3 out of the tensor rank range [-3, 2]")); + } + { + const auto axes = std::make_shared(element::i32, Shape{}, -4); + OV_EXPECT_THROW(std::ignore = make_op(data, axes, eps), + ov::NodeValidationFailure, + HasSubstr("Parameter axis -4 out of the tensor rank range [-3, 2]")); + } +} + +using RMSNormTestParam = std::tuple; +class TypePropRMSNormTestP : public TypePropRMSNormTest, public testing::WithParamInterface { +protected: + void SetUp() override { + std::tie(shape_data, shape_scale) = GetParam(); + } + PartialShape shape_data, shape_scale; +}; + +INSTANTIATE_TEST_SUITE_P(type_prop_rms_scale_shape, + TypePropRMSNormTestP, + testing::Values(std::make_tuple(PartialShape{-1, 3, 1, 2}, PartialShape{-1}), + std::make_tuple(PartialShape{-1, 3, 1, 2}, PartialShape{}), + std::make_tuple(PartialShape{-1, 3, 1, 2}, PartialShape{1}), + std::make_tuple(PartialShape{-1, 3, 1, 2}, PartialShape{2}), + std::make_tuple(PartialShape{-1, 3, 1, 2}, PartialShape{1, 1}), + std::make_tuple(PartialShape{-1, 3, 1, 2}, PartialShape{1, 2}), + std::make_tuple(PartialShape{-1, 3, 1, 2}, PartialShape{3, 1, 2}), + std::make_tuple(PartialShape{-1, 4, 8, 6}, PartialShape{1, 4, 1, 1}), + std::make_tuple(PartialShape{2, 4, 8, 6}, PartialShape{2, 4, 8, 6}), + std::make_tuple(PartialShape{2, 4, 8, 6}, PartialShape{1, 4, 1, 1}), + std::make_tuple(PartialShape{2, 4, 8, 6}, PartialShape{1, 1, 1, 1}), + std::make_tuple(PartialShape{2, 4, 8, 6}, PartialShape::dynamic()), + std::make_tuple(PartialShape::dynamic(), PartialShape{1}), + std::make_tuple(PartialShape::dynamic(), PartialShape::dynamic())), + testing::PrintToStringParamName()); + +TEST_P(TypePropRMSNormTestP, scale_shape) { + const auto data = std::make_shared(element::f16, shape_data); + const auto axes = std::make_shared(element::i32, PartialShape{1}); + + const auto scale = std::make_shared(element::f16, shape_scale); + const auto op = make_op(data, axes, scale, eps); + + EXPECT_EQ(op->get_output_partial_shape(0), shape_data); +} + +TEST_F(TypePropRMSNormTest, scale_incompatible_shape) { + const auto data = std::make_shared(element::f16, PartialShape{-1, 3, 8, 6}); + const auto axes = std::make_shared(element::i32, PartialShape{1}); + const auto compute_type = element::f32; + { + const auto scale = std::make_shared(element::f16, PartialShape{8}); + OV_EXPECT_THROW(std::ignore = make_op(data, axes, scale, eps, compute_type), + ov::NodeValidationFailure, + HasSubstr("Scale input shape must be broadcastable to the shape of the data input")); + } + { + const auto scale = std::make_shared(element::f16, PartialShape{6, 1}); + OV_EXPECT_THROW(std::ignore = make_op(data, axes, scale, eps, compute_type), + ov::NodeValidationFailure, + HasSubstr("Scale input shape must be broadcastable to the shape of the data input")); + } +} + +} // namespace test +} // namespace ov diff --git a/src/core/tests/visitors/op/rms_norm.cpp b/src/core/tests/visitors/op/rms_norm.cpp new file mode 100644 index 00000000000000..ac0d191d1e6dfb --- /dev/null +++ b/src/core/tests/visitors/op/rms_norm.cpp @@ -0,0 +1,50 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/rms_norm.hpp" + +#include + +#include "visitors/visitors.hpp" + +using ov::PartialShape; +using ov::op::v0::Parameter; +using ov::test::NodeBuilder; + +TEST(attributes, rms_norm_v14_attr_comp_type_default) { + using ov::op::v14::RMSNorm; + NodeBuilder::opset().insert(); + + const auto data = std::make_shared(ov::element::f16, PartialShape{2, 3, 8, 6}); + const auto axes = std::make_shared(ov::element::i32, PartialShape{1}); + const auto eps = 1e-5f; + + const auto op = std::make_shared(data, axes, eps); + + NodeBuilder builder(op, {data, axes}); + auto g_op = ov::as_type_ptr(builder.create()); + + EXPECT_EQ(g_op->get_compute_type(), op->get_compute_type()); + EXPECT_EQ(g_op->get_output_element_type(0), op->get_output_element_type(0)); + EXPECT_EQ(g_op->get_output_partial_shape(0), op->get_output_partial_shape(0)); +} + +TEST(attributes, rms_norm_v14_attr_comp_type_custom) { + using ov::op::v14::RMSNorm; + NodeBuilder::opset().insert(); + + const auto data = std::make_shared(ov::element::f16, PartialShape{2, 3, 8, 6}); + const auto axes = std::make_shared(ov::element::i32, PartialShape{1}); + const auto eps = 1e-5f; + const auto compute_type = ov::element::f32; + + const auto op = std::make_shared(data, axes, eps, compute_type); + + NodeBuilder builder(op, {data, axes}); + auto g_op = ov::as_type_ptr(builder.create()); + + EXPECT_EQ(g_op->get_compute_type(), op->get_compute_type()); + EXPECT_EQ(g_op->get_output_element_type(0), op->get_output_element_type(0)); + EXPECT_EQ(g_op->get_output_partial_shape(0), op->get_output_partial_shape(0)); +} diff --git a/src/frontends/common/include/openvino/frontend/extension/op.hpp b/src/frontends/common/include/openvino/frontend/extension/op.hpp index bead92c3b17d1b..4198c411082e42 100644 --- a/src/frontends/common/include/openvino/frontend/extension/op.hpp +++ b/src/frontends/common/include/openvino/frontend/extension/op.hpp @@ -25,7 +25,7 @@ inline const ov::OpSet& get_opset_by_name(const std::string& opset_name) { if (opsets.find(opset_name) != opsets.end()) return opsets.at(opset_name)(); if (opset_name.empty() || opset_name == "latest") { - return ov::get_opset14(); + return ov::get_opset14(); // TODO (ticket 138273): Update at the end of the opset15 development } else { FRONT_END_GENERAL_CHECK(false, "Unsupported opset name: ", opset_name); } diff --git a/src/frontends/pytorch/src/op/min_max.cpp b/src/frontends/pytorch/src/op/min_max.cpp index 3600939336f20b..70f67d16cfee76 100644 --- a/src/frontends/pytorch/src/op/min_max.cpp +++ b/src/frontends/pytorch/src/op/min_max.cpp @@ -207,6 +207,26 @@ OutputVector translate_amax(const NodeContext& context) { return {res}; } +OutputVector translate_aminmax(const NodeContext& context) { + num_inputs_check(context, 1, 4); // Expect between 1 and 4 inputs + // (input tensor, dim = none, keepdim = false, out = none) + + auto input = context.get_input(0); + + // check if dim is provided, if not, get the range of axes to compute min and max + auto dim = !context.input_is_none(1) ? context.get_input(1) : get_axes_range(context, 0); + + // check if keepdim is provided, if not, set it to false like PyTorch + bool keep_dims = !context.input_is_none(2) ? context.const_input(2) : false; + + auto amin = context.mark_node(std::make_shared(input, dim, keep_dims)); + auto amax = context.mark_node(std::make_shared(input, dim, keep_dims)); + + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(3), "out argument is not supported for aten::aminmax"); + + return {amin, amax}; +} + } // namespace op } // namespace pytorch } // namespace frontend diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 4560ccb10b279e..7d7de25d092d25 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -24,6 +24,7 @@ OP_CONVERTER(translate_adaptive_max_pool2d); OP_CONVERTER(translate_adaptive_max_pool1d); OP_CONVERTER(translate_add); OP_CONVERTER(translate_add_); +OP_CONVERTER(translate_aminmax); OP_CONVERTER(translate_mul); OP_CONVERTER(translate_mul_); OP_CONVERTER(translate_addcmul); @@ -352,6 +353,7 @@ const std::map get_supported_ops_ts() { {"aten::all", op::translate_all}, {"aten::amax", op::translate_amax}, {"aten::amin", op::translate_amin}, + {"aten::aminmax", op::translate_aminmax}, // aten::append - Supported in limited set of patterns {"aten::arange", op::translate_arange}, {"aten::argmax", op::translate_argmax}, diff --git a/src/frontends/tensorflow/docs/supported_ops.md b/src/frontends/tensorflow/docs/supported_ops.md index 7206a2af141041..a8bb88e1ab601a 100644 --- a/src/frontends/tensorflow/docs/supported_ops.md +++ b/src/frontends/tensorflow/docs/supported_ops.md @@ -629,7 +629,7 @@ A "supported operation" is one that TensorFlow Frontend can convert to the OpenV | MatrixDiagV2 | NO | | | MatrixDiagV3 | NO | | | MatrixExponential | NO | | -| MatrixInverse | NO | | +| MatrixInverse | YES | | | MatrixLogarithm | NO | | | MatrixSetDiag | NO | | | MatrixSetDiagV2 | NO | | diff --git a/src/frontends/tensorflow/src/op_table.cpp b/src/frontends/tensorflow/src/op_table.cpp index 3422148db2d771..7a9e4f9acd3d63 100644 --- a/src/frontends/tensorflow/src/op_table.cpp +++ b/src/frontends/tensorflow/src/op_table.cpp @@ -302,6 +302,7 @@ const std::map get_supported_ops() { {"MatMul", CreatorFunction(translate_mat_mul_op)}, {"MatrixBandPart", CreatorFunction(translate_matrix_band_part_op)}, {"MatrixDiag", CreatorFunction(translate_matrix_diag_op)}, + {"MatrixInverse", CreatorFunction(translate_matrix_inverse_op)}, {"MaxPool", CreatorFunction(translate_max_pool_op)}, {"MaxPoolV2", CreatorFunction(translate_max_pool_op)}, {"MaxPool3D", CreatorFunction(translate_max_pool_op)}, diff --git a/src/frontends/tensorflow/tests/requirements.txt b/src/frontends/tensorflow/tests/requirements.txt index ffe9a6acd37681..a1064f488d6b34 100644 --- a/src/frontends/tensorflow/tests/requirements.txt +++ b/src/frontends/tensorflow/tests/requirements.txt @@ -1,3 +1,5 @@ -c ../../../bindings/python/constraints.txt numpy tensorflow +# limit h5py version for Linux arm64, 3.11 version failed +h5py<3.11.0; sys_platform == 'linux' and platform_machine == 'aarch64' \ No newline at end of file diff --git a/src/frontends/tensorflow_common/include/common_op_table.hpp b/src/frontends/tensorflow_common/include/common_op_table.hpp index af59b862c89234..df62808b3cdacf 100644 --- a/src/frontends/tensorflow_common/include/common_op_table.hpp +++ b/src/frontends/tensorflow_common/include/common_op_table.hpp @@ -106,6 +106,7 @@ OP_CONVERTER(translate_lrn_op); OP_CONVERTER(translate_mat_mul_op); OP_CONVERTER(translate_matrix_diag_op); OP_CONVERTER(translate_matrix_band_part_op); +OP_CONVERTER(translate_matrix_inverse_op); OP_CONVERTER(translate_max_pool_op); OP_CONVERTER_NAMED(translate_max_pool_with_argmax); OP_CONVERTER(translate_mirror_pad_op); diff --git a/src/frontends/tensorflow_common/src/op/matrix_inverse.cpp b/src/frontends/tensorflow_common/src/op/matrix_inverse.cpp new file mode 100644 index 00000000000000..0e7d6e15948fa7 --- /dev/null +++ b/src/frontends/tensorflow_common/src/op/matrix_inverse.cpp @@ -0,0 +1,35 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_op_table.hpp" +#include "openvino/op/inverse.hpp" + +using namespace std; +using namespace ov::op; + +namespace ov { +namespace frontend { +namespace tensorflow { +namespace op { +OutputVector translate_matrix_inverse_op(const NodeContext& node) { + default_op_checks(node, 1, {"MatrixInverse"}); + // retrieve the input tensor + auto input = node.get_input(0); + + // handle optional 'adjoint' attribute (default is false) + bool adjoint = node.get_attribute("adjoint", false); + TENSORFLOW_OP_VALIDATION( + node, + !adjoint, + "[TensorFlow Frontend] internal error: MatrixInverse is supported only for adjoint equal to false"); + + auto inverse_op = make_shared(input, adjoint); + set_node_name(node.get_name(), inverse_op); + + return {inverse_op}; +} +} // namespace op +} // namespace tensorflow +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp index 8f05876ce219b7..b3588f8bffbd47 100644 --- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp @@ -90,6 +90,7 @@ #include "reshape_shape_inference.hpp" #include "reverse_sequence_shape_inference.hpp" #include "reverse_shape_inference.hpp" +#include "rms_norm_shape_inference.hpp" #include "rnn_cell_shape_inference.hpp" #include "rnn_sequence_shape_inference.hpp" #include "roi_align_shape_inference.hpp" @@ -399,6 +400,7 @@ using IStaticShapeInferFactory = template <> const IStaticShapeInferFactory::TRegistry IStaticShapeInferFactory::registry{ // opset14 + _OV_OP_SHAPE_INFER_MASK_REG(op::v14::RMSNorm, ShapeInferTA, util::bit::mask(1)), _OV_OP_SHAPE_INFER_MASK_REG(opset14::Inverse, ShapeInferTA, util::bit::mask()), // opset13 _OV_OP_SHAPE_INFER_MASK_REG(opset13::Multinomial, ShapeInferTA, util::bit::mask(1)), diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/rms_norm_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/rms_norm_shape_inference_test.cpp new file mode 100644 index 00000000000000..cb3f346ec98c6f --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/rms_norm_shape_inference_test.cpp @@ -0,0 +1,137 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "common_test_utils/test_assertions.hpp" +#include "utils.hpp" + +using namespace ov; +using namespace ov::intel_cpu; +using ov::op::v0::Constant; +using ov::op::v0::Parameter; +using testing::HasSubstr; + +TEST(StaticShapeInferenceTest, RMSNormStaticShapeInferenceTestDefaultCtor) { + const auto op = std::make_shared(); + const auto data = std::make_shared(element::f16, PartialShape::dynamic()); + const auto axes = std::make_shared(element::i64, PartialShape::dynamic()); + const auto scale = std::make_shared(element::f16, PartialShape::dynamic()); + + op->set_arguments(ov::OutputVector{data, axes, scale}); + + std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{1}, StaticShape{1}}; + int32_t axis_val = -1; + const auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + const auto static_output_shapes = shape_inference(op.get(), static_input_shapes, const_data); + EXPECT_EQ(static_output_shapes[0], StaticShape({2, 3, 8, 6})); +} + +TEST(StaticShapeInferenceTest, RMSNormStaticShapeInferenceTest2ins) { + const auto data = std::make_shared(element::f32, PartialShape::dynamic()); + const auto axes = std::make_shared(element::i32, PartialShape::dynamic()); + const auto eps = 1e-5f; + + const auto op = std::make_shared(data, axes, eps); + + std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{1}}; + int32_t axis_val = -1; + const auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + const auto static_output_shapes = shape_inference(op.get(), static_input_shapes, const_data); + EXPECT_EQ(static_output_shapes[0], StaticShape({2, 3, 8, 6})); +} + +TEST(StaticShapeInferenceTest, RMSNormStaticShapeInferenceTest3ins) { + const auto data = std::make_shared(element::f32, PartialShape::dynamic()); + const auto axes = std::make_shared(element::i32, PartialShape::dynamic()); + const auto scale = std::make_shared(element::f32, PartialShape::dynamic()); + const auto eps = 1e-5f; + + const auto op = std::make_shared(data, axes, scale, eps); + + std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{1}, StaticShape{1}}; + int32_t axis_val = -1; + const auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + const auto static_output_shapes = shape_inference(op.get(), static_input_shapes, const_data); + EXPECT_EQ(static_output_shapes[0], StaticShape({2, 3, 8, 6})); +} + +TEST(StaticShapeInferenceTest, RMSNormIncorrectAxisValParam) { + const auto data = std::make_shared(element::f32, PartialShape::dynamic()); + const auto axes = std::make_shared(element::i32, PartialShape::dynamic()); + const auto eps = 1e-5f; + + const auto op = std::make_shared(data, axes, eps); + + std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{1}}; + int32_t axis_val = 5; + const auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + + OV_EXPECT_THROW(shape_inference(op.get(), static_input_shapes, const_data), + NodeValidationFailure, + HasSubstr("Parameter axis 5 out of the tensor rank range [-4, 3]")); +} + +TEST(StaticShapeInferenceTest, RMSNormIncorrectAxisValConst) { + const auto data = std::make_shared(element::f32, PartialShape::dynamic()); + const auto axes = std::make_shared(element::i32, Shape{}, 5); + const auto eps = 1e-5f; + + const auto op = std::make_shared(data, axes, eps); + + std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{}}; + + OV_EXPECT_THROW(shape_inference(op.get(), static_input_shapes), + NodeValidationFailure, + HasSubstr("Parameter axis 5 out of the tensor rank range [-4, 3]")); +} + +TEST(StaticShapeInferenceTest, RMSNormIncorrectAxisShapeDim) { + const auto data = std::make_shared(element::f32, PartialShape::dynamic()); + const auto axes = std::make_shared(element::i32, PartialShape::dynamic()); + const auto eps = 1e-5f; + + const auto op = std::make_shared(data, axes, eps); + + std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{5}}; + int32_t axis_val = 5; + const auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + + OV_EXPECT_THROW(shape_inference(op.get(), static_input_shapes, const_data), + NodeValidationFailure, + HasSubstr("Number of the axes can't be higher than the rank of the data shape")); +} + +TEST(StaticShapeInferenceTest, RMSNormIncorrectAxisShapeRank) { + const auto data = std::make_shared(element::f32, PartialShape::dynamic()); + const auto axes = std::make_shared(element::i32, PartialShape::dynamic()); + const auto eps = 1e-5f; + + const auto op = std::make_shared(data, axes, eps); + + std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{1, 5}}; + int32_t axis_val = 5; + const auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + + OV_EXPECT_THROW(shape_inference(op.get(), static_input_shapes, const_data), + NodeValidationFailure, + HasSubstr("Axes input must be a scalar or 1D input. Got: {1,5}")); +} + +TEST(StaticShapeInferenceTest, RMSNormIncorrectScaleShape) { + const auto data = std::make_shared(element::f32, PartialShape::dynamic()); + const auto axes = std::make_shared(element::i32, PartialShape::dynamic()); + const auto scale = std::make_shared(element::f32, PartialShape::dynamic()); + const auto eps = 1e-5f; + + const auto op = std::make_shared(data, axes, scale, eps); + + std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{1}, StaticShape{6, 1}}; + int32_t axis_val = -1; + const auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + + OV_EXPECT_THROW(shape_inference(op.get(), static_input_shapes, const_data), + NodeValidationFailure, + HasSubstr("Scale input shape must be broadcastable to the shape of the data input")); +} diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp index 688ab6a72c0c22..fd7a141cc85487 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp @@ -73,6 +73,9 @@ inline ov::element::Type convert_to_supported_device_type(ov::element::Type et) } } +bool is_supported(ov::element::Type_t et); +bool data_types_are_supported(const ov::Node* node); + using PrecisionMap = std::map; std::vector get_output_data_types(const ov::Node* op, PrecisionMap precision_map = {}); diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/program_builder.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/program_builder.hpp index 0931e9310145c4..5cede62fd17e69 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/program_builder.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/program_builder.hpp @@ -80,8 +80,7 @@ struct PerfCounter { class ProgramBuilder final { public: - ProgramBuilder(std::shared_ptr model, cldnn::engine& engine, const ExecutionConfig& config, - bool createTopologyOnly = false, bool partialBuild = false, + ProgramBuilder(std::shared_ptr model, cldnn::engine& engine, const ExecutionConfig& config, bool partialBuild = false, std::shared_ptr task_executor = nullptr, std::shared_ptr compilation_context = nullptr, bool innerProgram = false); @@ -174,10 +173,9 @@ class ProgramBuilder final { void cleanup_build(); // TODO(eunsoo): remove createTopolpgyOnly argument and add another method to create topology from ngraph function - std::shared_ptr build(const std::vector>& ops, - bool createTopologyOnly = false, bool partialBuild = false, bool innerProgram = false); + std::shared_ptr build(const std::vector>& ops, bool partialBuild = false, bool innerProgram = false); - void CreateSingleLayerPrimitive(cldnn::topology& topology, const std::shared_ptr& op); + void CreateSingleLayerPrimitive(const std::shared_ptr& op); }; void CreateCustomOp(ProgramBuilder& p, const std::shared_ptr& node, CustomLayerPtr customLayer); @@ -189,8 +187,6 @@ void CreateElementwiseOp(ProgramBuilder& p, std::vector coefficients = {}, bool pythondiv = true); -bool IsNodeOnConstPath(const std::shared_ptr& node); - void validate_inputs_count(const std::shared_ptr& op, std::vector possible_inputs_count); inline bool ends_with(const std::string& value, const std::string& suffix) { diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_runtime_skippable_nodes.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_runtime_skippable_nodes.cpp index da847d5d2504bc..f7f173ca5282e5 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_runtime_skippable_nodes.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_runtime_skippable_nodes.cpp @@ -37,6 +37,8 @@ void mark_runtime_skippable_nodes::run(program& p) { || impl_params->get_input_layout(0).get_partial_shape()[axis] == impl_params->get_input_layout(1).get_partial_shape()[0]) { // May be skipepd node.can_be_optimized(true); + // Set runtime skippable only when the node is set as can_be_optimized finally. + node.set_runtime_skippable(true); GPU_DEBUG_TRACE_DETAIL << "[mark_runtime_skippable_nodes] : " << node.id() << " can_be_optimized" << std::endl; } }); @@ -56,6 +58,8 @@ void mark_runtime_skippable_nodes::run(program& p) { if (node.have_user_with_type() && node.get_users().size() == 1) return; node.can_be_optimized(true); + // Set runtime skippable only when the node is set as can_be_optimized finally. + node.set_runtime_skippable(true); GPU_DEBUG_TRACE_DETAIL << "[mark_runtime_skippable_nodes] : " << node.id() << " can_be_optimized" << std::endl; } }); @@ -94,6 +98,8 @@ void mark_runtime_skippable_nodes::run(program& p) { if (!end.empty() && !is_valid) return; node.can_be_optimized(true); + // Set runtime skippable only when the node is set as can_be_optimized finally. + node.set_runtime_skippable(true); GPU_DEBUG_TRACE_DETAIL << "[mark_runtime_skippable_nodes] : " << node.id() << " can_be_optimized" << std::endl; }); program_helpers::do_for_types(*node, [](broadcast_node& node){ @@ -132,6 +138,8 @@ void mark_runtime_skippable_nodes::run(program& p) { } node.can_be_optimized(true); + // Set runtime skippable only when the node is set as can_be_optimized finally. + node.set_runtime_skippable(true); GPU_DEBUG_TRACE_DETAIL << "[mark_runtime_skippable_nodes] : " << node.id() << " can_be_optimized" << std::endl; } }); diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp index 622ce9c05061c6..99a2b3c6f857f9 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp @@ -242,7 +242,10 @@ void prepare_padding::run(program& p) { } auto& input = node.get_dependency(0); - if (node.get_preferred_impl_type() == impl_types::ocl && input.is_type()) { + // WA to add reorder between MVN and Conv because Conv need input data with padding but MVN opt kernel with default format does not support padding. + // TODO: MVN opt kernel should support padding. + if (node.get_preferred_impl_type() == impl_types::ocl && input.is_type() + && format::is_default_format(input.get_output_layout().format)) { // check the allowed format to avoid perf drop by unnecessary reorder addition. auto new_reorder = std::make_shared(node.id() + "_padding_reorder_for_" + input.id(), input.id(), input.get_output_layout()); auto& new_reorder_node = p.get_or_create(new_reorder); p.add_intermediate(new_reorder_node, node, input); diff --git a/src/plugins/intel_gpu/src/graph/include/program_node.h b/src/plugins/intel_gpu/src/graph/include/program_node.h index d133d92e77b72a..6161d04ed26ab1 100644 --- a/src/plugins/intel_gpu/src/graph/include/program_node.h +++ b/src/plugins/intel_gpu/src/graph/include/program_node.h @@ -301,6 +301,10 @@ struct program_node { bool can_be_optimized() const { return optimized; } void can_be_optimized(bool opt) { optimized = opt; } + // check/set if the node is runtime skippable + bool is_runtime_skippable() const { return runtime_skippable; } + void set_runtime_skippable(bool skippable) { runtime_skippable = skippable; } + // check/set if the node's buffer can be shared during the memory pool optimization bool can_share_buffer() const { return share_buffer; } void can_share_buffer(bool share) { share_buffer = share; } @@ -484,6 +488,7 @@ struct program_node { bool constant = false; bool data_flow = false; bool in_shape_of_subgraph = false; + bool runtime_skippable = false; std::set dependant_shape_of_nodes; diff --git a/src/plugins/intel_gpu/src/graph/include/reorder_inst.h b/src/plugins/intel_gpu/src/graph/include/reorder_inst.h index 8e91957c5192dd..9226510cb34520 100644 --- a/src/plugins/intel_gpu/src/graph/include/reorder_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/reorder_inst.h @@ -27,6 +27,7 @@ struct typed_program_node : public typed_program_node_base { public: typed_program_node(const std::shared_ptr prim, program& prog) : parent(prim, prog) { support_padding_all(true); + set_runtime_skippable(true); } program_node& mean_nv12() const { return get_dependency(2); } diff --git a/src/plugins/intel_gpu/src/graph/include/reshape_inst.h b/src/plugins/intel_gpu/src/graph/include/reshape_inst.h index fda3975d2e691a..166a1cb5d3b734 100644 --- a/src/plugins/intel_gpu/src/graph/include/reshape_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/reshape_inst.h @@ -20,6 +20,7 @@ struct typed_program_node : public typed_program_node_base { using parent = typed_program_node_base; typed_program_node(const std::shared_ptr prim, program& prog) : parent(prim, prog) { support_padding_all(true); + set_runtime_skippable(true); } public: diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index 56231751990908..6dda7d9147f238 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -103,11 +103,9 @@ bool is_user_cpu(const program_node* user) { return true; } } - // TODO : refactor these as runtime_skippable_nodes - // If the user is dynamic && runtime skippable gather or strided slice, we still need to its parents' completion + // If the user is dynamic and runtime skippable node, we still need to its parents' completion // event even though the user's program_node is can_be_optimized - if (!user->is_dynamic() || (!user->is_type() && !user->is_type() && - !user->is_type() && !user->is_type())) + if (!user->is_dynamic() || (!user->is_runtime_skippable())) return false; } bool is_cpu = user->get_selected_impl() ? user->get_selected_impl()->is_cpu() @@ -541,8 +539,7 @@ event::ptr primitive_inst::realloc_if_needed() { } // Clear out memory if if was previously reused, but now primitive can't be optimized - if (_node->is_type() || _node->is_type() || _node->is_type() || _node->is_type() || - _node->is_type() || _node->is_type()) { + if (_node->is_runtime_skippable()) { if (can_be_optimized()) { _max_output_layout_count = _deps[0].first->_max_output_layout_count; GPU_DEBUG_PROFILED_STAGE_MEMALLOC_INFO("can_be_optimized"); diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/strided_slice_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/strided_slice_ref.cl index eff062e449f097..088e7f6364c469 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/strided_slice_ref.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/strided_slice_ref.cl @@ -289,33 +289,62 @@ KERNEL(strided_slice_ref)(OPTIONAL_SHAPE_INFO_ARG #if NEW_AXIS_MODE // If NEW_AXIS_MODE that just copy input to output -#ifdef OUTPUT_LAYOUT_BFYX +#ifdef INPUT0_LAYOUT_BFYX + const uint index_in_batch = (feature * (uint)get_global_size(2) + (uint)get_global_id(2)) % (OUTPUT_SIZE_X * OUTPUT_SIZE_Y); + const uint input_feature_id = (feature * (uint)get_global_size(2) + (uint)get_global_id(2)) / (OUTPUT_SIZE_X * OUTPUT_SIZE_Y); const uint w_input = 0; const uint z_input = 0; - const uint y_input = (uint)get_global_id(2) / INPUT0_SIZE_X; - const uint x_input = (uint)get_global_id(2) % INPUT0_SIZE_X; -#elif OUTPUT_LAYOUT_BFZYX + const uint y_input = index_in_batch / OUTPUT_SIZE_X; + const uint x_input = index_in_batch % OUTPUT_SIZE_X; +#elif INPUT0_LAYOUT_BFZYX + const uint index_in_batch = (feature * (uint)get_global_size(2) + (uint)get_global_id(2)) % (OUTPUT_SIZE_X * OUTPUT_SIZE_Y * OUTPUT_SIZE_Z); + const uint input_feature_id = (feature * (uint)get_global_size(2) + (uint)get_global_id(2)) / (OUTPUT_SIZE_X * OUTPUT_SIZE_Y * OUTPUT_SIZE_Z); const uint w_input = 0; - const uint yx_input = (uint)get_global_id(2) % (INPUT0_SIZE_X * INPUT0_SIZE_Y); - const uint z_input = (uint)get_global_id(2) / (INPUT0_SIZE_X * INPUT0_SIZE_Y); - const uint y_input = yx_input / INPUT0_SIZE_X; - const uint x_input = yx_input % INPUT0_SIZE_X; -#elif OUTPUT_LAYOUT_BFWZYX - const uint zyx_input = (uint)get_global_id(2) % (INPUT0_SIZE_X * INPUT0_SIZE_Y * INPUT0_SIZE_Z); - const uint w_input = (uint)get_global_id(2) / (INPUT0_SIZE_X * INPUT0_SIZE_Y * INPUT0_SIZE_Z); - const uint z_input = zyx_input / (INPUT0_SIZE_X * INPUT0_SIZE_Y); - const uint yx_input = zyx_input % (INPUT0_SIZE_X * INPUT0_SIZE_Y); - const uint y_input = yx_input / INPUT0_SIZE_X; - const uint x_input = yx_input % INPUT0_SIZE_X; + const uint yx_input = index_in_batch % (OUTPUT_SIZE_X * OUTPUT_SIZE_Y); + const uint z_input = index_in_batch / (OUTPUT_SIZE_X * OUTPUT_SIZE_Y); + const uint y_input = yx_input / OUTPUT_SIZE_X; + const uint x_input = yx_input % OUTPUT_SIZE_X; +#elif INPUT0_LAYOUT_BFWZYX + const uint index_in_batch = (feature * (uint)get_global_size(2) + (uint)get_global_id(2)) % (OUTPUT_SIZE_X * OUTPUT_SIZE_Y * OUTPUT_SIZE_Z * OUTPUT_SIZE_W); + const uint input_feature_id = (feature * (uint)get_global_size(2) + (uint)get_global_id(2)) / (OUTPUT_SIZE_X * OUTPUT_SIZE_Y * OUTPUT_SIZE_Z * OUTPUT_SIZE_W); + const uint zyx_input = index_in_batch % (OUTPUT_SIZE_X * OUTPUT_SIZE_Y * OUTPUT_SIZE_Z); + const uint w_input = index_in_batch / (OUTPUT_SIZE_X * OUTPUT_SIZE_Y * OUTPUT_SIZE_Z); + const uint z_input = zyx_input / (OUTPUT_SIZE_X * OUTPUT_SIZE_Y); + const uint yx_input = zyx_input % (OUTPUT_SIZE_X * OUTPUT_SIZE_Y); + const uint y_input = yx_input / OUTPUT_SIZE_X; + const uint x_input = yx_input % OUTPUT_SIZE_X; #endif + const uint input_index = INPUT0_OFFSET + batch * INPUT0_BATCH_PITCH + - feature * INPUT0_FEATURE_PITCH + - w_input * INPUT0_W_PITCH + - z_input * INPUT0_Z_PITCH + - y_input * INPUT0_Y_PITCH + - x_input * INPUT0_X_PITCH; - output[input_index] = input[input_index]; + input_feature_id * INPUT0_FEATURE_PITCH + + w_input * OUTPUT_W_PITCH + + z_input * OUTPUT_Z_PITCH + + y_input * OUTPUT_Y_PITCH + + x_input * OUTPUT_X_PITCH; + +#ifdef OUTPUT_LAYOUT_BFYX + const uint y = (uint)get_global_id(2) / OUTPUT_SIZE_X; + const uint x = (uint)get_global_id(2) % OUTPUT_SIZE_X; + const uint output_index = OUTPUT_GET_INDEX(batch, feature, y, x); +#elif OUTPUT_LAYOUT_BFZYX + const uint yx = (uint)get_global_id(2) % (OUTPUT_SIZE_X * OUTPUT_SIZE_Y); + const uint z = (uint)get_global_id(2) / (OUTPUT_SIZE_X * OUTPUT_SIZE_Y); + const uint y = yx / OUTPUT_SIZE_X; + const uint x = yx % OUTPUT_SIZE_X; + const uint output_index = OUTPUT_GET_INDEX(batch, feature, z, y, x); +#elif OUTPUT_LAYOUT_BFWZYX + const uint zyx = (uint)get_global_id(2) % (OUTPUT_SIZE_X * OUTPUT_SIZE_Y * OUTPUT_SIZE_Z); + const uint w = (uint)get_global_id(2) / (OUTPUT_SIZE_X * OUTPUT_SIZE_Y * OUTPUT_SIZE_Z); + const uint z = zyx / (OUTPUT_SIZE_X * OUTPUT_SIZE_Y); + const uint yx = zyx % (OUTPUT_SIZE_X * OUTPUT_SIZE_Y); + const uint y = yx / OUTPUT_SIZE_X; + const uint x = yx % OUTPUT_SIZE_X; + const uint output_index = OUTPUT_GET_INDEX(batch, feature, w, z, y, x); +#endif + + output[output_index] = input[input_index]; + #else // NEW_AXIS_MODE #ifdef OUTPUT_LAYOUT_BFYX const uint w = 0; diff --git a/src/plugins/intel_gpu/src/plugin/common_utils.cpp b/src/plugins/intel_gpu/src/plugin/common_utils.cpp index 8c7d45bff6208f..6064b70e6f07d6 100644 --- a/src/plugins/intel_gpu/src/plugin/common_utils.cpp +++ b/src/plugins/intel_gpu/src/plugin/common_utils.cpp @@ -94,6 +94,53 @@ void convert_and_copy(const void* src_ptr, ov::element::Type src_et, void* dst_p namespace ov { namespace intel_gpu { +bool is_supported(ov::element::Type_t et) { + switch (et) { + case ov::element::Type_t::undefined: return true; + case ov::element::Type_t::dynamic: return false; + case ov::element::Type_t::boolean: return true; // converted to u8 + case ov::element::Type_t::bf16: return false; + case ov::element::Type_t::f16: return true; + case ov::element::Type_t::f32: return true; + case ov::element::Type_t::f64: return true; // converted to inference precision + case ov::element::Type_t::i4: return true; + case ov::element::Type_t::i8: return true; + case ov::element::Type_t::i16: return false; + case ov::element::Type_t::i32: return true; + case ov::element::Type_t::i64: return true; // converted to i32 + case ov::element::Type_t::u1: return true; + case ov::element::Type_t::u2: return false; + case ov::element::Type_t::u3: return false; + case ov::element::Type_t::u4: return true; + case ov::element::Type_t::u6: return true; + case ov::element::Type_t::u8: return true; + case ov::element::Type_t::u16: return true; // converted to i32 + case ov::element::Type_t::u32: return true; // converted to i32 + case ov::element::Type_t::u64: return true; // converted to i32 + case ov::element::Type_t::nf4: return false; + case ov::element::Type_t::f8e4m3: return false; + case ov::element::Type_t::f8e5m2: return false; + case ov::element::Type_t::string: return false; + default: return false; + } + + return false; +} + +bool data_types_are_supported(const ov::Node* node) { + for (size_t i = 0; i < node->get_input_size(); i++) { + if (!is_supported(node->get_input_element_type(i))) + return false; + } + + for (size_t i = 0; i < node->get_output_size(); i++) { + if (!is_supported(node->get_output_element_type(i))) + return false; + } + + return true; +} + void convert_and_copy(const ov::ITensor* src, cldnn::memory::ptr dst, cldnn::stream& stream) { const bool blocking = true; auto src_et = src->get_element_type(); diff --git a/src/plugins/intel_gpu/src/plugin/compiled_model.cpp b/src/plugins/intel_gpu/src/plugin/compiled_model.cpp index bfaea6ea279c04..52fa7f03e14da0 100644 --- a/src/plugins/intel_gpu/src/plugin/compiled_model.cpp +++ b/src/plugins/intel_gpu/src/plugin/compiled_model.cpp @@ -2,28 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "openvino/pass/serialize.hpp" #include "openvino/runtime/iplugin.hpp" #include "openvino/runtime/intel_gpu/properties.hpp" #include "openvino/runtime/internal_properties.hpp" -#include "openvino/util/common_util.hpp" #include "intel_gpu/graph/serialization/binary_buffer.hpp" -#include "intel_gpu/graph/serialization/layout_serializer.hpp" -#include "intel_gpu/graph/serialization/string_serializer.hpp" -#include "intel_gpu/graph/serialization/utils.hpp" -#include "intel_gpu/graph/serialization/vector_serializer.hpp" #include "intel_gpu/runtime/itt.hpp" #include "intel_gpu/plugin/graph.hpp" #include "intel_gpu/plugin/compiled_model.hpp" #include "intel_gpu/plugin/async_infer_request.hpp" -#include -#include #include -#include -#include -#include namespace ov { namespace intel_gpu { diff --git a/src/plugins/intel_gpu/src/plugin/graph.cpp b/src/plugins/intel_gpu/src/plugin/graph.cpp index 852a75f0ef4cbd..cc35d024322538 100644 --- a/src/plugins/intel_gpu/src/plugin/graph.cpp +++ b/src/plugins/intel_gpu/src/plugin/graph.cpp @@ -39,7 +39,7 @@ Graph::Graph(std::shared_ptr model, const RemoteContextImpl::Ptr& con : m_context(context) , m_config(config) , m_stream_id(stream_id) { - auto program_builder = std::make_shared(model, get_engine(), config, false, false); + auto program_builder = std::make_shared(model, get_engine(), config, false); m_config = program_builder->get_config(); build(program_builder->get_compiled_program()); diff --git a/src/plugins/intel_gpu/src/plugin/ops/condition.cpp b/src/plugins/intel_gpu/src/plugin/ops/condition.cpp index ba238e111c70d1..4b7b3748d6e69d 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/condition.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/condition.cpp @@ -31,7 +31,7 @@ static cldnn::condition::branch gen_branch(ProgramBuilder& p, const std::shared_ config.set_property(ov::intel_gpu::max_dynamic_batch(1)); config.set_property(ov::intel_gpu::allow_new_shape_infer(op->is_dynamic() || p.use_new_shape_infer())); - ProgramBuilder prog(internal_body, p.get_engine(), config, false, false, p.get_task_executor(), p.get_compilation_context(), true); + ProgramBuilder prog(internal_body, p.get_engine(), config, false, p.get_task_executor(), p.get_compilation_context(), true); branch.inner_program = prog.get_compiled_program(); auto& input_map = branch.input_map; diff --git a/src/plugins/intel_gpu/src/plugin/ops/loop.cpp b/src/plugins/intel_gpu/src/plugin/ops/loop.cpp index d280be79adab2d..21314503caa055 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/loop.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/loop.cpp @@ -297,7 +297,7 @@ static void CreateCommonLoopOp(ProgramBuilder& p, const std::shared_ptr(cloned_model, engine, config, false, true); + program = std::make_shared(cloned_model, engine, config, true); std::pair device_memory_usage = program->get_compiled_program()->get_estimated_device_mem_usage(); if (device_memory_usage.first == static_cast(-1L) && device_memory_usage.second == static_cast(-1L)) { return static_cast(max_batch_size); diff --git a/src/plugins/intel_gpu/src/plugin/program_builder.cpp b/src/plugins/intel_gpu/src/plugin/program_builder.cpp index 499744f8d0eacc..2d09e3ffba0b81 100644 --- a/src/plugins/intel_gpu/src/plugin/program_builder.cpp +++ b/src/plugins/intel_gpu/src/plugin/program_builder.cpp @@ -8,6 +8,7 @@ #include "openvino/op/lstm_cell.hpp" #include "openvino/op/loop.hpp" +#include "intel_gpu/plugin/common_utils.hpp" #include "intel_gpu/plugin/program_builder.hpp" #include "intel_gpu/runtime/itt.hpp" #include "intel_gpu/runtime/debug_configuration.hpp" @@ -54,7 +55,7 @@ std::string layer_type_name_ID(const std::shared_ptr& op) { } ProgramBuilder::ProgramBuilder(std::shared_ptr model, cldnn::engine& engine, const ExecutionConfig& config, - bool create_topology_only, bool partial_build, + bool partial_build, std::shared_ptr task_executor, std::shared_ptr compilation_context, bool is_inner_program) @@ -103,7 +104,7 @@ ProgramBuilder::ProgramBuilder(std::shared_ptr model, cldnn::engine& auto ops = model->get_ordered_ops(); - m_program = build(ops, create_topology_only, partial_build, is_inner_program); + m_program = build(ops, partial_build, is_inner_program); } ProgramBuilder::ProgramBuilder(cldnn::engine& engine, const ExecutionConfig& config) @@ -133,8 +134,7 @@ void ProgramBuilder::cleanup_build() { #endif } -std::shared_ptr ProgramBuilder::build(const std::vector>& ops, - bool create_topology_only, bool partial_build, bool is_inner_program) { +std::shared_ptr ProgramBuilder::build(const std::vector>& ops, bool partial_build, bool is_inner_program) { OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "ProgramBuilder::build"); // In the case of inner program, allow_new_shape_infer flag is setted by outside of program. // So, do not check allow_new_shape_infer for inner program build @@ -157,35 +157,31 @@ std::shared_ptr ProgramBuilder::build(const std::vector& op) { OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "ProgramBuilder::is_op_supported"); - cldnn::topology topology; try { // Query mode disables checks that input primitives are created, // as is_op_supported method is called for each operation separately @@ -198,8 +194,11 @@ bool ProgramBuilder::is_op_supported(const std::shared_ptr& op) { // 2. We also check parameters of each operation, which means we have more // reliable results of QueryNetwork call. prepare_build(); + if (!data_types_are_supported(op.get())) + return false; + allow_new_shape_infer = requires_new_shape_infer(op); - CreateSingleLayerPrimitive(topology, op); + CreateSingleLayerPrimitive(op); cleanup_build(); DisableQueryMode(); } catch (std::exception&) { @@ -211,7 +210,7 @@ bool ProgramBuilder::is_op_supported(const std::shared_ptr& op) { return true; } -void ProgramBuilder::CreateSingleLayerPrimitive(cldnn::topology& topology, const std::shared_ptr& op) { +void ProgramBuilder::CreateSingleLayerPrimitive(const std::shared_ptr& op) { OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "ProgramBuilder::CreateSingleLayerPrimitive"); GPU_DEBUG_LOG << "Process " << "op::" << op->get_type_info().version_id << "::" << op->get_type_name() << " operation " << "(friendly_name=" << op->get_friendly_name() << ")" << std::endl; @@ -369,28 +368,6 @@ int64_t ProgramBuilder::get_result_index(const ov::Output& value return m_model->get_result_index(value); } -// TODO: Does it make sense to add such method to ov core? -bool IsNodeOnConstPath(const std::shared_ptr& node) { - std::set> nodes_processed = {}; - std::function&)> is_const_node = [&nodes_processed, &is_const_node](const std::shared_ptr& node) { - if (nodes_processed.count(node)) return true; - nodes_processed.insert(node); - // If input is constant, then drop it from the processing list - if (std::dynamic_pointer_cast(node) != nullptr) - return true; - // If the node doesn't have any parents and it's not a constant, then we deal with dynamic path - if (node->get_input_size() == 0) - return false; - for (size_t i = 0; i < node->get_input_size(); i++) { - auto input_node = node->get_input_node_shared_ptr(i); - if (!is_const_node(input_node)) - return false; - } - return true; - }; - return is_const_node(node); -} - void validate_inputs_count(const std::shared_ptr& op, std::vector valid_inputs_count) { for (auto ic : valid_inputs_count) { if (op->get_input_size() == ic) { diff --git a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp index 3018a4f0b965f5..dba2809c7b1019 100644 --- a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp +++ b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp @@ -6,8 +6,6 @@ #include "openvino/core/preprocess/input_tensor_info.hpp" #include "openvino/core/parallel.hpp" #include "openvino/core/validation_util.hpp" -#include "openvino/op/util/op_types.hpp" -#include "transformations/utils/utils.hpp" #include "intel_gpu/primitives/kv_cache.hpp" #include "intel_gpu/plugin/usm_host_tensor.hpp" diff --git a/src/plugins/intel_gpu/src/plugin/usm_host_tensor.cpp b/src/plugins/intel_gpu/src/plugin/usm_host_tensor.cpp index ebc4d20b3c1844..5ffe32f7a2dfdf 100644 --- a/src/plugins/intel_gpu/src/plugin/usm_host_tensor.cpp +++ b/src/plugins/intel_gpu/src/plugin/usm_host_tensor.cpp @@ -5,7 +5,6 @@ #include "intel_gpu/plugin/usm_host_tensor.hpp" #include "intel_gpu/plugin/remote_tensor.hpp" #include "intel_gpu/plugin/remote_context.hpp" -#include "openvino/runtime/make_tensor.hpp" #include namespace ov { diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 516e1a54b6792e..d705eb52414b13 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -205,8 +205,6 @@ std::vector disabledTestPatterns() { R"(.*smoke_ConvolutionBackpropData3D_AutoPadding_OutputPaddingDefined/ConvolutionBackpropDataLayerTest.Inference/.*_TS=\{\(1.16.5.5.5\)\}_.*_netPRC=f16_.*)", R"(.*smoke_PSROIPooling_average/PSROIPoolingLayerTest.Inference/IS=\(3.8.16.16\)_coord_shape=\(10.5\)_out_dim=2_group_size=2_scale=(0.625|1)_bins_x=1_bins_y=1_mode=average_modelType=f16.*)", R"(.*smoke_RDFT_5d_last_axis/RDFTLayerTest.Inference/IS=\(10.4.8.2.5\)_modelType=f32_Axes=\(0.1.2.3.4\)_SignalSize=\(\).*)", - // Issue: 129991 - R"(.*StridedSliceLayerTest.*TS=.*2.2.4.1*.*)", // Issue: 136862 R"(.*smoke_ConditionGPUTest_static/StaticConditionLayerGPUTest.CompareWithRefs/IS=\(3.6\)_netPRC=i8_ifCond=PARAM_targetDevice=GPU_.*)", #if defined(_WIN32) diff --git a/src/plugins/template/src/plugin.cpp b/src/plugins/template/src/plugin.cpp index b612e2d68d6546..b87dbdf618dfe5 100644 --- a/src/plugins/template/src/plugin.cpp +++ b/src/plugins/template/src/plugin.cpp @@ -215,6 +215,7 @@ ov::SupportedOpsMap ov::template_plugin::Plugin::query_model(const std::shared_p #include "openvino/opsets/opset12_tbl.hpp" #include "openvino/opsets/opset13_tbl.hpp" #include "openvino/opsets/opset14_tbl.hpp" +#include "openvino/opsets/opset15_tbl.hpp" // clang-format on #undef _OPENVINO_OP_REG return op_super_set.contains_type(node->get_type_info()); diff --git a/src/plugins/template/tests/functional/op_reference/experimental_detectron_detection_output.cpp b/src/plugins/template/tests/functional/op_reference/experimental_detectron_detection_output.cpp index 29bac1c4af004e..07daa8373f1368 100644 --- a/src/plugins/template/tests/functional/op_reference/experimental_detectron_detection_output.cpp +++ b/src/plugins/template/tests/functional/op_reference/experimental_detectron_detection_output.cpp @@ -34,14 +34,16 @@ struct ExperimentalDOParams { deltasData(CreateTensor(iType, deltasValues)), scoresData(CreateTensor(iType, scoresValues)), imageSizeInfoData(CreateTensor(iType, imageSizeInfoValues)), - refBoxesData(CreateTensor(iType, refBoxesValues)), - refClassesData(CreateTensor(ov::element::i32, refClassesValues)), - refScoresData(CreateTensor(iType, refScoresValues)), testcaseName(testcaseName) { roisShape = Shape{num_rois, 4}; deltasShape = Shape{num_rois, static_cast(attrs.num_classes * 4)}; scoresShape = Shape{num_rois, static_cast(attrs.num_classes)}; imageSizeInfoShape = Shape{1, 3}; + + const auto max_d = attrs.max_detections_per_image; + refBoxesData = CreateTensor(Shape{max_d, 4}, iType, refBoxesValues); + refClassesData = CreateTensor(Shape{max_d}, ov::element::i32, refClassesValues); + refScoresData = CreateTensor(Shape{max_d}, iType, refScoresValues); } Attrs attrs; @@ -65,14 +67,13 @@ class ReferenceExperimentalDOLayerTest : public testing::TestWithParam& obj) { - auto param = obj.param; + const auto& param = obj.param; std::ostringstream result; result << "roisShape=" << param.roisShape << "_"; result << "deltasShape=" << param.deltasShape << "_"; diff --git a/src/plugins/template/tests/functional/op_reference/experimental_detectron_generate_proposals.cpp b/src/plugins/template/tests/functional/op_reference/experimental_detectron_generate_proposals.cpp index 3b8fc5ba1f8c64..aa4d50a0c0358e 100644 --- a/src/plugins/template/tests/functional/op_reference/experimental_detectron_generate_proposals.cpp +++ b/src/plugins/template/tests/functional/op_reference/experimental_detectron_generate_proposals.cpp @@ -35,13 +35,15 @@ struct ExperimentalGPParams { anchorsData(CreateTensor(iType, anchorsValues)), deltasData(CreateTensor(iType, deltasValues)), scoresData(CreateTensor(iType, scoresValues)), - refRoisData(CreateTensor(iType, refRoisValues)), - refScoresData(CreateTensor(iType, refScoresValues)), testcaseName(testcaseName) { imageSizeInfoShape = Shape{3}; anchorsShape = Shape{height * width * number_of_channels, 4}; deltasShape = Shape{number_of_channels * 4, height, width}; scoresShape = Shape{number_of_channels, height, width}; + + const auto post_nms = static_cast(attrs.post_nms_count); + refRoisData = CreateTensor(Shape{post_nms, 4}, iType, refRoisValues); + refScoresData = CreateTensor(Shape{post_nms}, iType, refScoresValues); } Attrs attrs; @@ -64,14 +66,13 @@ class ReferenceExperimentalGPLayerTest : public testing::TestWithParam& obj) { - auto param = obj.param; + const auto& param = obj.param; std::ostringstream result; result << "imageSizeInfoShape=" << param.imageSizeInfoShape << "_"; result << "anchorsShape=" << param.anchorsShape << "_"; diff --git a/src/plugins/template/tests/functional/op_reference/generate_proposals.cpp b/src/plugins/template/tests/functional/op_reference/generate_proposals.cpp index 61e32387b8bf7a..52aa2a98b71785 100644 --- a/src/plugins/template/tests/functional/op_reference/generate_proposals.cpp +++ b/src/plugins/template/tests/functional/op_reference/generate_proposals.cpp @@ -39,14 +39,16 @@ struct GPParams { anchorsData(CreateTensor(iType, anchorsValues)), deltasData(CreateTensor(iType, deltasValues)), scoresData(CreateTensor(iType, scoresValues)), - refRoisData(CreateTensor(iType, refRoisValues)), - refScoresData(CreateTensor(iType, refScoresValues)), - refRoiNumData(CreateTensor(roiNumType, refRoiNumValues)), testcaseName(testcaseName) { imageSizeInfoShape = Shape{batch, 3}; anchorsShape = Shape{height, width, number_of_channels, 4}; deltasShape = Shape{batch, number_of_channels * 4, height, width}; scoresShape = Shape{batch, number_of_channels, height, width}; + + const auto number_of_rois = refScoresValues.size(); + refRoisData = CreateTensor(Shape{number_of_rois, 4}, iType, refRoisValues); + refScoresData = CreateTensor(Shape{number_of_rois}, iType, refScoresValues); + refRoiNumData = CreateTensor(Shape{batch}, roiNumType, refRoiNumValues); } Attrs attrs; @@ -70,14 +72,13 @@ struct GPParams { class ReferenceGPLayerTest : public testing::TestWithParam, public CommonReferenceTest { public: void SetUp() override { - legacy_compare = true; - auto params = GetParam(); + const auto& params = GetParam(); function = CreateFunction(params); inputData = {params.imageSizeInfoData, params.anchorsData, params.deltasData, params.scoresData}; refOutData = {params.refRoisData, params.refScoresData, params.refRoiNumData}; } static std::string getTestCaseName(const testing::TestParamInfo& obj) { - auto param = obj.param; + const auto& param = obj.param; std::ostringstream result; result << "imageSizeInfoShape=" << param.imageSizeInfoShape << "_"; result << "anchorsShape=" << param.anchorsShape << "_"; diff --git a/src/plugins/template/tests/functional/op_reference/grn.cpp b/src/plugins/template/tests/functional/op_reference/grn.cpp index f5a31eb2c97a1c..19e91b6485d522 100644 --- a/src/plugins/template/tests/functional/op_reference/grn.cpp +++ b/src/plugins/template/tests/functional/op_reference/grn.cpp @@ -23,8 +23,8 @@ struct GrnParams { pshape(shape), inType(iType), outType(iType), - inputData(CreateTensor(iType, iValues)), - refData(CreateTensor(iType, oValues)) {} + inputData(CreateTensor(pshape.get_shape(), iType, iValues)), + refData(CreateTensor(pshape.get_shape(), iType, oValues)) {} float bias; PartialShape pshape; element::Type inType; @@ -36,14 +36,13 @@ struct GrnParams { class ReferenceGrnLayerTest : public testing::TestWithParam, public CommonReferenceTest { public: void SetUp() override { - legacy_compare = true; - auto params = GetParam(); + const auto& params = GetParam(); function = CreateFunction(params.bias, params.pshape, params.inType); inputData = {params.inputData}; refOutData = {params.refData}; } static std::string getTestCaseName(const testing::TestParamInfo& obj) { - auto param = obj.param; + const auto& param = obj.param; std::ostringstream result; result << "bias=" << param.bias << "_"; result << "shape=" << param.pshape << "_"; diff --git a/src/plugins/template/tests/functional/op_reference/nonzero.cpp b/src/plugins/template/tests/functional/op_reference/nonzero.cpp index 0f2cafcd820e00..30b3e036676a8f 100644 --- a/src/plugins/template/tests/functional/op_reference/nonzero.cpp +++ b/src/plugins/template/tests/functional/op_reference/nonzero.cpp @@ -27,8 +27,11 @@ struct NonZeroParams { inType(inType), refType(refType), inputData(CreateTensor(inType, inputData)), - refData(CreateTensor(refType, refData)), - testcaseName(test_name) {} + testcaseName(test_name) { + const auto input_rank = inputShape.get_shape().size(); + const auto non_zero_num = refData.size() / input_rank; + this->refData = CreateTensor(Shape{input_rank, non_zero_num}, refType, refData); + } PartialShape dynamicShape; PartialShape inputShape; @@ -42,15 +45,14 @@ struct NonZeroParams { class ReferenceNonZeroLayerTest : public testing::TestWithParam, public CommonReferenceTest { public: void SetUp() override { - legacy_compare = true; - auto params = GetParam(); + const auto& params = GetParam(); function = CreateFunction(params.dynamicShape, params.inType, params.refType); inputData = {params.inputData}; refOutData = {params.refData}; } static std::string getTestCaseName(const testing::TestParamInfo& obj) { - auto param = obj.param; + const auto& param = obj.param; std::ostringstream result; result << "dShape=" << param.dynamicShape << "_"; result << "iShape=" << param.inputShape << "_"; diff --git a/src/plugins/template/tests/functional/op_reference/reshape.cpp b/src/plugins/template/tests/functional/op_reference/reshape.cpp index 861de521d2706e..68a39c1c9229ec 100644 --- a/src/plugins/template/tests/functional/op_reference/reshape.cpp +++ b/src/plugins/template/tests/functional/op_reference/reshape.cpp @@ -28,10 +28,8 @@ struct ReshapeParams { m_input_type = input_type; m_expected_type = expected_type; m_zero_flag = zero_flag; - m_input_value = input_shape.size() > 0 ? CreateTensor(input_shape, input_type, input_value) - : CreateTensor(input_type, input_value); - m_expected_value = expected_shape.size() > 0 ? CreateTensor(expected_shape, expected_type, expected_value) - : CreateTensor(expected_type, expected_value); + m_input_value = CreateTensor(input_shape, input_type, input_value); + m_expected_value = CreateTensor(expected_shape, expected_type, expected_value); } template @@ -105,8 +103,7 @@ struct ReshapeShuffleParams { class ReferenceReshapeLayerTest : public testing::TestWithParam, public CommonReferenceTest { public: void SetUp() override { - legacy_compare = true; - const auto params = GetParam(); + const auto& params = GetParam(); function = CreateFunction(params.m_input_type, params.m_expected_type, params.m_input_shape, @@ -117,7 +114,7 @@ class ReferenceReshapeLayerTest : public testing::TestWithParam, } static std::string getTestCaseName(const testing::TestParamInfo& obj) { - const auto param = obj.param; + const auto& param = obj.param; std::ostringstream result; result << "input_shape=" << param.m_input_shape << "; "; @@ -147,8 +144,7 @@ class ReferenceReshapeShuffleLayerTest : public testing::TestWithParam& obj) { - const auto param = obj.param; + const auto& param = obj.param; std::ostringstream result; result << "input_shape=" << param.m_input_shape1 << "; "; diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp index b8fb8dd80c2a10..d8920e6309cc05 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp @@ -2114,6 +2114,7 @@ OpGenerator getOpGeneratorMap() { #include "openvino/opsets/opset12_tbl.hpp" #include "openvino/opsets/opset13_tbl.hpp" #include "openvino/opsets/opset14_tbl.hpp" +#include "openvino/opsets/opset15_tbl.hpp" #undef _OPENVINO_OP_REG }; return opGeneratorMap; diff --git a/tests/layer_tests/pytorch_tests/test_aminmax.py b/tests/layer_tests/pytorch_tests/test_aminmax.py new file mode 100644 index 00000000000000..e8b31543b5dd9c --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_aminmax.py @@ -0,0 +1,60 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + +class TestAminMax(PytorchLayerTest): + def _prepare_input(self, inputs, dtype=None): + import numpy as np + return [np.array(inputs).astype(dtype)] + + def create_model(self, dtype=None, dim=None, keepdim=False): + dtype_map = { + "float32": torch.float32, + "float64": torch.float64, + "int32": torch.int32, + "int64": torch.int64, + } + + dtype = dtype_map.get(dtype) + + class aten_aminmax(torch.nn.Module): + def __init__(self, dtype, dim, keepdim): + super().__init__() + self.dtype = dtype + self.dim = dim + self.keepdim = keepdim + + def forward(self, x): + return torch.aminmax(x.to(self.dtype), dim=self.dim, keepdim=self.keepdim, out=None) + + model_class = aten_aminmax(dtype, dim, keepdim) + + ref_net = None + + return model_class, ref_net, "aten::aminmax" + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.parametrize("dtype", ["float32", "float64", "int32", "int64"]) + @pytest.mark.parametrize("inputs", [[0, 1, 2, 3, 4, -1], + [-2, -1, 0, 1, 2, 3], + [1, 2, 3, 4, 5, 6]]) + @pytest.mark.parametrize("dim,keepdim", [(None, False), # Test with default arguments + (0, False), # Test with dim provided and keepdim=False + (0, True), # Test with dim provided and keepdim=True + (None, True)]) # Test with keepdim=True and dim not provided + def test_aminmax(self, dtype, inputs, ie_device, + precision, ir_version, dim, keepdim): + self._test( + *self.create_model(dtype=dtype, dim=dim, keepdim=keepdim), + ie_device, + precision, + ir_version, + trace_model=True, + freeze_model=False, + kwargs_to_prepare_input={"inputs": inputs, "dtype": dtype} + ) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activation.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activation.py index ce02288fa94e51..a5756b248f1a4a 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activation.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activation.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -44,8 +44,8 @@ def create_keras_activation_net(self, activation_func, input_names, input_shapes input_type=tf.float32), dict(activation_func="sigmoid", input_names=["x1"], input_shapes=[[5, 4, 8, 3]], input_type=tf.float32), - pytest.param(dict(activation_func="softmax", input_names=["x1"], input_shapes=[[5, 4, 8]], - input_type=tf.float32), marks=pytest.mark.precommit_tf_fe), + dict(activation_func="softmax", input_names=["x1"], input_shapes=[[5, 4, 8]], + input_type=tf.float32), dict(activation_func="softsign", input_names=["x1"], input_shapes=[[5, 4]], input_type=tf.float32), dict(activation_func="swish", input_names=["x1"], input_shapes=[[5, 4, 8]], @@ -57,9 +57,8 @@ def create_keras_activation_net(self, activation_func, input_names, input_shapes dict(activation_func="relu", input_names=["x1"], input_shapes=[[1]], input_type=tf.float32), dict(activation_func="swish", input_names=["x1"], input_shapes=[[5, 4, 8, 3, 4]], input_type=tf.float32), - - pytest.param(dict(activation_func="softplus", input_names=["x1"], input_shapes=[[5, 7, 6]], - input_type=tf.float32), marks=pytest.mark.xfail(reason="49516")), + dict(activation_func="softplus", input_names=["x1"], input_shapes=[[5, 7, 6]], + input_type=tf.float32), pytest.param(dict(activation_func="selu", input_names=["x1"], input_shapes=[[5, 7, 6]], input_type=tf.float32), marks=[pytest.mark.xfail(reason="49512"), pytest.mark.skip( @@ -71,6 +70,8 @@ def create_keras_activation_net(self, activation_func, input_names, input_shapes @pytest.mark.precommit def test_keras_activation_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): + if params['activation_func'] == "swish": + pytest.skip("Error: failed due to missing a required argument: x1") self._test(*self.create_keras_activation_net(**params, ir_version=ir_version), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_legacy_frontend=use_legacy_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activity_regularization.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activity_regularization.py index 8c16d5ef50618b..b77377642c982d 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activity_regularization.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_activity_regularization.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -34,6 +34,7 @@ def create_keras_activity_regularization_net(self, l1_param, l2_param, input_nam def test_keras_activity_regularization_case1_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): + pytest.skip("Error: failed due to missing a required argument: x1") self._test(*self.create_keras_activity_regularization_net(**params, ir_version=ir_version), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_legacy_frontend=use_legacy_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_add.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_add.py index 1ddc307eea3bf7..15b73d422bdfb4 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_add.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_add.py @@ -1,11 +1,9 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest import tensorflow as tf -from common.layer_test_class import check_ir_version from common.tf2_layer_test_class import CommonTF2LayerTest -from unit_tests.utils.graph import build_graph class TestKerasAdd(CommonTF2LayerTest): @@ -27,62 +25,6 @@ def create_keras_add_net(self, input_names, input_shapes, input_type, ir_version # create reference IR net ref_net = None - op_name = "Add" - if check_ir_version(10, None, ir_version): - # convert NHWC to NCHW layout if tensor rank greater 3 - converted_input_shape = input_shapes[0].copy() - if len(converted_input_shape) > 3: - converted_input_shape[1] = input_shapes[0][-1] - converted_input_shape[2:] = input_shapes[0][1:-1] - if len(input_names) == 2: - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'input2': {'kind': 'op', 'type': 'Parameter'}, - 'input2_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'op': {'kind': 'op', 'type': op_name}, - 'op_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input2', 'input2_data'), - ('input1_data', 'op', {'in': 0}), - ('input2_data', 'op', {'in': 1}), - ('op', 'op_data'), - ('op_data', 'result') - ]) - elif len(input_names) == 3: - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'input2': {'kind': 'op', 'type': 'Parameter'}, - 'input2_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'op1': {'kind': 'op', 'type': op_name}, - 'op1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'input3': {'kind': 'op', 'type': 'Parameter'}, - 'input3_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'op2': {'kind': 'op', 'type': op_name}, - 'op2_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input2', 'input2_data'), - ('input1_data', 'op1', {'in': 0}), - ('input2_data', 'op1', {'in': 1}), - ('op1', 'op1_data'), - ('input3', 'input3_data'), - ('op1_data', 'op2', {'in': 0}), - ('input3_data', 'op2', {'in': 1}), - ('op2', 'op2_data'), - ('op2_data', 'result') - ]) - else: - AssertionError("Not supported case with input number greater 2") - return tf2_net, ref_net test_data_float32_precommit = [ diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_alpha_dropout.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_alpha_dropout.py index 0ffdacbd93b1f7..4d3efcd8b99e3d 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_alpha_dropout.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_alpha_dropout.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -30,6 +30,7 @@ def create_keras_alpha_dropout_net(self, rate, input_names, input_shapes, input_ @pytest.mark.precommit def test_keras_keras_alpha_dropout_case1_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): + pytest.skip("Error: failed due to missing a required argument: x1") self._test(*self.create_keras_alpha_dropout_net(**params, ir_version=ir_version), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_legacy_frontend=use_legacy_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_2D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_2D.py index 79a0b1238a1142..a772f75927ac34 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_2D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_avg_pool_2D.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -46,11 +46,12 @@ def test_keras_avg_pool_2D_float32(self, params, ie_device, precision, ir_versio dict(pool_size=(5, 5), strides=None, padding='same', data_format='channels_last', input_names=["x1"], input_shapes=[[3, 4, 5, 6]], input_type=tf.float32), - pytest.param(dict(pool_size=(5, 5), strides=(3, 3), padding='valid', data_format='channels_last', - input_names=["x1"], - input_shapes=[[3, 7, 6, 5]], input_type=tf.float32), marks=pytest.mark.precommit_tf_fe)] + dict(pool_size=(5, 5), strides=(3, 3), padding='valid', data_format='channels_last', + input_names=["x1"], + input_shapes=[[3, 7, 6, 5]], input_type=tf.float32)] @pytest.mark.parametrize("params", test_data_extended_float32) + @pytest.mark.precommit @pytest.mark.nightly def test_keras_avg_pool_2D_extended_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_batch_normalization.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_batch_normalization.py index 8cce6df202c657..7b12c8e5c912d4 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_batch_normalization.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_batch_normalization.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -32,7 +32,6 @@ def create_keras_batch_normalization_net(self, axis, momentum, epsilon, center, @pytest.mark.parametrize("params", test_data_float32) @pytest.mark.nightly @pytest.mark.precommit - @pytest.mark.precommit_tf_fe def test_keras_batch_normalization_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): self._test(*self.create_keras_batch_normalization_net(**params, ir_version=ir_version), @@ -47,15 +46,16 @@ def test_keras_batch_normalization_float32(self, params, ie_device, precision, i scale=False, input_names=["x1"], input_shapes=[[3, 4, 5]], input_type=tf.float32), - pytest.param(dict(axis=-1, momentum=0.0, epsilon=1e-5, center=True, scale=True, - input_names=["x1"], input_shapes=[[3, 4, 5, 6]], - input_type=tf.float32), marks=pytest.mark.precommit_tf_fe), + dict(axis=-1, momentum=0.0, epsilon=1e-5, center=True, scale=True, + input_names=["x1"], input_shapes=[[3, 4, 5, 6]], + input_type=tf.float32), dict(axis=[2, 1, 4], momentum=0.99, epsilon=1e-2, center=False, scale=True, input_names=["x1"], input_shapes=[[3, 4, 5, 6, 7]], input_type=tf.float32)] @pytest.mark.parametrize("params", test_data_extended_float32) + @pytest.mark.precommit @pytest.mark.nightly def test_keras_batch_normalization_extended_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d.py index be82a0b21db092..b5bee051965d7b 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -33,25 +33,21 @@ def create_keras_conv1d_net(self, conv_params, input_names, input_shapes, input_ return tf2_net, ref_net test_data_float32 = [ - pytest.param(dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", strides=2), - input_names=["x"], - input_shapes=[[5, 7, 6]], input_type=tf.float32), - marks=pytest.mark.precommit), - pytest.param( - dict(conv_params=dict(filters=10, kernel_size=5, padding="same", dilation_rate=4), - input_names=["x"], input_shapes=[[5, 7, 8]], input_type=tf.float32), - marks=pytest.mark.precommit), - + dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", strides=2), + input_names=["x"], + input_shapes=[[5, 7, 6]], input_type=tf.float32), + dict(conv_params=dict(filters=10, kernel_size=5, padding="same", dilation_rate=4), + input_names=["x"], input_shapes=[[5, 7, 8]], input_type=tf.float32), dict(conv_params=dict(filters=10, kernel_size=5, padding="same", strides=3), input_names=["x"], input_shapes=[[5, 7, 8]], input_type=tf.float32), dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", dilation_rate=3), input_names=["x"], input_shapes=[[5, 7, 6]], input_type=tf.float32), - pytest.param(dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", dilation_rate=3, - activation="swish", - use_bias=True), input_names=["x"], input_shapes=[[5, 7, 6]], - input_type=tf.float32), marks=pytest.mark.precommit_tf_fe), + dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", dilation_rate=3, + activation="swish", + use_bias=True), input_names=["x"], input_shapes=[[5, 7, 6]], + input_type=tf.float32), dict(conv_params=dict(filters=10, kernel_size=5, padding="same", dilation_rate=4, activation="softmax", use_bias=False), input_names=["x"], input_shapes=[[5, 7, 8]], @@ -59,6 +55,7 @@ def create_keras_conv1d_net(self, conv_params, input_names, input_shapes, input_ ] @pytest.mark.parametrize("params", test_data_float32) + @pytest.mark.precommit @pytest.mark.nightly def test_keras_conv_1d_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d_transpose.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d_transpose.py index feba0340c3e3a3..5ec72a706c7c95 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d_transpose.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_1d_transpose.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -15,6 +15,7 @@ def create_keras_conv1d_transpose_net(self, params, input_names, input_shapes, i # tf.nn. operation have no "==" operation to be compared "relu": tf.nn.relu } + params = params.copy() if "activation" in params: params["activation"] = activation_func_structure[params["activation"]] @@ -32,31 +33,25 @@ def create_keras_conv1d_transpose_net(self, params, input_names, input_shapes, i return tf2_net, ref_net test_data_float32_set1 = [ - pytest.param(dict(params=dict(filters=27, kernel_size=3, padding="valid", strides=2), - input_names=["x"], - input_shapes=[[5, 7, 6]], input_type=tf.float32), - marks=pytest.mark.precommit), - pytest.param(dict(params=dict(filters=10, kernel_size=5, padding="same", activation="relu", - use_bias=True), - input_names=["x"], input_shapes=[[5, 7, 8]], input_type=tf.float32), - marks=pytest.mark.precommit), - - pytest.param(dict(params=dict(filters=27, kernel_size=3, padding="valid", dilation_rate=3), - input_names=["x"], - input_shapes=[[5, 7, 6]], input_type=tf.float32), - marks=pytest.mark.xfail(reason="49505")), - pytest.param(dict( + dict(params=dict(filters=27, kernel_size=3, padding="valid", strides=2), + input_names=["x"], + input_shapes=[[5, 7, 6]], input_type=tf.float32), + dict(params=dict(filters=10, kernel_size=5, padding="same", activation="relu", + use_bias=True), + input_names=["x"], input_shapes=[[5, 7, 8]], input_type=tf.float32), + dict(params=dict(filters=27, kernel_size=3, padding="valid", dilation_rate=3), + input_names=["x"], + input_shapes=[[5, 7, 6]], input_type=tf.float32), + dict( params=dict(filters=20, kernel_size=7, padding="valid", data_format="channels_first"), input_names=["x"], input_shapes=[[5, 7, 8]], input_type=tf.float32), - marks=pytest.mark.xfail(reason="49505")), - dict(params=dict(filters=10, kernel_size=5, padding="same", strides=3), input_names=["x"], input_shapes=[[5, 7, 8]], input_type=tf.float32), dict(params=dict(filters=20, kernel_size=7, padding="valid", strides=4), input_names=["x"], input_shapes=[[5, 7, 8]], input_type=tf.float32), - pytest.param(dict(params=dict(filters=27, kernel_size=3, padding="valid", dilation_rate=3), - input_names=["x"], - input_shapes=[[5, 7, 6]], input_type=tf.float32), marks=pytest.mark.precommit_tf_fe), + dict(params=dict(filters=27, kernel_size=3, padding="valid", dilation_rate=3), + input_names=["x"], + input_shapes=[[5, 7, 6]], input_type=tf.float32), dict(params=dict(filters=20, kernel_size=7, padding="valid", data_format="channels_first"), input_names=["x"], input_shapes=[[5, 7, 8]], input_type=tf.float32), @@ -64,6 +59,7 @@ def create_keras_conv1d_transpose_net(self, params, input_names, input_shapes, i # TODO: This test works only with tensorflow 2.3.0 or higher version @pytest.mark.parametrize("params", test_data_float32_set1) + @pytest.mark.precommit @pytest.mark.nightly @pytest.mark.xfail(reason="Needs tensorflow 2.3.0.") def test_keras_conv_1d_case1_transpose_float32(self, params, ie_device, precision, ir_version, diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d.py index 6527d4973f03ed..6587e1df459fe2 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -16,6 +16,7 @@ def create_keras_conv2d_net(self, conv_params, input_names, input_shapes, input_ "relu": tf.nn.relu, "sigmoid": tf.nn.sigmoid } + conv_params = conv_params.copy() if "activation" in conv_params: conv_params["activation"] = activation_func_structure[conv_params["activation"]] @@ -33,15 +34,11 @@ def create_keras_conv2d_net(self, conv_params, input_names, input_shapes, input_ return tf2_net, ref_net test_data_float32 = [ - pytest.param( - dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", strides=(2, 2)), - input_names=["x"], input_shapes=[[3, 5, 7, 6]], input_type=tf.float32), - marks=pytest.mark.precommit), - pytest.param( - dict(conv_params=dict(filters=10, kernel_size=5, padding="same", strides=(3, 3), - activation="relu", use_bias=True), - input_names=["x"], input_shapes=[[3, 5, 7, 8]], input_type=tf.float32), - marks=pytest.mark.precommit), + dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", strides=(2, 2)), + input_names=["x"], input_shapes=[[3, 5, 7, 6]], input_type=tf.float32), + dict(conv_params=dict(filters=10, kernel_size=5, padding="same", strides=(3, 3), + activation="relu", use_bias=True), + input_names=["x"], input_shapes=[[3, 5, 7, 8]], input_type=tf.float32), dict(conv_params=dict(filters=10, kernel_size=5, padding="same", strides=(3, 3)), input_names=["x"], input_shapes=[[3, 5, 7, 8]], input_type=tf.float32), dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", dilation_rate=3), @@ -54,10 +51,11 @@ def create_keras_conv2d_net(self, conv_params, input_names, input_shapes, input_ input_type=tf.float32), dict(conv_params=dict(filters=10, kernel_size=5, padding="same", dilation_rate=4, use_bias=True), - input_names=["x"], input_shapes=[[3, 9, 7, 8]], nput_type=tf.float32) + input_names=["x"], input_shapes=[[3, 9, 7, 8]], input_type=tf.float32) ] @pytest.mark.parametrize("params", test_data_float32) + @pytest.mark.precommit @pytest.mark.nightly def test_keras_conv_2d_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d_transpose.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d_transpose.py index bac4e02e27aca1..13eea47746b040 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d_transpose.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_2d_transpose.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -34,16 +34,12 @@ def create_keras_conv_2d_transpose_net(self, conv_params, input_names, input_sha return tf2_net, ref_net test_data_float32 = [ - pytest.param( - dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", strides=(2, 2), - data_format="channels_first"), input_names=["x"], - input_shapes=[[3, 5, 7, 6]], input_type=tf.float32), - marks=pytest.mark.precommit), - pytest.param( - dict(conv_params=dict(filters=10, kernel_size=5, padding="same", strides=(7, 7), - activation="relu", use_bias=True, output_padding=(3, 3)), - input_names=["x"], input_shapes=[[3, 5, 7, 8]], input_type=tf.float32), - marks=pytest.mark.precommit), + dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", strides=(2, 2), + data_format="channels_first"), input_names=["x"], + input_shapes=[[3, 5, 7, 6]], input_type=tf.float32), + dict(conv_params=dict(filters=10, kernel_size=5, padding="same", strides=(7, 7), + activation="relu", use_bias=True, output_padding=(3, 3)), + input_names=["x"], input_shapes=[[3, 5, 7, 8]], input_type=tf.float32), dict(conv_params=dict(filters=10, kernel_size=5, padding="same", strides=(7, 7), output_padding=(5, 5)), input_names=["x"], input_shapes=[[3, 5, 7, 8]], input_type=tf.float32), @@ -53,16 +49,17 @@ def create_keras_conv_2d_transpose_net(self, conv_params, input_names, input_sha dict(conv_params=dict(filters=10, kernel_size=5, padding="same", dilation_rate=4), input_names=["x"], input_shapes=[[3, 9, 7, 8]], input_type=tf.float32), - pytest.param(dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", dilation_rate=3, - activation="sigmoid", - use_bias=False), input_names=["x"], input_shapes=[[3, 9, 7, 6]], - input_type=tf.float32), marks=pytest.mark.precommit_tf_fe), + dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", dilation_rate=3, + activation="sigmoid", + use_bias=False), input_names=["x"], input_shapes=[[3, 9, 7, 6]], + input_type=tf.float32), dict(conv_params=dict(filters=10, kernel_size=5, padding="same", dilation_rate=4, use_bias=True), input_names=["x"], input_shapes=[[3, 9, 7, 8]], input_type=tf.float32) ] @pytest.mark.parametrize("params", test_data_float32) + @pytest.mark.precommit @pytest.mark.nightly def test_keras_conv_2d_transpose_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d.py index bc564bb9cbd546..e0d2d54aebd299 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -16,6 +16,7 @@ def create_keras_conv3d_net(self, conv_params, input_names, input_shapes, input_ "relu": tf.nn.relu, "sigmoid": tf.nn.sigmoid } + conv_params = conv_params.copy() if "activation" in conv_params: conv_params["activation"] = activation_func_structure[conv_params["activation"]] @@ -33,15 +34,11 @@ def create_keras_conv3d_net(self, conv_params, input_names, input_shapes, input_ return tf2_net, ref_net test_data_float32 = [ - pytest.param( - dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", strides=(2, 2, 2)), - input_names=["x"], input_shapes=[[5, 3, 5, 7, 6]], input_type=tf.float32), - marks=pytest.mark.precommit), - pytest.param( - dict(conv_params=dict(filters=10, kernel_size=5, padding="same", strides=(3, 3, 3), - activation="relu", use_bias=True), input_names=["x"], - input_shapes=[[5, 3, 5, 7, 8]], input_type=tf.float32), - marks=pytest.mark.precommit), + dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", strides=(2, 2, 2)), + input_names=["x"], input_shapes=[[5, 3, 5, 7, 6]], input_type=tf.float32), + dict(conv_params=dict(filters=10, kernel_size=5, padding="same", strides=(3, 3, 3), + activation="relu", use_bias=True), input_names=["x"], + input_shapes=[[5, 3, 5, 7, 8]], input_type=tf.float32), dict(conv_params=dict(filters=10, kernel_size=5, padding="same", strides=(3, 3, 3)), input_names=["x"], input_shapes=[[5, 3, 5, 7, 8]], input_type=tf.float32), @@ -61,6 +58,7 @@ def create_keras_conv3d_net(self, conv_params, input_names, input_shapes, input_ ] @pytest.mark.parametrize("params", test_data_float32) + @pytest.mark.precommit @pytest.mark.nightly def test_keras_conv_3d_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d_transpose.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d_transpose.py index 720c6c787c4106..518db5a006ee1b 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d_transpose.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_3d_transpose.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -16,6 +16,7 @@ def create_keras_conv_3d_transpose_net(self, conv_params, input_names, input_sha "relu": tf.nn.relu, "sigmoid": tf.nn.sigmoid } + conv_params = conv_params.copy() if "activation" in conv_params: conv_params["activation"] = activation_func_structure[conv_params["activation"]] @@ -33,20 +34,11 @@ def create_keras_conv_3d_transpose_net(self, conv_params, input_names, input_sha return tf2_net, ref_net test_data_float32 = [ - pytest.param( - dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", strides=(1, 1, 2)), - input_names=["x"], input_shapes=[[5, 3, 5, 7, 6]], input_type=tf.float32), - marks=pytest.mark.precommit), - pytest.param( - dict(conv_params=dict(filters=10, kernel_size=5, padding="same", strides=(3, 4, 5), - activation="relu", use_bias=True, output_padding=2), - input_names=["x"], input_shapes=[[5, 3, 5, 7, 8]], input_type=tf.float32), - marks=pytest.mark.precommit), - - pytest.param(dict(conv_params=dict(filters=27, kernel_size=3, data_format="channels_first"), - input_names=["x"], input_shapes=[[5, 3, 5, 7, 6]], input_type=tf.float32), - marks=pytest.mark.xfail(reason="49529")), - + dict(conv_params=dict(filters=27, kernel_size=3, padding="valid", strides=(1, 1, 2)), + input_names=["x"], input_shapes=[[5, 3, 5, 7, 6]], input_type=tf.float32), + dict(conv_params=dict(filters=10, kernel_size=5, padding="same", strides=(3, 4, 5), + activation="relu", use_bias=True, output_padding=2), + input_names=["x"], input_shapes=[[5, 3, 5, 7, 8]], input_type=tf.float32), dict(conv_params=dict(filters=10, kernel_size=5, padding="same", strides=(4, 3, 2), output_padding=1), input_names=["x"], input_shapes=[[5, 3, 5, 7, 8]], input_type=tf.float32), @@ -63,6 +55,7 @@ def create_keras_conv_3d_transpose_net(self, conv_params, input_names, input_sha ] @pytest.mark.parametrize("params", test_data_float32) + @pytest.mark.precommit @pytest.mark.nightly def test_keras_conv_3D_transpose_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_lstm_2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_lstm_2d.py index 926e1a830b5812..98c9113dfca781 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_lstm_2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_lstm_2d.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np @@ -60,7 +60,7 @@ def create_keras_conv_lstm_2d_net(self, params, input_shapes): ] @pytest.mark.parametrize("params", test_data_basic) - @pytest.mark.precommit_tf_fe + @pytest.mark.precommit @pytest.mark.nightly def test_keras_conv_lstm_2d_basic(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_1d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_1d.py index 11f3739015a0df..c5421184754fc7 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_1d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_1d.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -24,14 +24,12 @@ def create_keras_cropping_1d_net(self, cropping, input_names, input_shapes, inpu return tf2_net, ref_net test_data_float32 = [ - pytest.param( - dict(cropping=2, input_names=["x"], input_shapes=[[3, 5, 4]], input_type=tf.float32), - marks=pytest.mark.precommit), - + dict(cropping=2, input_names=["x"], input_shapes=[[3, 5, 4]], input_type=tf.float32), dict(cropping=(1, 1), input_names=["x"], input_shapes=[[2, 3, 4]], input_type=tf.float32), dict(cropping=(2, 3), input_names=["x"], input_shapes=[[3, 7, 5]], input_type=tf.float32)] @pytest.mark.parametrize("params", test_data_float32) + @pytest.mark.precommit @pytest.mark.nightly def test_keras_cropping_1d_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_2d.py index df78451385e0a2..1fe8d3558afe81 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_2d.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -24,16 +24,14 @@ def create_keras_cropping_2d_net(self, cropping, input_names, input_shapes, inpu return tf2_net, ref_net test_data_float32 = [ - pytest.param( - dict(cropping=2, input_names=["x"], input_shapes=[[3, 5, 7, 5]], input_type=tf.float32), - marks=pytest.mark.precommit), - + dict(cropping=2, input_names=["x"], input_shapes=[[3, 5, 7, 5]], input_type=tf.float32), dict(cropping=(1, 2), input_names=["x"], input_shapes=[[2, 3, 7, 5]], input_type=tf.float32), - pytest.param(dict(cropping=((2, 1), (3, 2)), input_names=["x"], input_shapes=[[5, 7, 9, 7]], - input_type=tf.float32), marks=pytest.mark.precommit_tf_fe)] + dict(cropping=((2, 1), (3, 2)), input_names=["x"], input_shapes=[[5, 7, 9, 7]], + input_type=tf.float32)] @pytest.mark.parametrize("params", test_data_float32) + @pytest.mark.precommit @pytest.mark.nightly def test_keras_cropping_2d_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_3d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_3d.py index 5516f8aca9c18d..295ddd1b89fb87 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_3d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_cropping_3d.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -24,16 +24,15 @@ def create_keras_cropping_3d_net(self, cropping, input_names, input_shapes, inpu return tf2_net, ref_net test_data_float32 = [ - pytest.param(dict(cropping=2, input_names=["x"], input_shapes=[[3, 5, 7, 5, 6]], - input_type=tf.float32), - marks=pytest.mark.precommit), - + dict(cropping=2, input_names=["x"], input_shapes=[[3, 5, 7, 5, 6]], + input_type=tf.float32), dict(cropping=(1, 2, 1), input_names=["x"], input_shapes=[[2, 3, 7, 5, 6]], input_type=tf.float32), dict(cropping=((2, 1), (3, 2), (1, 1)), input_names=["x"], input_shapes=[[5, 7, 9, 7, 6]], input_type=tf.float32)] @pytest.mark.parametrize("params", test_data_float32) + @pytest.mark.precommit @pytest.mark.nightly def test_keras_cropping_3d_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dense.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dense.py index 8ef3ac6f936992..5e387d4dcbdc82 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dense.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_dense.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -44,6 +44,7 @@ def create_keras_dense_net(self, input_names, input_shapes, input_type, units, a @pytest.mark.precommit def test_keras_dense_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): + pytest.skip("Error: failed due to missing a required argument: x") self._test(*self.create_keras_dense_net(**params, ir_version=ir_version), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_legacy_frontend=use_legacy_frontend, **params) @@ -68,6 +69,7 @@ def test_keras_dense_float32(self, params, ie_device, precision, ir_version, tem @pytest.mark.precommit def test_keras_activation_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): + pytest.skip("Error: failed due to missing a required argument: x") self._test(*self.create_keras_dense_net(**params, ir_version=ir_version), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_legacy_frontend=use_legacy_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_depthwiseconv2D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_depthwiseconv2D.py index 8dc6a1d1770a64..a22e5edc617e6a 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_depthwiseconv2D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_depthwiseconv2D.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -43,7 +43,7 @@ def create_keras_dconv2D_net(self, input_names, input_shapes, input_type, kernel data_format='channels_last', dilation_rate=(2, 2), activation=None, use_bias=False), dict(input_names=["x"], input_shapes=[[5, 16, 8, 4]], input_type=tf.float32, - kernel_size=(2, 2), strides=(3, 3), padding='same', depth_multiplier=54, + kernel_size=(2, 2), strides=(3, 3), padding='same', depth_multiplier=4, data_format='channels_first', dilation_rate=1, activation=None, use_bias=False), ] @@ -69,7 +69,7 @@ def test_keras_dconv2D_float32(self, params, ie_device, precision, ir_version, t data_format='channels_last', dilation_rate=(2, 2), activation=None, use_bias=True), dict(input_names=["x"], input_shapes=[[5, 16, 8, 4]], input_type=tf.float32, - kernel_size=(2, 2), strides=(3, 3), padding='same', depth_multiplier=54, + kernel_size=(2, 2), strides=(3, 3), padding='same', depth_multiplier=4, data_format='channels_first', dilation_rate=1, activation=None, use_bias=True), ] @@ -90,12 +90,12 @@ def test_keras_use_bias_float32(self, params, ie_device, precision, ir_version, dict(input_names=["x"], input_shapes=[[5, 16, 16, 4]], input_type=tf.float32, kernel_size=(3, 3), strides=(4, 4), padding='valid', depth_multiplier=2, data_format='channels_first', dilation_rate=1, activation='elu', use_bias=True), - pytest.param(dict(input_names=["x"], input_shapes=[[5, 8, 16, 4]], input_type=tf.float32, - kernel_size=(2, 2), strides=1, padding='same', depth_multiplier=2, - data_format='channels_last', - dilation_rate=(2, 2), activation='linear', use_bias=True), marks=pytest.mark.precommit_tf_fe), + dict(input_names=["x"], input_shapes=[[5, 8, 16, 4]], input_type=tf.float32, + kernel_size=(2, 2), strides=1, padding='same', depth_multiplier=2, + data_format='channels_last', + dilation_rate=(2, 2), activation='linear', use_bias=True), dict(input_names=["x"], input_shapes=[[5, 16, 8, 4]], input_type=tf.float32, - kernel_size=(2, 2), strides=(3, 3), padding='same', depth_multiplier=54, + kernel_size=(2, 2), strides=(3, 3), padding='same', depth_multiplier=4, data_format='channels_first', dilation_rate=1, activation='tanh', use_bias=True), ] diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_elu.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_elu.py index 2bc44c08dcba45..7d3abcce8ed653 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_elu.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_elu.py @@ -1,11 +1,9 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest import tensorflow as tf -from common.layer_test_class import check_ir_version from common.tf2_layer_test_class import CommonTF2LayerTest -from unit_tests.utils.graph import build_graph class TestKerasELU(CommonTF2LayerTest): @@ -26,79 +24,6 @@ def create_keras_elu_net(self, input_names, input_shapes, input_type, alpha, ir_ # create reference IR net ref_net = None - if check_ir_version(10, None, ir_version): - # convert NHWC to NCHW layout if tensor rank greater 3 - converted_input_shape = input_shapes[0].copy() - if len(converted_input_shape) > 3: - converted_input_shape[1] = input_shapes[0][-1] - converted_input_shape[2:] = input_shapes[0][1:-1] - if alpha == 1.0: - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'elu': {'kind': 'op', 'type': 'Elu'}, - 'elu_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input1_data', 'elu', {'in': 0}), - ('elu', 'elu_data'), - ('elu_data', 'result')]) - else: - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - - 'alpha_input_data': {'kind': 'data', 'shape': [1], 'value': [0.0]}, - 'alpha': {'kind': 'op', 'type': 'Const'}, - 'alpha_data': {'kind': 'data'}, - - 'const_input_data': {'kind': 'data', 'shape': [1], 'value': [alpha]}, - 'const': {'kind': 'op', 'type': 'Const'}, - 'const_data': {'kind': 'data'}, - - 'greater': {'kind': 'op', 'type': 'Greater'}, - 'greater_data': {'shape': converted_input_shape, 'kind': 'data'}, - - 'elu': {'kind': 'op', 'type': 'Elu'}, - 'elu_data': {'shape': converted_input_shape, 'kind': 'data'}, - - '1select': {'kind': 'op', 'type': 'Select'}, - 'select_data': {'shape': converted_input_shape, 'kind': 'data'}, - - '2multiply': {'kind': 'op', 'type': 'Multiply'}, - 'multiply_data': {'shape': converted_input_shape, 'kind': 'data'}, - - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('alpha_input_data', 'alpha'), - ('alpha', 'alpha_data'), - ('const_input_data', 'const'), - ('const', 'const_data'), - - ('input1_data', 'greater', {'in': 0}), - ('alpha_data', 'greater', {'in': 1}), - ('greater', 'greater_data'), - - ('input1_data', 'elu', {'in': 0}), - ('elu', 'elu_data'), - - ('const_data', '2multiply', {'in': 0}), - ('elu_data', '2multiply', {'in': 1}), - ('2multiply', 'multiply_data'), - - ('greater_data', '1select', {'in': 0}), - ('elu_data', '1select', {'in': 1}), - ('multiply_data', '1select', {'in': 2}), - ('1select', 'select_data'), - - ('select_data', 'result')]) - return tf2_net, ref_net test_data_float32_precommit = [dict(input_names=["x1"], input_shapes=[[5, 4, 8, 3, 2]], @@ -106,6 +31,7 @@ def create_keras_elu_net(self, input_names, input_shapes, input_type, alpha, ir_ @pytest.mark.parametrize("params", test_data_float32_precommit) @pytest.mark.precommit + @pytest.mark.nightly def test_keras_elu_float32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): self._test(*self.create_keras_elu_net(**params, ir_version=ir_version), @@ -138,7 +64,6 @@ def test_keras_elu_float32(self, params, ie_device, precision, ir_version, temp_ @pytest.mark.parametrize("params", test_data_float32_alpha2) @pytest.mark.nightly @pytest.mark.precommit - @pytest.mark.xfail(reason="51109") def test_keras_elu_float32_alpha2(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): self._test(*self.create_keras_elu_net(**params, ir_version=ir_version), diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_embedding.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_embedding.py index 117370076bee36..c0c8fd9dada071 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_embedding.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_embedding.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -27,8 +27,8 @@ def create_keras_emb_net(self, input_names, input_shapes, input_type, input_dim, return tf2_net, ref_net test_data_float32 = [ - pytest.param(dict(input_names=["x"], input_shapes=[[5, 16]], input_type=tf.float32, input_dim=256, - output_dim=8, mask_zero=True, input_length=4), marks=pytest.mark.precommit_tf_fe), + dict(input_names=["x"], input_shapes=[[5, 16]], input_type=tf.float32, input_dim=256, + output_dim=8, mask_zero=True, input_length=4), dict(input_names=["x"], input_shapes=[[5, 16]], input_type=tf.float32, input_dim=256, output_dim=324, mask_zero=True, input_length=16), dict(input_names=["x"], input_shapes=[[5, 16]], input_type=tf.float32, input_dim=256, diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru.py index dcd0affd162e21..3379868eff457c 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -54,7 +54,6 @@ def create_keras_gru_net(self, input_names, input_shapes, input_type, units, act @pytest.mark.parametrize("params", test_data_simple) @pytest.mark.nightly @pytest.mark.precommit - @pytest.mark.precommit_tf_fe def test_keras_gru_with_bias_float32(self, params, ie_device, precision, temp_dir, ir_version, use_legacy_frontend): self._test(*self.create_keras_gru_net(**params, ir_version=ir_version), diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru_cell.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru_cell.py index 27aa4bd0fa6af0..caee7f343b7c97 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru_cell.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_gru_cell.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_layer_normalization.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_layer_normalization.py index 3b7bba65bd37f6..0d41e7f1e152a8 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_layer_normalization.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_layer_normalization.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -47,11 +47,12 @@ def test_keras_dense_float32(self, params, ie_device, precision, temp_dir, ir_ve dict(input_names=["x"], input_shapes=[[2, 3, 8]], input_type=tf.float32, axis=2, epsilon=1e-6, center=False, scale=True), - pytest.param(dict(input_names=["x"], input_shapes=[[2, 2, 3, 5]], input_type=tf.float32, axis=(1, 2, 3), - epsilon=1e-5, - center=True, scale=True), marks=pytest.mark.precommit_tf_fe)] + dict(input_names=["x"], input_shapes=[[2, 2, 3, 5]], input_type=tf.float32, axis=(1, 2, 3), + epsilon=1e-5, + center=True, scale=True)] @pytest.mark.parametrize("params", test_data_float32) + @pytest.mark.precommit @pytest.mark.nightly def test_keras_dense_float32(self, params, ie_device, precision, temp_dir, ir_version): self._test(*self.create_keras_lnorm_net(**params, ir_version=ir_version), diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lstm.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lstm.py index 3fc85331f89c60..1d893972779ae8 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lstm.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_lstm.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -54,7 +54,6 @@ def create_keras_lstm_net(self, input_names, input_shapes, input_type, units, ac @pytest.mark.parametrize("params", test_data_simple) @pytest.mark.nightly @pytest.mark.precommit - @pytest.mark.precommit_tf_fe def test_keras_lstm_with_bias_float32(self, params, ie_device, precision, temp_dir, ir_version, use_legacy_frontend): self._test(*self.create_keras_lstm_net(**params, ir_version=ir_version), diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maximum.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maximum.py index 1dbb54eb2f8e17..f4046c9e4277fc 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maximum.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maximum.py @@ -1,11 +1,9 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest import tensorflow as tf -from common.layer_test_class import check_ir_version from common.tf2_layer_test_class import CommonTF2LayerTest -from unit_tests.utils.graph import build_graph class TestKerasMaximum(CommonTF2LayerTest): @@ -27,62 +25,6 @@ def create_keras_maximum_net(self, input_names, input_shapes, input_type, ir_ver # create reference IR net ref_net = None - op_name = "Maximum" - if check_ir_version(10, None, ir_version): - # convert NHWC to NCHW layout if tensor rank greater 3 - converted_input_shape = input_shapes[0].copy() - if len(converted_input_shape) > 3: - converted_input_shape[1] = input_shapes[0][-1] - converted_input_shape[2:] = input_shapes[0][1:-1] - if len(input_names) == 2: - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'input2': {'kind': 'op', 'type': 'Parameter'}, - 'input2_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'op': {'kind': 'op', 'type': op_name}, - 'op_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input2', 'input2_data'), - ('input1_data', 'op', {'in': 0}), - ('input2_data', 'op', {'in': 1}), - ('op', 'op_data'), - ('op_data', 'result') - ]) - elif len(input_names) == 3: - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'input2': {'kind': 'op', 'type': 'Parameter'}, - 'input2_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'op1': {'kind': 'op', 'type': op_name}, - 'op1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'input3': {'kind': 'op', 'type': 'Parameter'}, - 'input3_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'op2': {'kind': 'op', 'type': op_name}, - 'op2_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input2', 'input2_data'), - ('input1_data', 'op1', {'in': 0}), - ('input2_data', 'op1', {'in': 1}), - ('op1', 'op1_data'), - ('input3', 'input3_data'), - ('op1_data', 'op2', {'in': 0}), - ('input3_data', 'op2', {'in': 1}), - ('op2', 'op2_data'), - ('op2_data', 'result') - ]) - else: - AssertionError("Not supported case with input number greater 2") - return tf2_net, ref_net test_data_float32_precommit = [ diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool2D.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool2D.py index 52a2106197771a..227dfe0b49e804 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool2D.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_maxpool2D.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -37,10 +37,9 @@ def create_keras_maxpool2D_net(self, input_names, input_shapes, input_type, pool strides=None, padding='valid', dataformat='channels_last'), dict(input_names=["x"], input_shapes=[[5, 4, 5, 12]], input_type=tf.float32, pool_size=1, strides=4, padding='valid', dataformat='channels_last'), - pytest.param(dict(input_names=["x"], input_shapes=[[5, 4, 6, 6]], input_type=tf.float32, + dict(input_names=["x"], input_shapes=[[5, 4, 6, 6]], input_type=tf.float32, pool_size=(2, 3), strides=(3, 3), padding='valid', dataformat='channels_last'), - marks=pytest.mark.precommit_tf_fe), dict(input_names=["x"], input_shapes=[[5, 4, 4, 8]], input_type=tf.float32, pool_size=2, strides=2, padding='valid', dataformat='channels_last'), ] diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_minimum.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_minimum.py index 17bd87cbc07ea0..ffd6ab01ec4aee 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_minimum.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_minimum.py @@ -1,11 +1,9 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest import tensorflow as tf -from common.layer_test_class import check_ir_version from common.tf2_layer_test_class import CommonTF2LayerTest -from unit_tests.utils.graph import build_graph class TestKerasMinimum(CommonTF2LayerTest): @@ -27,62 +25,6 @@ def create_keras_minimum_net(self, input_names, input_shapes, input_type, ir_ver # create reference IR net ref_net = None - op_name = "Minimum" - if check_ir_version(10, None, ir_version): - # convert NHWC to NCHW layout if tensor rank greater 3 - converted_input_shape = input_shapes[0].copy() - if len(converted_input_shape) > 3: - converted_input_shape[1] = input_shapes[0][-1] - converted_input_shape[2:] = input_shapes[0][1:-1] - if len(input_names) == 2: - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'input2': {'kind': 'op', 'type': 'Parameter'}, - 'input2_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'op': {'kind': 'op', 'type': op_name}, - 'op_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input2', 'input2_data'), - ('input1_data', 'op', {'in': 0}), - ('input2_data', 'op', {'in': 1}), - ('op', 'op_data'), - ('op_data', 'result') - ]) - elif len(input_names) == 3: - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'input2': {'kind': 'op', 'type': 'Parameter'}, - 'input2_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'op1': {'kind': 'op', 'type': op_name}, - 'op1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'input3': {'kind': 'op', 'type': 'Parameter'}, - 'input3_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'op2': {'kind': 'op', 'type': op_name}, - 'op2_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input2', 'input2_data'), - ('input1_data', 'op1', {'in': 0}), - ('input2_data', 'op1', {'in': 1}), - ('op1', 'op1_data'), - ('input3', 'input3_data'), - ('op1_data', 'op2', {'in': 0}), - ('input3_data', 'op2', {'in': 1}), - ('op2', 'op2_data'), - ('op2_data', 'result') - ]) - else: - AssertionError("Not supported case with input number greater 2") - return tf2_net, ref_net test_data_float32_precommit = [ diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiheadattention.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiheadattention.py index 480895f9df3318..76655a0b85476c 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiheadattention.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiheadattention.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -73,7 +73,7 @@ def create_keras_multiheadattention_net(self, num_heads=1, key_dim=3, value_dim=4, dropout=0.0, use_bias=True, output_shape=None, attention_axes=None, return_attention_scores=True, training=False), - marks=[pytest.mark.xfail(reason="45432"), pytest.mark.precommit_tf_fe]) + marks=pytest.mark.xfail(reason="45432")) ] @pytest.mark.skip(reason='Einsum is unsupported in MO') diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiply.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiply.py index c906ed4c7cf0b6..e3b62352978b9e 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiply.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_multiply.py @@ -1,11 +1,9 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest import tensorflow as tf -from common.layer_test_class import check_ir_version from common.tf2_layer_test_class import CommonTF2LayerTest -from unit_tests.utils.graph import build_graph class TestKerasMultiply(CommonTF2LayerTest): @@ -27,62 +25,6 @@ def create_keras_multiply_net(self, input_names, input_shapes, input_type, ir_ve # create reference IR net ref_net = None - op_name = "Multiply" - if check_ir_version(10, None, ir_version): - # convert NHWC to NCHW layout if tensor rank greater 3 - converted_input_shape = input_shapes[0].copy() - if len(converted_input_shape) > 3: - converted_input_shape[1] = input_shapes[0][-1] - converted_input_shape[2:] = input_shapes[0][1:-1] - if len(input_names) == 2: - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'input2': {'kind': 'op', 'type': 'Parameter'}, - 'input2_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'op': {'kind': 'op', 'type': op_name}, - 'op_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input2', 'input2_data'), - ('input1_data', 'op', {'in': 0}), - ('input2_data', 'op', {'in': 1}), - ('op', 'op_data'), - ('op_data', 'result') - ]) - elif len(input_names) == 3: - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'input2': {'kind': 'op', 'type': 'Parameter'}, - 'input2_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'op1': {'kind': 'op', 'type': op_name}, - 'op1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'input3': {'kind': 'op', 'type': 'Parameter'}, - 'input3_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'op2': {'kind': 'op', 'type': op_name}, - 'op2_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input2', 'input2_data'), - ('input1_data', 'op1', {'in': 0}), - ('input2_data', 'op1', {'in': 1}), - ('op1', 'op1_data'), - ('input3', 'input3_data'), - ('op1_data', 'op2', {'in': 0}), - ('input3_data', 'op2', {'in': 1}), - ('op2', 'op2_data'), - ('op2_data', 'result') - ]) - else: - AssertionError("Not supported case with input number greater 2") - return tf2_net, ref_net test_data_float32_precommit = [ diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_relu.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_relu.py index 09d62264875faf..9df3eb5b5daf20 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_relu.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_relu.py @@ -1,11 +1,9 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest import tensorflow as tf -from common.layer_test_class import check_ir_version from common.tf2_layer_test_class import CommonTF2LayerTest -from unit_tests.utils.graph import build_graph class TestKerasRelu(CommonTF2LayerTest): @@ -26,26 +24,6 @@ def create_keras_relu_net(self, input_names, input_shapes, input_type, ir_versio # create reference IR net ref_net = None - if check_ir_version(10, None, ir_version): - # convert NHWC to NCHW layout if tensor rank greater 3 - converted_input_shape = input_shapes[0].copy() - if len(converted_input_shape) > 3: - converted_input_shape[1] = input_shapes[0][-1] - converted_input_shape[2:] = input_shapes[0][1:-1] - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'relu': {'kind': 'op', 'type': 'ReLU'}, - 'relu_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input1_data', 'relu', {'in': 0}), - ('relu', 'relu_data'), - ('relu_data', 'result')]) - return tf2_net, ref_net test_data_float32_precommit = [ diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_rnn.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_rnn.py index 94fcb3b889ae4e..1ff6a2891ce4c3 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_rnn.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_rnn.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -58,7 +58,6 @@ def create_keras_rnn_net(self, input_names, input_shapes, @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.precommit - @pytest.mark.precommit_tf_fe def test_keras_rnn(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): self._test(*self.create_keras_rnn_net(**params, ir_version=ir_version), diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_roll.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_roll.py index 4b769bada988e3..37d4897aa24044 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_roll.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_roll.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -31,14 +31,13 @@ def create_keras_roll_net(self, shift, axis, input_names, input_shapes, input_ty input_type=tf.float16), dict(shift=[11, -8], axis=[-1, -2], input_names=["x1"], input_shapes=[[3, 4, 3, 1]], input_type=tf.int32), - pytest.param(dict(shift=[7, -2, 5], axis=[0, -1, -1], input_names=["x1"], input_shapes=[[5, 2, 3, 7]], - input_type=tf.int64), marks=pytest.mark.precommit_tf_fe), - pytest.param( - dict(shift=[1, -2], axis=[0, 1], input_names=["x1"], input_shapes=[[2, 4, 3, 5]], - input_type=tf.float32), - marks=pytest.mark.precommit)] + dict(shift=[7, -2, 5], axis=[0, -1, -1], input_names=["x1"], input_shapes=[[5, 2, 3, 7]], + input_type=tf.int64), + dict(shift=[1, -2], axis=[0, 1], input_names=["x1"], input_shapes=[[2, 4, 3, 5]], + input_type=tf.float32)] @pytest.mark.parametrize("params", test_data) + @pytest.mark.precommit @pytest.mark.nightly def test_keras_roll(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_separableconv2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_separableconv2d.py index 613126134a9fcd..0c38060c896d80 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_separableconv2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_separableconv2d.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -114,13 +114,13 @@ def test_keras_separableconv2d_different_padding(self, params, ie_device, precis dict(input_names=["x1"], input_shapes=[[5, 17, 14, 3]], input_type=tf.float32, filters=3, kernel_size=(2, 1), strides=1, padding='valid', data_format='channels_last', dilation_rate=1, depth_multiplier=1, activation='relu', use_bias=True), - pytest.param(dict(input_names=["x1"], input_shapes=[[1, 14, 12, 2]], input_type=tf.float32, - filters=4, kernel_size=(2, 2), strides=1, padding='valid', data_format='channels_last', - dilation_rate=2, depth_multiplier=3, activation='relu', use_bias=False), - marks=pytest.mark.precommit_tf_fe), + dict(input_names=["x1"], input_shapes=[[1, 14, 12, 2]], input_type=tf.float32, + filters=4, kernel_size=(2, 2), strides=1, padding='valid', data_format='channels_last', + dilation_rate=2, depth_multiplier=3, activation='relu', use_bias=False), ] @pytest.mark.parametrize("params", test_data_different_bias) + @pytest.mark.precommit @pytest.mark.nightly def test_keras_separableconv2d_different_bias(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_simplernn.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_simplernn.py index 4ff789736f5a1e..1d0034c711a2f7 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_simplernn.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_simplernn.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -44,18 +44,15 @@ def create_keras_simplernn_net(self, input_names, input_shapes, input_type, units=3, activation='elu', use_bias=True, dropout=0.0, recurrent_dropout=0.0, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False), - pytest.param( - dict(input_names=["x1"], input_shapes=[[5, 1, 3]], input_type=tf.float32, - units=3, activation='selu', use_bias=True, dropout=0.0, recurrent_dropout=0.0, - return_sequences=False, - return_state=False, go_backwards=False, stateful=False, unroll=False), - marks=pytest.mark.xfail(reason="49512")), + dict(input_names=["x1"], input_shapes=[[5, 1, 3]], input_type=tf.float32, + units=3, activation='selu', use_bias=True, dropout=0.0, recurrent_dropout=0.0, + return_sequences=False, + return_state=False, go_backwards=False, stateful=False, unroll=False), ] @pytest.mark.parametrize("params", test_data_different_activations) @pytest.mark.nightly @pytest.mark.precommit - @pytest.mark.precommit_tf_fe def test_keras_simplernn_different_activations(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): self._test(*self.create_keras_simplernn_net(**params, ir_version=ir_version), diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softmax.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softmax.py index c889a69246489a..486df7dafafbd3 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softmax.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softmax.py @@ -1,11 +1,9 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest import tensorflow as tf -from common.layer_test_class import check_ir_version from common.tf2_layer_test_class import CommonTF2LayerTest -from unit_tests.utils.graph import build_graph class TestKerasSoftmax(CommonTF2LayerTest): @@ -26,26 +24,6 @@ def create_keras_softmax_net(self, input_names, input_shapes, input_type, ir_ver # create reference IR net ref_net = None - if check_ir_version(10, None, ir_version): - # convert NHWC to NCHW layout if tensor rank greater 3 - converted_input_shape = input_shapes[0].copy() - if len(converted_input_shape) > 3: - converted_input_shape[1] = input_shapes[0][-1] - converted_input_shape[2:] = input_shapes[0][1:-1] - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'softmax': {'kind': 'op', 'type': 'SoftMax'}, - 'softmax_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input1_data', 'softmax', {'in': 0}), - ('softmax', 'softmax_data'), - ('softmax_data', 'result')]) - return tf2_net, ref_net test_data_float32_precommit = [ diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softplus.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softplus.py index 9ad45afb9e22d4..11b2493355376b 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softplus.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_softplus.py @@ -1,11 +1,9 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest import tensorflow as tf -from common.layer_test_class import check_ir_version from common.tf2_layer_test_class import CommonTF2LayerTest -from unit_tests.utils.graph import build_graph class TestKerasSoftplus(CommonTF2LayerTest): @@ -26,26 +24,6 @@ def create_keras_softplus_net(self, input_names, input_shapes, input_type, ir_ve # create reference IR net ref_net = None - if check_ir_version(10, None, ir_version): - # convert NHWC to NCHW layout if tensor rank greater 3 - converted_input_shape = input_shapes[0].copy() - if len(converted_input_shape) > 3: - converted_input_shape[1] = input_shapes[0][-1] - converted_input_shape[2:] = input_shapes[0][1:-1] - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'softplus': {'kind': 'op', 'type': 'SoftPlus'}, - 'softplus_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input1_data', 'softplus', {'in': 0}), - ('softplus', 'softplus_data'), - ('softplus_data', 'result')]) - return tf2_net, ref_net test_data_float32_precommit = [ diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout1d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout1d.py index 5c9fc346a2994f..888caaee861d1c 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout1d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout1d.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -32,6 +32,7 @@ def create_keras_spatialdropout1d_net(self, input_names, input_shapes, input_typ @pytest.mark.precommit def test_keras_spatialdropout1d(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): + pytest.skip("Error: failed due to missing a required argument: x1") self._test(*self.create_keras_spatialdropout1d_net(**params, ir_version=ir_version), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_legacy_frontend=use_legacy_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout2d.py index 8871c7c0dfb720..2116b3e43cf935 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout2d.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -35,6 +35,7 @@ def create_keras_spatialdropout2d_net(self, input_names, input_shapes, input_typ @pytest.mark.precommit def test_keras_spatialdropout2d(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): + pytest.skip("Error: failed due to missing a required argument: x1") self._test(*self.create_keras_spatialdropout2d_net(**params, ir_version=ir_version), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_legacy_frontend=use_legacy_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout3d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout3d.py index 30638ec2aa4687..ec7198c9cfa026 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout3d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_spatialdropout3d.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -35,6 +35,7 @@ def create_keras_spatialdropout3d_net(self, input_names, input_shapes, input_typ @pytest.mark.precommit def test_keras_spatialdropout3d(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): + pytest.skip("Error: failed due to missing a required argument: x1") self._test(*self.create_keras_spatialdropout3d_net(**params, ir_version=ir_version), ie_device, precision, temp_dir=temp_dir, ir_version=ir_version, use_legacy_frontend=use_legacy_frontend, **params) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_stackedrnncells.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_stackedrnncells.py index 40db8d22d63f5d..1d1c1f8da20589 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_stackedrnncells.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_stackedrnncells.py @@ -40,7 +40,6 @@ def create_keras_stackedrnncells_net(self, input_names, input_shapes, input_type @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly @pytest.mark.precommit - @pytest.mark.precommit_tf_fe def test_keras_stackedrnncells(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): self._test(*self.create_keras_stackedrnncells_net(**params, ir_version=ir_version), diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_subtract.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_subtract.py index 70ea1ad9151f84..7c9f13868c0d3f 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_subtract.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_subtract.py @@ -1,11 +1,9 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest import tensorflow as tf -from common.layer_test_class import check_ir_version from common.tf2_layer_test_class import CommonTF2LayerTest -from unit_tests.utils.graph import build_graph class TestKerasSubtract(CommonTF2LayerTest): @@ -27,32 +25,6 @@ def create_keras_subtract_net(self, input_names, input_shapes, input_type, ir_ve # create reference IR net ref_net = None - op_name = "Subtract" - if check_ir_version(10, None, ir_version): - # convert NHWC to NCHW layout if tensor rank greater 3 - converted_input_shape = input_shapes[0].copy() - if len(converted_input_shape) > 3: - converted_input_shape[1] = input_shapes[0][-1] - converted_input_shape[2:] = input_shapes[0][1:-1] - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'input2': {'kind': 'op', 'type': 'Parameter'}, - 'input2_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'op': {'kind': 'op', 'type': op_name}, - 'op_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input2', 'input2_data'), - ('input1_data', 'op', {'in': 0}), - ('input2_data', 'op', {'in': 1}), - ('op', 'op_data'), - ('op_data', 'result') - ]) - return tf2_net, ref_net test_data_float32_precommit = [ diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_swish.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_swish.py index 6981e0a89887e9..e168bf2d655fb7 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_swish.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_swish.py @@ -1,11 +1,9 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest import tensorflow as tf -from common.layer_test_class import check_ir_version from common.tf2_layer_test_class import CommonTF2LayerTest -from unit_tests.utils.graph import build_graph class TestKerasSWish(CommonTF2LayerTest): @@ -26,26 +24,6 @@ def create_keras_swish_net(self, input_names, input_shapes, input_type, ir_versi # create reference IR net ref_net = None - if check_ir_version(10, None, ir_version): - # convert NHWC to NCHW layout if tensor rank greater 3 - converted_input_shape = input_shapes[0].copy() - if len(converted_input_shape) > 3: - converted_input_shape[1] = input_shapes[0][-1] - converted_input_shape[2:] = input_shapes[0][1:-1] - nodes_attributes = { - 'input1': {'kind': 'op', 'type': 'Parameter'}, - 'input1_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'swish': {'kind': 'op', 'type': 'Swish'}, - 'swish_data': {'shape': converted_input_shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input1', 'input1_data'), - ('input1_data', 'swish', {'in': 0}), - ('swish', 'swish_data'), - ('swish_data', 'result')]) - return tf2_net, ref_net test_data_float32_precommit = [ diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_text_vectorization.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_text_vectorization.py index 3c6f15c176d334..b71f33e9e541e9 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_text_vectorization.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_text_vectorization.py @@ -48,7 +48,7 @@ def create_text_vectorization_net(self, input_shapes, vocabulary, output_mode, o 'ОПЕНВИНО', 'здесь', 'там', '你好', '那裡', '檢查']]) @pytest.mark.parametrize('output_mode', ['int']) @pytest.mark.parametrize('output_sequence_length', [32, 64]) - @pytest.mark.precommit_tf_fe + @pytest.mark.precommit @pytest.mark.nightly def test_text_vectorization(self, input_shapes, vocabulary, output_mode, output_sequence_length, strings_dictionary, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling2d.py index 993a74e9e7d511..b4c550ff78b3f6 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_upsampling2d.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np @@ -49,7 +49,7 @@ def create_keras_upsampling2d_net(self, input_shapes, input_type, size, @pytest.mark.parametrize("interpolation", ['nearest', 'bilinear']) @pytest.mark.parametrize("input_type", [np.float16, np.float32, np.float64, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.int64]) - @pytest.mark.precommit_tf_fe + @pytest.mark.precommit @pytest.mark.nightly def test_keras_upsampling2d_nearest(self, params, input_type, data_format, interpolation, ie_device, precision, ir_version, temp_dir, diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding2d.py index 8e957a8e250c39..01be9b6f7df73c 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding2d.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_zeropadding2d.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -26,8 +26,8 @@ def create_keras_zeropadding2d_net(self, input_names, input_shapes, input_type, padding=2, data_format='channels_last'), dict(input_names=["x1"], input_shapes=[[3, 2, 4, 6]], input_type=tf.float32, padding=(3, 0), data_format='channels_last'), - pytest.param(dict(input_names=["x1"], input_shapes=[[1, 3, 8, 7]], input_type=tf.float32, - padding=((5, 1), (3, 4)), data_format='channels_last'), marks=pytest.mark.precommit_tf_fe), + dict(input_names=["x1"], input_shapes=[[1, 3, 8, 7]], input_type=tf.float32, + padding=((5, 1), (3, 4)), data_format='channels_last'), ] @pytest.mark.parametrize("params", test_data_channels_last) diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_map_fn.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_map_fn.py index 97cbb9997bb3dd..85572e719af14a 100644 --- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_map_fn.py +++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_map_fn.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 Intel Corporation +# Copyright (C) 2022-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -6,36 +6,46 @@ from common.tf2_layer_test_class import CommonTF2LayerTest + def fn_1(x): return (x[0] * x[1] + x[2]) + def fn_2(x): return (x[0] + x[1] + x[2], x[0] - x[2] + x[1], 2 + x[2]) + def fn_3(x): return (x[0] * x[1]) + def fn_4(x): return (x[0] * x[1] + 2 * x[2]) + def fn_5(x): return (x[0] * x[1], x[0] + x[1]) + def fn_6(x): return (x[0] * x[1] + x[2], x[0] + x[2] * x[1], 2 * x[2]) + def fn_7(x): return (x[0] * x[1] + x[2]) + def fn_8(x): return (x[0] + x[1] + x[2], x[0] - x[2] + x[1], 2 + x[2]) + list_fns = [fn_1, fn_2, fn_3, fn_4, fn_5, fn_6, fn_7, fn_8] + class MapFNLayer(tf.keras.layers.Layer): def __init__(self, fn, input_type, fn_output_signature, back_prop): super(MapFNLayer, self).__init__() - self.fn = list_fns[fn-1] + self.fn = list_fns[fn - 1] self.input_type = input_type self.fn_output_signature = fn_output_signature self.back_prop = back_prop @@ -45,6 +55,7 @@ def call(self, x): fn_output_signature=self.fn_output_signature, back_prop=self.back_prop) + class TestMapFN(CommonTF2LayerTest): def create_map_fn_net(self, fn, input_type, fn_output_signature, back_prop, input_names, input_shapes, ir_version): @@ -66,12 +77,11 @@ def create_map_fn_net(self, fn, input_type, fn_output_signature, back_prop, dict(fn=1, input_type=tf.float32, fn_output_signature=tf.float32, back_prop=False, input_names=["x1", "x2", "x3"], input_shapes=[[2, 3, 4], [2, 3, 4], [2, 3, 4]]), - pytest.param(dict(fn=2, - input_type=tf.float32, - fn_output_signature=(tf.float32, tf.float32, tf.float32), back_prop=True, - input_names=["x1", "x2", "x3"], - input_shapes=[[2, 1, 3, 4], [2, 1, 3, 4], [2, 1, 3, 4]]), - marks=pytest.mark.xfail(reason="61587")) + dict(fn=2, + input_type=tf.float32, + fn_output_signature=(tf.float32, tf.float32, tf.float32), back_prop=True, + input_names=["x1", "x2", "x3"], + input_shapes=[[2, 1, 3, 4], [2, 1, 3, 4], [2, 1, 3, 4]]), ] @pytest.mark.parametrize("params", test_basic) @@ -126,15 +136,15 @@ def test_multiple_outputs(self, params, ie_device, precision, ir_version, temp_d fn_output_signature=tf.int32, back_prop=True, input_names=["x1", "x2", "x3"], input_shapes=[[2, 1, 3], [2, 1, 3], [2, 1, 3]]), - pytest.param(dict(fn=8, - input_type=tf.int32, - fn_output_signature=(tf.int32, tf.int32, tf.int32), back_prop=True, - input_names=["x1", "x2", "x3"], - input_shapes=[[2, 1, 3, 4], [2, 1, 3, 4], [2, 1, 3, 4]]), - marks=[pytest.mark.xfail(reason="61587"), pytest.mark.precommit_tf_fe]) + dict(fn=8, + input_type=tf.int32, + fn_output_signature=(tf.int32, tf.int32, tf.int32), back_prop=True, + input_names=["x1", "x2", "x3"], + input_shapes=[[2, 1, 3, 4], [2, 1, 3, 4], [2, 1, 3, 4]]), ] @pytest.mark.parametrize("params", test_multiple_inputs_outputs_int32) + @pytest.mark.precommit @pytest.mark.nightly def test_multiple_inputs_outputs_int32(self, params, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): diff --git a/tests/layer_tests/tensorflow_tests/test_tf_MatrixInverse.py b/tests/layer_tests/tensorflow_tests/test_tf_MatrixInverse.py new file mode 100644 index 00000000000000..c18de12d0ec7e7 --- /dev/null +++ b/tests/layer_tests/tensorflow_tests/test_tf_MatrixInverse.py @@ -0,0 +1,66 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import tensorflow as tf +from common.tf_layer_test_class import CommonTFLayerTest + +class TestMatrixInverse(CommonTFLayerTest): + def _prepare_input(self, inputs_info): + assert 'input:0' in inputs_info + inputs_data = {} + inputs_data['input:0'] = self._generate_invertible_matrices(self.input_shape) + + return inputs_data + + def _generate_invertible_matrices(self, input_shape): + if input_shape == [2, 2]: + return np.array([[1, 2], + [3, 1] + ], dtype=np.float32) + elif input_shape == [3, 3]: + return np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 1] + ], dtype=np.float32) + elif input_shape == [4, 4]: + return np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 2, 1], + [13, 14, 2, 1] + ], dtype=np.float32) + elif input_shape == [2, 4, 4]: + return np.array([[[10, 2, 3, 4], + [5, 10, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]], + [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 6, 12], + [13, 14, 15, 10]] + ], dtype=np.float32) + + def create_matrix_inverse_net(self, input_shape, adjoint): + self.input_shape = input_shape + tf.compat.v1.reset_default_graph() + with tf.compat.v1.Session() as sess: + input_tensor = tf.compat.v1.placeholder(np.float32, input_shape, 'input') + tf.raw_ops.MatrixInverse(input=input_tensor, adjoint=adjoint) + tf.compat.v1.global_variables_initializer() + tf_net = sess.graph_def + + return tf_net, None + + @pytest.mark.parametrize("input_shape", [[2, 2], [3, 3], [2, 4, 4]]) + @pytest.mark.parametrize("adjoint", [None, False, True]) + @pytest.mark.precommit + @pytest.mark.nightly + def test_matrix_inverse_basic(self, input_shape, adjoint, ie_device, precision, ir_version, temp_dir, use_legacy_frontend): + if ie_device == 'GPU': + pytest.skip("GPU does not support Inverse operation") + if adjoint: + pytest.skip("TF FE does not support MatrixInverse with adjoint equal to True") + self._test(*self.create_matrix_inverse_net(input_shape=input_shape, adjoint=adjoint), + ie_device, precision, ir_version, temp_dir=temp_dir, + use_legacy_frontend=use_legacy_frontend) \ No newline at end of file diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Pooling.py b/tests/layer_tests/tensorflow_tests/test_tf_Pooling.py index 2dfbe97acc0483..82f85f204ff458 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Pooling.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Pooling.py @@ -4,11 +4,8 @@ import platform import pytest -from common.layer_test_class import check_ir_version from common.tf_layer_test_class import CommonTFLayerTest -from unit_tests.utils.graph import build_graph - class TestPooling(CommonTFLayerTest): def create_pooling_net(self, kernel_size, strides, pads, in_shape, out_shape, method, @@ -39,10 +36,10 @@ def create_pooling_net(self, kernel_size, strides, pads, in_shape, out_shape, me if method == 'max': tf.raw_ops.MaxPool(input=input, ksize=kernel, strides=stride, padding=padding, - name='Operation') + name='Operation') elif method == 'avg': tf.raw_ops.AvgPool(value=input, ksize=kernel, strides=stride, padding=padding, - name='Operation') + name='Operation') # 5D tensors elif len(in_shape) == 5: @@ -54,10 +51,10 @@ def create_pooling_net(self, kernel_size, strides, pads, in_shape, out_shape, me if method == 'max': tf.raw_ops.MaxPool3D(input=input, ksize=kernel, strides=stride, padding=padding, - name='Operation') + name='Operation') elif method == 'avg': tf.raw_ops.AvgPool3D(input=input, ksize=kernel, strides=stride, padding=padding, - name='Operation') + name='Operation') tf.compat.v1.global_variables_initializer() tf_net = sess.graph_def @@ -72,10 +69,10 @@ def create_pooling_net(self, kernel_size, strides, pads, in_shape, out_shape, me in_shape=[1, 3, 224, 224], out_shape=[1, 3, 224, 224], method=method), pytest.param( - dict(kernel_size=[2, 2], strides=[2, 2], pads=[[0, 0], [0, 0], 'SAME'], - in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 112], - method=method), - marks=pytest.mark.precommit), + dict(kernel_size=[2, 2], strides=[2, 2], pads=[[0, 0], [0, 0], 'SAME'], + in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 112], + method=method), + marks=pytest.mark.precommit), dict(kernel_size=[2, 4], strides=[2, 4], pads=[[0, 0], [0, 0], 'SAME'], in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 56], method=method), @@ -98,10 +95,10 @@ def create_pooling_net(self, kernel_size, strides, pads, in_shape, out_shape, me in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 75], method=method), pytest.param( - dict(kernel_size=[111, 111], strides=[111, 111], - pads=[[54, 54], [55, 55], 'SAME'], - in_shape=[1, 3, 224, 224], out_shape=[1, 3, 3, 3], method=method), - marks=pytest.mark.precommit), + dict(kernel_size=[111, 111], strides=[111, 111], + pads=[[54, 54], [55, 55], 'SAME'], + in_shape=[1, 3, 224, 224], out_shape=[1, 3, 3, 3], method=method), + marks=pytest.mark.precommit), dict(kernel_size=[111, 113], strides=[111, 113], pads=[[54, 1], [55, 1], 'SAME'], in_shape=[1, 3, 224, 224], out_shape=[1, 3, 3, 2], method=method), @@ -119,9 +116,9 @@ def create_pooling_net(self, kernel_size, strides, pads, in_shape, out_shape, me dict(kernel_size=[2, 2], strides=[2, 2], pads=[[0, 0], [0, 0], 'VALID'], in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 112], method=method), pytest.param( - dict(kernel_size=[2, 4], strides=[2, 4], pads=[[0, 0], [0, 0], 'VALID'], - in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 56], method=method), - marks=pytest.mark.precommit), + dict(kernel_size=[2, 4], strides=[2, 4], pads=[[0, 0], [0, 0], 'VALID'], + in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 56], method=method), + marks=pytest.mark.precommit), dict(kernel_size=[4, 2], strides=[4, 2], pads=[[0, 0], [0, 0], 'VALID'], in_shape=[1, 3, 224, 224], out_shape=[1, 3, 56, 112], method=method), dict(kernel_size=[2, 3], strides=[2, 3], pads=[[0, 0], [0, 0], 'VALID'], @@ -161,9 +158,9 @@ def test_pool_4D(self, params, ie_device, precision, ir_version, temp_dir, use_l [dict(kernel_size=[1, 1, 1], strides=[1, 1, 1], pads=[[0, 0, 0], [0, 0, 0], 'SAME'], in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 224, 224, 224], method=method), pytest.param( - dict(kernel_size=[2, 2, 2], strides=[2, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'SAME'], - in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 112], method=method), - marks=pytest.mark.precommit), + dict(kernel_size=[2, 2, 2], strides=[2, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'SAME'], + in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 112], method=method), + marks=pytest.mark.precommit), dict(kernel_size=[2, 2, 4], strides=[2, 2, 4], pads=[[0, 0, 0], [0, 0, 0], 'SAME'], in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 56], method=method), dict(kernel_size=[4, 2, 2], strides=[4, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'SAME'], @@ -195,9 +192,9 @@ def test_pool_4D(self, params, ie_device, precision, ir_version, temp_dir, use_l [dict(kernel_size=[1, 1, 1], strides=[1, 1, 1], pads=[[0, 0, 0], [0, 0, 0], 'VALID'], in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 224, 224, 224], method=method), pytest.param( - dict(kernel_size=[2, 2, 2], strides=[2, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'VALID'], - in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 112], method=method), - marks=pytest.mark.precommit), + dict(kernel_size=[2, 2, 2], strides=[2, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'VALID'], + in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 112], method=method), + marks=pytest.mark.precommit), dict(kernel_size=[2, 2, 4], strides=[2, 2, 4], pads=[[0, 0, 0], [0, 0, 0], 'VALID'], in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 56], method=method), dict(kernel_size=[4, 2, 2], strides=[4, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'VALID'], diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ReLU6.py b/tests/layer_tests/tensorflow_tests/test_tf_ReLU6.py index 005cd886d4f1e8..42bfbc9ccea6d8 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ReLU6.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ReLU6.py @@ -2,11 +2,8 @@ # SPDX-License-Identifier: Apache-2.0 import pytest -from common.layer_test_class import check_ir_version from common.tf_layer_test_class import CommonTFLayerTest -from unit_tests.utils.graph import build_graph - class TestReLU6(CommonTFLayerTest): def create_relu6_net(self, shape, ir_version, use_legacy_frontend): @@ -25,22 +22,6 @@ def create_relu6_net(self, shape, ir_version, use_legacy_frontend): ref_net = None - if check_ir_version(10, None, ir_version) and not use_legacy_frontend: - nodes_attributes = { - 'input': {'kind': 'op', 'type': 'Parameter'}, - 'input_data': {'shape': shape, 'kind': 'data'}, - 'ReLU6': {'kind': 'op', 'type': 'Clamp', "max": 6, "min": 0}, - 'ReLU6_data': {'shape': shape, 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_net = build_graph(nodes_attributes, - [('input', 'input_data'), - ('input_data', 'ReLU6'), - ('ReLU6', 'ReLU6_data'), - ('ReLU6_data', 'result') - ]) - return tf_net, ref_net test_data_precommit = [dict(shape=[1, 3, 50, 100, 224])] diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Resample_pattern_new.py b/tests/layer_tests/tensorflow_tests/test_tf_Resample_pattern_new.py index 4088b471bf435f..0e25cf0c05fcf1 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Resample_pattern_new.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Resample_pattern_new.py @@ -5,8 +5,6 @@ import pytest from common.tf_layer_test_class import CommonTFLayerTest -from unit_tests.utils.graph import build_graph - class TestResamplePattern(CommonTFLayerTest): def _prepare_input(self, inputs_dict): @@ -39,31 +37,7 @@ def create_resample_net(self, shape, factor, use_legacy_frontend): tf.compat.v1.global_variables_initializer() tf_net = sess.graph_def - # - # Create reference IR net - # Please, specify 'type': 'Input' for input node - # Moreover, do not forget to validate ALL layer attributes!!! - # - ref_net = None - if not use_legacy_frontend: - new_shape = shape.copy() - new_shape[2] *= factor - new_shape[3] *= factor - nodes_attributes = { - 'input': {'kind': 'op', 'type': 'Input'}, - 'input_data': {'shape': shape, 'kind': 'data'}, - 'resample': {'kind': 'op', 'type': 'caffe.ResampleParameter.NEAREST', - "factor": factor, - "height": 0, "width": 0, "antialias": 0}, - 'resample_data': {'shape': new_shape, 'kind': 'data'}, - } - - ref_net = build_graph(nodes_attributes, - [('input', 'input_data'), - ('input_data', 'resample'), - ('resample', 'resample_data') - ]) return tf_net, ref_net diff --git a/thirdparty/xbyak b/thirdparty/xbyak index 740dff2e866f3a..58642e0cdd5cbe 160000 --- a/thirdparty/xbyak +++ b/thirdparty/xbyak @@ -1 +1 @@ -Subproject commit 740dff2e866f3ae1a70dd42d6e8836847ed95cc2 +Subproject commit 58642e0cdd5cbe12f5d6e05069ddddbc0f5d5383