diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index 0a5091614037ff..0e0df7af27b3fa 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -1,3 +1,15 @@ +resources: + repositories: + - repository: openvino_contrib + type: github + endpoint: openvinotoolkit + name: openvinotoolkit/openvino_contrib + + - repository: testdata + type: github + endpoint: openvinotoolkit + name: openvinotoolkit/testdata + jobs: - job: Lin # About 150% of total time @@ -13,6 +25,8 @@ jobs: WORKERS_NUMBER: 8 BUILD_TYPE: Release REPO_DIR: $(Build.Repository.LocalPath) + OPENVINO_CONTRIB_REPO_DIR: $(REPO_DIR)/../openvino_contrib + MODELS_PATH: $(REPO_DIR)/../testdata WORK_DIR: $(Pipeline.Workspace)/_w BUILD_DIR: $(WORK_DIR)/build BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE) @@ -48,6 +62,17 @@ jobs: submodules: recursive path: openvino + - checkout: openvino_contrib + clean: true + lfs: false + submodules: recursive + path: openvino_contrib + + - checkout: testdata + clean: true + lfs: true + path: testdata + - script: | sudo apt --assume-yes install libusb-1.0-0-dev python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/requirements.txt @@ -65,7 +90,7 @@ jobs: - task: CMake@1 inputs: # CMake must get Python 3.x version by default - cmakeArgs: -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=/usr/bin/python3.6 -DENABLE_TESTS=ON $(REPO_DIR) + cmakeArgs: -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=/usr/bin/python3.6 -DENABLE_TESTS=ON -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules $(REPO_DIR) workingDirectory: $(BUILD_DIR) - script: ninja @@ -116,28 +141,23 @@ jobs: continueOnError: false - script: | - git clone https://github.com/openvinotoolkit/testdata.git - workingDirectory: $(WORK_DIR) - displayName: 'Clone testdata' - - - script: | - export DATA_PATH=$(WORK_DIR)/testdata - export MODELS_PATH=$(WORK_DIR)/testdata + export DATA_PATH=$(MODELS_PATH) + export MODELS_PATH=$(MODELS_PATH) python3 $(WORK_DIR)/gtest-parallel/gtest-parallel $(BIN_DIR)/MklDnnFunctionalTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=MklDnnFunctionalTests.json --gtest_filter=*smoke* -- --gtest_print_time=1 workingDirectory: $(WORK_DIR) displayName: 'MklDnnFunctionalTests' continueOnError: false - script: | - export DATA_PATH=$(WORK_DIR)/testdata - export MODELS_PATH=$(WORK_DIR)/testdata + export DATA_PATH=$(MODELS_PATH) + export MODELS_PATH=$(MODELS_PATH) $(BIN_DIR)/InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml displayName: 'IE CAPITests' continueOnError: false - script: | - export DATA_PATH=$(WORK_DIR)/testdata - export MODELS_PATH=$(WORK_DIR)/testdata + export DATA_PATH=$(MODELS_PATH) + export MODELS_PATH=$(MODELS_PATH) export LD_LIBRARY_PATH=$(BIN_DIR)/lib export PYTHONPATH=$(BIN_DIR)/lib/python_api/python3.6 env diff --git a/.ci/azure/mac.yml b/.ci/azure/mac.yml index df8e1d2bac4907..30032ddd25a745 100644 --- a/.ci/azure/mac.yml +++ b/.ci/azure/mac.yml @@ -1,3 +1,15 @@ +resources: + repositories: + - repository: openvino_contrib + type: github + endpoint: openvinotoolkit + name: openvinotoolkit/openvino_contrib + + - repository: testdata + type: github + endpoint: openvinotoolkit + name: openvinotoolkit/testdata + jobs: - job: Mac # About 200% of total time (perfomace of Mac hosts is unstable) @@ -13,6 +25,8 @@ jobs: WORKERS_NUMBER: 3 BUILD_TYPE: Release REPO_DIR: $(Build.Repository.LocalPath) + OPENVINO_CONTRIB_REPO_DIR: 
$(REPO_DIR)/../openvino_contrib + MODELS_PATH: $(REPO_DIR)/../testdata WORK_DIR: $(Pipeline.Workspace)/_w BUILD_DIR: $(WORK_DIR)/build BIN_DIR: $(REPO_DIR)/bin/intel64/$(BUILD_TYPE) @@ -42,6 +56,17 @@ jobs: submodules: recursive path: openvino + - checkout: openvino_contrib + clean: true + lfs: false + submodules: recursive + path: openvino_contrib + + - checkout: testdata + clean: true + lfs: true + path: testdata + - task: UsePythonVersion@0 inputs: versionSpec: '3.7' @@ -63,7 +88,7 @@ jobs: # Disable errors with Ninja export CXXFLAGS="-Wno-error=unused-command-line-argument" export CFLAGS="-Wno-error=unused-command-line-argument" - cmake -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=ON $(REPO_DIR) + cmake -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=ON -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules $(REPO_DIR) workingDirectory: $(BUILD_DIR) displayName: 'CMake' @@ -111,21 +136,16 @@ jobs: continueOnError: false - script: | - git clone https://github.com/openvinotoolkit/testdata.git - workingDirectory: $(WORK_DIR) - displayName: 'Clone testdata' - - - script: | - export DATA_PATH=$(WORK_DIR)/testdata - export MODELS_PATH=$(WORK_DIR)/testdata + export DATA_PATH=$(MODELS_PATH) + export MODELS_PATH=$(MODELS_PATH) python3 $(WORK_DIR)/gtest-parallel/gtest-parallel $(BIN_DIR)/MklDnnFunctionalTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=MklDnnFunctionalTests.json --gtest_filter=*smoke*:-smoke_MobileNet/ModelTransformationsTest.LPT/mobilenet_v2_tf_depthwise_batch1_inPluginDisabled_inTestDisabled_asymmetric* -- --gtest_print_time=1 workingDirectory: $(WORK_DIR) displayName: 'MklDnnFunctionalTests' continueOnError: false - script: | - export DATA_PATH=$(WORK_DIR)/testdata - export MODELS_PATH=$(WORK_DIR)/testdata + export DATA_PATH=$(MODELS_PATH) + export MODELS_PATH=$(MODELS_PATH) $(BIN_DIR)/InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml displayName: 'IE CAPITests' continueOnError: false diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml index 30257601a805d8..efd5afba0a0a9d 100644 --- a/.ci/azure/windows.yml +++ b/.ci/azure/windows.yml @@ -1,3 +1,15 @@ +resources: + repositories: + - repository: openvino_contrib + type: github + endpoint: openvinotoolkit + name: openvinotoolkit/openvino_contrib + + - repository: testdata + type: github + endpoint: openvinotoolkit + name: openvinotoolkit/testdata + jobs: - job: Win # About 150% of total time @@ -13,6 +25,8 @@ jobs: WORKERS_NUMBER: 8 BUILD_TYPE: Release REPO_DIR: $(Build.Repository.LocalPath) + OPENVINO_CONTRIB_REPO_DIR: $(REPO_DIR)\..\openvino_contrib + MODELS_PATH: $(REPO_DIR)\..\testdata WORK_DIR: $(Pipeline.Workspace)\_w BUILD_DIR: D:\build BIN_DIR: $(REPO_DIR)\bin\intel64 @@ -45,6 +59,17 @@ jobs: submodules: recursive path: openvino + - checkout: openvino_contrib + clean: true + lfs: false + submodules: recursive + path: openvino_contrib + + - checkout: testdata + clean: true + lfs: true + path: testdata + - script: | certutil -urlcache -split -f https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-win.zip ninja-win.zip powershell -command "Expand-Archive -Force ninja-win.zip" @@ -65,7 +90,7 @@ jobs: - script: | set PATH=$(WORK_DIR)\ninja-win;%PATH% - call "$(MSVS_VARS_PATH)" && cmake -GNinja -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR) + call 
"$(MSVS_VARS_PATH)" && cmake -GNinja -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)\modules -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR) workingDirectory: $(BUILD_DIR) displayName: 'CMake' @@ -141,25 +166,20 @@ jobs: displayName: 'MklDnnBehaviorTests' continueOnError: false - - script: | - git clone https://github.com/openvinotoolkit/testdata.git - workingDirectory: $(BUILD_DIR) - displayName: 'Clone testdata' - # Add for gtest-parallel, it hangs now (CVS-33386) #python $(BUILD_DIR)\gtest-parallel\gtest-parallel $(BIN_DIR)\MklDnnFunctionalTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=MklDnnFunctionalTests.json --gtest_filter=*smoke* -- --gtest_print_time=1 - script: | set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.0\opencv\bin;%PATH% - set DATA_PATH=$(BUILD_DIR)\testdata - set MODELS_PATH=$(BUILD_DIR)\testdata + set DATA_PATH=$(MODELS_PATH) + set MODELS_PATH=$(MODELS_PATH) $(BIN_DIR)\MklDnnFunctionalTests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:TEST-MklDnnFunctionalTests.xml displayName: 'MklDnnFunctionalTests' continueOnError: false - script: | set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.0\opencv\bin;%PATH% - set DATA_PATH=$(BUILD_DIR)\testdata - set MODELS_PATH=$(BUILD_DIR)\testdata + set DATA_PATH=$(MODELS_PATH) + set MODELS_PATH=$(MODELS_PATH) $(BIN_DIR)\InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml displayName: 'IE CAPITests' continueOnError: false diff --git a/.ci/openvino-onnx/Dockerfile b/.ci/openvino-onnx/Dockerfile index 40f62f3cbea821..954b1634ed2a23 100644 --- a/.ci/openvino-onnx/Dockerfile +++ b/.ci/openvino-onnx/Dockerfile @@ -75,8 +75,8 @@ RUN make -j $(nproc) install # Run tests via tox WORKDIR /openvino/ngraph/python -ENV NGRAPH_CPP_BUILD_PATH=/openvino/dist -ENV LD_LIBRARY_PATH=/openvino/dist/lib +ENV NGRAPH_CPP_BUILD_PATH=/openvino/dist/deployment_tools/ngraph +ENV LD_LIBRARY_PATH=/openvino/dist/deployment_tools/ngraph/lib ENV NGRAPH_ONNX_IMPORT_ENABLE=TRUE ENV PYTHONPATH=/openvino/bin/intel64/Release/lib/python_api/python3.8:${PYTHONPATH} RUN git clone --recursive https://github.com/pybind/pybind11.git -b v2.5.0 --depth 1 diff --git a/CMakeLists.txt b/CMakeLists.txt index 5c3585a3e9625c..d6bf93044b9ce5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -114,7 +114,7 @@ function(build_ngraph) ie_cpack_add_component(ngraph) set(SDL_cmake_included ON) - # set(NGRAPH_COMPONENT_PREFIX "deployment_tools/ngraph/") + set(NGRAPH_COMPONENT_PREFIX "deployment_tools/ngraph/") add_subdirectory(ngraph) set(NGRAPH_LIBRARIES ngraph PARENT_SCOPE) endfunction() diff --git a/inference-engine/cmake/clang_format.cmake b/cmake/clang_format/clang_format.cmake similarity index 92% rename from inference-engine/cmake/clang_format.cmake rename to cmake/clang_format/clang_format.cmake index d2ff778ce5de0b..ae37ae134e3f4f 100644 --- a/inference-engine/cmake/clang_format.cmake +++ b/cmake/clang_format/clang_format.cmake @@ -76,10 +76,10 @@ function(add_clang_format_target TARGET_NAME) -D "CLANG_FORMAT=${CLANG_FORMAT}" -D "INPUT_FILE=${source_file}" -D "OUTPUT_FILE=${output_file}" - -P "${IE_MAIN_SOURCE_DIR}/cmake/clang_format_check.cmake" + -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/clang_format/clang_format_check.cmake" DEPENDS "${source_file}" - "${IE_MAIN_SOURCE_DIR}/cmake/clang_format_check.cmake" + 
"${OpenVINO_MAIN_SOURCE_DIR}/cmake/clang_format/clang_format_check.cmake" COMMENT "[clang-format] ${source_file}" VERBATIM) @@ -102,10 +102,10 @@ function(add_clang_format_target TARGET_NAME) -D "CLANG_FORMAT=${CLANG_FORMAT}" -D "INPUT_FILES=${CLANG_FORMAT_FOR_SOURCES}" -D "EXCLUDE_PATTERNS=${CLANG_FORMAT_EXCLUDE_PATTERNS}" - -P "${IE_MAIN_SOURCE_DIR}/cmake/clang_format_fix.cmake" + -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/clang_format/clang_format_fix.cmake" DEPENDS "${CLANG_FORMAT_FOR_SOURCES}" - "${IE_MAIN_SOURCE_DIR}/cmake/clang_format_fix.cmake" + "${OpenVINO_MAIN_SOURCE_DIR}/cmake/clang_format/clang_format_fix.cmake" COMMENT "[clang-format] ${TARGET_NAME}_fix" VERBATIM) diff --git a/inference-engine/cmake/clang_format_check.cmake b/cmake/clang_format/clang_format_check.cmake similarity index 100% rename from inference-engine/cmake/clang_format_check.cmake rename to cmake/clang_format/clang_format_check.cmake diff --git a/inference-engine/cmake/clang_format_fix.cmake b/cmake/clang_format/clang_format_fix.cmake similarity index 100% rename from inference-engine/cmake/clang_format_fix.cmake rename to cmake/clang_format/clang_format_fix.cmake diff --git a/inference-engine/cmake/cpplint.cmake b/cmake/cpplint/cpplint.cmake similarity index 80% rename from inference-engine/cmake/cpplint.cmake rename to cmake/cpplint/cpplint.cmake index 6c58d4aa532d4d..23e022d6a514ad 100644 --- a/inference-engine/cmake/cpplint.cmake +++ b/cmake/cpplint/cpplint.cmake @@ -68,17 +68,17 @@ function(add_cpplint_target TARGET_NAME) "${output_file}" COMMAND "${CMAKE_COMMAND}" - -D "CPPLINT_SCRIPT=${IE_MAIN_SOURCE_DIR}/scripts/cpplint.py" + -D "CPPLINT_SCRIPT=${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint.py" -D "INPUT_FILE=${source_file}" -D "OUTPUT_FILE=${output_file}" -D "WORKING_DIRECTORY=${CMAKE_CURRENT_SOURCE_DIR}" -D "SKIP_RETURN_CODE=${ENABLE_CPPLINT_REPORT}" -D "CUSTOM_FILTER=${custom_filter}" - -P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_run.cmake" + -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_run.cmake" DEPENDS "${source_file}" - "${IE_MAIN_SOURCE_DIR}/scripts/cpplint.py" - "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_run.cmake" + "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint.py" + "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_run.cmake" COMMENT "[cpplint] ${source_file}" VERBATIM) @@ -118,10 +118,10 @@ function(add_cpplint_report_target) "${CMAKE_COMMAND}" -D "FINAL_OUTPUT_FILE=${cpplint_output_file}" -D "OUTPUT_FILES=${CPPLINT_ALL_OUTPUT_FILES}" - -P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_merge.cmake" + -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_merge.cmake" DEPENDS ${CPPLINT_ALL_OUTPUT_FILES} - "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_merge.cmake" + "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_merge.cmake" COMMENT "[cpplint] Merge all output files" VERBATIM) @@ -133,19 +133,19 @@ function(add_cpplint_report_target) COMMAND "${CMAKE_COMMAND}" -D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}" - -D "CONVERT_SCRIPT=${IE_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py" + -D "CONVERT_SCRIPT=${OpenVINO_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py" -D "INPUT_FILE=${cpplint_output_file}" -D "OUTPUT_FILE=${cppcheck_output_file}" - -P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_to_cppcheck_xml.cmake" + -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_to_cppcheck_xml.cmake" DEPENDS "${cpplint_output_file}" - "${IE_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py" - "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_to_cppcheck_xml.cmake" + "${OpenVINO_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py" + 
"${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_to_cppcheck_xml.cmake" COMMENT "[cpplint] Convert to cppcheck XML format" VERBATIM) - set(report_dir "${IE_MAIN_SOURCE_DIR}/report/cpplint") + set(report_dir "${OpenVINO_MAIN_SOURCE_DIR}/report/cpplint") set(html_output_file "${report_dir}/index.html") add_custom_command( OUTPUT @@ -153,16 +153,16 @@ function(add_cpplint_report_target) COMMAND "${CMAKE_COMMAND}" -D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}" - -D "CONVERT_SCRIPT=${IE_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py" + -D "CONVERT_SCRIPT=${OpenVINO_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py" -D "INPUT_FILE=${cppcheck_output_file}" -D "REPORT_DIR=${report_dir}" - -D "SOURCE_DIR=${IE_MAIN_SOURCE_DIR}" + -D "SOURCE_DIR=${OpenVINO_MAIN_SOURCE_DIR}" -D "TITLE=${CMAKE_PROJECT_NAME}" - -P "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_html.cmake" + -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_html.cmake" DEPENDS "${cppcheck_output_file}" - "${IE_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py" - "${IE_MAIN_SOURCE_DIR}/cmake/cpplint_html.cmake" + "${OpenVINO_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py" + "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_html.cmake" COMMENT "[cpplint] Generate HTML report" VERBATIM) diff --git a/inference-engine/scripts/cpplint.py b/cmake/cpplint/cpplint.py similarity index 100% rename from inference-engine/scripts/cpplint.py rename to cmake/cpplint/cpplint.py diff --git a/inference-engine/cmake/cpplint_html.cmake b/cmake/cpplint/cpplint_html.cmake similarity index 100% rename from inference-engine/cmake/cpplint_html.cmake rename to cmake/cpplint/cpplint_html.cmake diff --git a/inference-engine/cmake/cpplint_merge.cmake b/cmake/cpplint/cpplint_merge.cmake similarity index 100% rename from inference-engine/cmake/cpplint_merge.cmake rename to cmake/cpplint/cpplint_merge.cmake diff --git a/inference-engine/cmake/cpplint_run.cmake b/cmake/cpplint/cpplint_run.cmake similarity index 100% rename from inference-engine/cmake/cpplint_run.cmake rename to cmake/cpplint/cpplint_run.cmake diff --git a/inference-engine/cmake/cpplint_to_cppcheck_xml.cmake b/cmake/cpplint/cpplint_to_cppcheck_xml.cmake similarity index 100% rename from inference-engine/cmake/cpplint_to_cppcheck_xml.cmake rename to cmake/cpplint/cpplint_to_cppcheck_xml.cmake diff --git a/cmake/developer_package.cmake b/cmake/developer_package.cmake index cda7afd2940629..b9ea3e3d3b78fd 100644 --- a/cmake/developer_package.cmake +++ b/cmake/developer_package.cmake @@ -249,3 +249,8 @@ endfunction() set_ci_build_number() include(vs_version/vs_version) + +# Code style utils + +include(cpplint/cpplint) +include(clang_format/clang_format) diff --git a/cmake/features.cmake b/cmake/features.cmake index 3bc43005abd32d..a99de90445a92f 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -46,8 +46,7 @@ ie_dependent_option (ENABLE_AVX512F "Enable AVX512 optimizations" ON "X86_64 OR ie_option (ENABLE_PROFILING_ITT "Build with ITT tracing. Optionally configure pre-built ittnotify library though INTEL_VTUNE_DIR variable." 
OFF) -# Documentation build -ie_option (ENABLE_DOCS "build docs using Doxygen" OFF) +ie_option (ENABLE_DOCS "Build docs using Doxygen" OFF) ie_dependent_option (ENABLE_FASTER_BUILD "Enable build features (PCH, UNITY) to speed up build time" OFF "CMAKE_VERSION VERSION_GREATER_EQUAL 3.16" OFF) @@ -55,8 +54,15 @@ ie_dependent_option (ENABLE_FASTER_BUILD "Enable build features (PCH, UNITY) to # FIXME: Ah this moment setting this to OFF will only build ngraph a static library ie_option (BUILD_SHARED_LIBS "Build as a shared library" ON) +ie_dependent_option(ENABLE_CPPLINT "Enable cpplint checks during the build" ON "UNIX;NOT ANDROID" OFF) + +ie_dependent_option(ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF "ENABLE_CPPLINT" OFF) + +ie_option(ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" ON) + ie_option_enum(SELECTIVE_BUILD "Enable OpenVINO conditional compilation or statistics collection. \ In case SELECTIVE_BUILD is enabled, the SELECTIVE_BUILD_STAT variable should contain the path to the collected InelSEAPI statistics. \ Usage: -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=/path/*.csv" OFF ALLOWED_VALUES ON OFF COLLECT) +set(LINKCHECKER_PY "" CACHE FILEPATH "Path to linkchecker.py for documentation check") diff --git a/cmake/os_flags.cmake b/cmake/os_flags.cmake index 0ed6e258298496..9803e7439fd271 100644 --- a/cmake/os_flags.cmake +++ b/cmake/os_flags.cmake @@ -253,10 +253,12 @@ if(WIN32) ie_add_compiler_flags(/Qdiag-disable:161,177,556,1744,1879,2586,2651,3180,11075,15335) endif() - # Debug information flags - - set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /Z7") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Z7") + # Debug information flags, by default CMake adds /Zi option + # but provides no way to specify CMAKE_COMPILE_PDB_NAME on root level + # In order to avoid issues with ninja we are replacing default flag instead of having two of them + # and observing warning D9025 about flag override + string(REPLACE "/Zi" "/Z7" CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}") + string(REPLACE "/Zi" "/Z7" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}") else() # TODO: enable for C sources as well # ie_add_compiler_flags(-Werror) diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 501564f6af2c14..5d14fd7e16bc5c 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -6,13 +6,17 @@ if(NOT ENABLE_DOCKER) add_subdirectory(snippets) # Detect nGraph - find_package(ngraph QUIET) + find_package(ngraph QUIET + PATHS "${CMAKE_BINARY_DIR}/ngraph" + NO_DEFAULT_PATH) if(NOT ngraph_FOUND) set(ngraph_DIR ${CMAKE_BINARY_DIR}/ngraph) endif() # Detect InferenceEngine - find_package(InferenceEngine QUIET) + find_package(InferenceEngine QUIET + PATHS "${CMAKE_BINARY_DIR}" + NO_DEFAULT_PATH) if(NOT InferenceEngine_FOUND) set(InferenceEngine_DIR ${CMAKE_BINARY_DIR}) endif() @@ -29,6 +33,9 @@ if(NOT ENABLE_DOCKER) foreach(target_name IN LISTS all_docs_targets) if (TARGET ${target_name}) set_target_properties(${target_name} PROPERTIES FOLDER docs) + if(WIN32) + set_target_properties(${target_name} PROPERTIES COMPILE_PDB_NAME ${target_name}) + endif() endif() endforeach() endif() @@ -50,13 +57,16 @@ function(build_docs) message(FATAL_ERROR "LATEX is required to build the documentation") endif() - set(DOCS_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}") + set(DOCS_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}") set(DOXYGEN_DIR "${OpenVINO_MAIN_SOURCE_DIR}/docs/doxygen") set(IE_SOURCE_DIR "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine") set(PYTHON_API_IN 
"${IE_SOURCE_DIR}/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx") - set(PYTHON_API_OUT "${DOCS_BINARY_DIR}/python_api/ie_api.pyx") + set(PYTHON_API_OUT "${DOCS_BUILD_DIR}/python_api/ie_api.pyx") set(C_API "${IE_SOURCE_DIR}/ie_bridges/c/include") - set(PLUGIN_API_DIR "${DOCS_BINARY_DIR}/IE_PLUGIN_DG") + set(PLUGIN_API_DIR "${DOCS_BUILD_DIR}/IE_PLUGIN_DG") + set(NGRAPH_DIR "${OpenVINO_MAIN_SOURCE_DIR}/ngraph") + set(NGRAPH_PY_DIR "${NGRAPH_DIR}/python/src/ngraph/") + set(NGRAPH_CPP_DIR "${NGRAPH_DIR}/core/include/" "${NGRAPH_DIR}/frontend/onnx_import/include") # Preprocessing scripts set(DOXY_MD_FILTER "${DOXYGEN_DIR}/doxy_md_filter.py") @@ -64,10 +74,10 @@ function(build_docs) file(GLOB_RECURSE doc_source_files LIST_DIRECTORIES true RELATIVE ${OpenVINO_MAIN_SOURCE_DIR} - "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.md" - "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.png" - "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.gif" - "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.jpg" + "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.md" + "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.png" + "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.gif" + "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.jpg" "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.md" "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.png" "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.gif" @@ -75,55 +85,81 @@ function(build_docs) configure_file(${PYTHON_API_IN} ${PYTHON_API_OUT} @ONLY) + set(NGRAPH_CPP_CONFIG_SOURCE "${DOXYGEN_DIR}/ngraph_cpp_api.config") + set(NGRAPH_PY_CONFIG_SOURCE "${DOXYGEN_DIR}/ngraph_py_api.config") set(IE_CONFIG_SOURCE "${DOXYGEN_DIR}/ie_docs.config") set(C_CONFIG_SOURCE "${DOXYGEN_DIR}/ie_c_api.config") set(PY_CONFIG_SOURCE "${DOXYGEN_DIR}/ie_py_api.config") set(PLUGIN_CONFIG_SOURCE "${DOXYGEN_DIR}/ie_plugin_api.config") - set(IE_CONFIG_BINARY "${DOCS_BINARY_DIR}/ie_docs.config") - set(C_CONFIG_BINARY "${DOCS_BINARY_DIR}/ie_c_api.config") - set(PY_CONFIG_BINARY "${DOCS_BINARY_DIR}/ie_py_api.config") - set(PLUGIN_CONFIG_BINARY "${DOCS_BINARY_DIR}/ie_plugin_api.config") + set(NGRAPH_CPP_CONFIG_BUILD "${DOCS_BUILD_DIR}/ngraph_cpp_api.config") + set(NGRAPH_PY_CONFIG_BUILD "${DOCS_BUILD_DIR}/ngraph_py_api.config") + set(IE_CONFIG_BUILD "${DOCS_BUILD_DIR}/ie_docs.config") + set(C_CONFIG_BUILD "${DOCS_BUILD_DIR}/ie_c_api.config") + set(PY_CONFIG_BUILD "${DOCS_BUILD_DIR}/ie_py_api.config") + set(PLUGIN_CONFIG_BUILD "${DOCS_BUILD_DIR}/ie_plugin_api.config") + set(NGRAPH_CPP_LAYOUT_SOURCE "${DOXYGEN_DIR}/ngraph_cpp_api.xml") + set(NGRAPH_PY_LAYOUT_SOURCE "${DOXYGEN_DIR}/ngraph_py_api.xml") set(IE_LAYOUT_SOURCE "${DOXYGEN_DIR}/ie_docs.xml") set(C_LAYOUT_SOURCE "${DOXYGEN_DIR}/ie_c_api.xml") set(PY_LAYOUT_SOURCE "${DOXYGEN_DIR}/ie_py_api.xml") set(PLUGIN_LAYOUT_SOURCE "${DOXYGEN_DIR}/ie_plugin_api.xml") - set(IE_LAYOUT_BINARY "${DOCS_BINARY_DIR}/ie_docs.xml") - set(C_LAYOUT_BINARY "${DOCS_BINARY_DIR}/ie_c_api.xml") - set(PY_LAYOUT_BINARY "${DOCS_BINARY_DIR}/ie_py_api.xml") - set(PLUGIN_LAYOUT_BINARY "${DOCS_BINARY_DIR}/ie_plugin_api.xml") + set(NGRAPH_CPP_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ngraph_cpp_api.xml") + set(NGRAPH_PY_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ngraph_py_api.xml") + set(IE_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ie_docs.xml") + set(C_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ie_c_api.xml") + set(PY_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ie_py_api.xml") + set(PLUGIN_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ie_plugin_api.xml") # Tables of contents - configure_file(${IE_LAYOUT_SOURCE} ${IE_LAYOUT_BINARY} @ONLY) - configure_file(${C_LAYOUT_SOURCE} ${C_LAYOUT_BINARY} @ONLY) - configure_file(${PY_LAYOUT_SOURCE} 
${PY_LAYOUT_BINARY} @ONLY) - configure_file(${PLUGIN_LAYOUT_SOURCE} ${PLUGIN_LAYOUT_BINARY} @ONLY) + configure_file(${NGRAPH_CPP_LAYOUT_SOURCE} ${NGRAPH_CPP_LAYOUT_BUILD} @ONLY) + configure_file(${NGRAPH_PY_LAYOUT_SOURCE} ${NGRAPH_PY_LAYOUT_BUILD} @ONLY) + configure_file(${IE_LAYOUT_SOURCE} ${IE_LAYOUT_BUILD} @ONLY) + configure_file(${C_LAYOUT_SOURCE} ${C_LAYOUT_BUILD} @ONLY) + configure_file(${PY_LAYOUT_SOURCE} ${PY_LAYOUT_BUILD} @ONLY) + configure_file(${PLUGIN_LAYOUT_SOURCE} ${PLUGIN_LAYOUT_BUILD} @ONLY) # Doxygen config files - configure_file(${IE_CONFIG_SOURCE} ${IE_CONFIG_BINARY} @ONLY) - configure_file(${C_CONFIG_SOURCE} ${C_CONFIG_BINARY} @ONLY) - configure_file(${PY_CONFIG_SOURCE} ${PY_CONFIG_BINARY} @ONLY) - configure_file(${PLUGIN_CONFIG_SOURCE} ${PLUGIN_CONFIG_BINARY} @ONLY) + configure_file(${NGRAPH_CPP_CONFIG_SOURCE} ${NGRAPH_CPP_CONFIG_BUILD} @ONLY) + configure_file(${NGRAPH_PY_CONFIG_SOURCE} ${NGRAPH_PY_CONFIG_BUILD} @ONLY) + configure_file(${IE_CONFIG_SOURCE} ${IE_CONFIG_BUILD} @ONLY) + configure_file(${C_CONFIG_SOURCE} ${C_CONFIG_BUILD} @ONLY) + configure_file(${PY_CONFIG_SOURCE} ${PY_CONFIG_BUILD} @ONLY) + configure_file(${PLUGIN_CONFIG_SOURCE} ${PLUGIN_CONFIG_BUILD} @ONLY) # Preprocessing scripts set(DOXY_MD_FILTER "${DOXYGEN_DIR}/doxy_md_filter.py") set(PYX_FILTER "${DOXYGEN_DIR}/pyx_filter.py") + # nGraph C++ API + + add_custom_target(ngraph_cpp_api + COMMAND ${DOXYGEN_EXECUTABLE} ${NGRAPH_CPP_CONFIG_BUILD} + WORKING_DIRECTORY ${DOCS_BUILD_DIR} + VERBATIM) + + # nGraph Python API + + add_custom_target(ngraph_py_api + COMMAND ${DOXYGEN_EXECUTABLE} ${NGRAPH_PY_CONFIG_BUILD} + WORKING_DIRECTORY ${DOCS_BUILD_DIR} + VERBATIM) + # C API add_custom_target(c_api - COMMAND ${DOXYGEN_EXECUTABLE} ${C_CONFIG_BINARY} - WORKING_DIRECTORY ${DOCS_BINARY_DIR} + COMMAND ${DOXYGEN_EXECUTABLE} ${C_CONFIG_BUILD} + WORKING_DIRECTORY ${DOCS_BUILD_DIR} COMMENT "Generating C API Reference" VERBATIM) # Python API add_custom_target(py_api - COMMAND ${DOXYGEN_EXECUTABLE} ${PY_CONFIG_BINARY} - WORKING_DIRECTORY ${DOCS_BINARY_DIR} + COMMAND ${DOXYGEN_EXECUTABLE} ${PY_CONFIG_BUILD} + WORKING_DIRECTORY ${DOCS_BUILD_DIR} COMMENT "Generating Python API Reference" VERBATIM) @@ -132,14 +168,6 @@ function(build_docs) COMMAND ${Python3_EXECUTABLE} ${PYX_FILTER} ${PYTHON_API_OUT} COMMENT "Pre-process Python API") - # Plugin API - - add_custom_target(plugin_api - COMMAND ${DOXYGEN_EXECUTABLE} ${PLUGIN_CONFIG_BINARY} - WORKING_DIRECTORY ${DOCS_BINARY_DIR} - COMMENT "Generating Plugin API Reference" - VERBATIM) - # Preprocess docs add_custom_target(preprocess_docs @@ -148,33 +176,56 @@ function(build_docs) foreach(source_file ${doc_source_files}) list(APPEND commands COMMAND ${CMAKE_COMMAND} -E copy - "${OpenVINO_MAIN_SOURCE_DIR}/${source_file}" "${DOCS_BINARY_DIR}/${source_file}") + "${OpenVINO_MAIN_SOURCE_DIR}/${source_file}" "${DOCS_BUILD_DIR}/${source_file}") endforeach() add_custom_command(TARGET preprocess_docs PRE_BUILD ${commands} - COMMAND ${Python3_EXECUTABLE} ${DOXY_MD_FILTER} ${DOCS_BINARY_DIR} + COMMAND ${Python3_EXECUTABLE} ${DOXY_MD_FILTER} ${DOCS_BUILD_DIR} COMMENT "Pre-process markdown and image links") # IE dev guide and C++ API add_custom_target(ie_docs - DEPENDS preprocess_docs - COMMAND ${DOXYGEN_EXECUTABLE} ${IE_CONFIG_BINARY} - WORKING_DIRECTORY ${DOCS_BINARY_DIR} + DEPENDS ngraph_cpp_api preprocess_docs + COMMAND ${DOXYGEN_EXECUTABLE} ${IE_CONFIG_BUILD} + WORKING_DIRECTORY ${DOCS_BUILD_DIR} + VERBATIM) + + # Plugin API + + add_custom_target(plugin_api + DEPENDS ngraph_cpp_api ie_docs + 
COMMAND ${DOXYGEN_EXECUTABLE} ${PLUGIN_CONFIG_BUILD} + WORKING_DIRECTORY ${DOCS_BUILD_DIR} + COMMENT "Generating Plugin API Reference" VERBATIM) # Umbrella OpenVINO target add_custom_target(openvino_docs - DEPENDS c_api py_api ie_docs plugin_api + DEPENDS ngraph_cpp_api ngraph_py_api c_api py_api ie_docs plugin_api COMMENT "Generating OpenVINO documentation" VERBATIM) set_target_properties(openvino_docs ie_docs c_api py_api preprocess_docs plugin_api + ngraph_py_api ngraph_cpp_api PROPERTIES FOLDER docs) + # added linkchecker + + if(EXISTS "${LINKCHECKER_PY}") + add_custom_target(docs_check + COMMAND ${Python3_EXECUTABLE} "${LINKCHECKER_PY}" + "${DOCS_BUILD_DIR}/html/" -f "${DOXYGEN_DIR}/linkchecker_filter.yaml" + --no_recursive -l "${DOCS_BUILD_DIR}" + COMMENT "Check links in generated documentation" + WORKING_DIRECTORY "${DOCS_BUILD_DIR}" + VERBATIM) + set_target_properties(docs_check PROPERTIES FOLDER docs) + endif() + find_program(browser NAMES xdg-open) if(browser) add_custom_target(ie_docs_open diff --git a/docs/IE_DG/API_Changes.md b/docs/IE_DG/API_Changes.md index cd3311b6a22fc7..41681e58d8a3ad 100644 --- a/docs/IE_DG/API_Changes.md +++ b/docs/IE_DG/API_Changes.md @@ -8,16 +8,22 @@ The sections below contain detailed list of changes made to the Inference Engine **State API** - * InferRequest::QueryState query state value of network on current infer request - * IVariableState class instead of IMemoryState (rename) - * IVariableState::GetState instead of IMemoryState::GetLastState (rename) + * InferenceEngine::InferRequest::QueryState query state value of network on current infer request + * InferenceEngine::IVariableState class instead of IMemoryState (rename) + * InferenceEngine::IVariableState::GetState instead of IMemoryState::GetLastState (rename) + + **BatchedBlob** - represents an InferenceEngine::BatchedBlob containing other blobs - one per batch. + + **Transformations API** - added a new header `ie_transformations.hpp` which contains transformations for the InferenceEngine::CNNNetwork object. Such transformations can be called prior to loading the network for compilation on a particular device: + + * InferenceEngine::LowLatency ### Deprecated API **State API** - * ExecutableNetwork::QueryState - use InferRequest::QueryState - * IVariableState::GetLastState - use IVariableState::GetState + * InferenceEngine::ExecutableNetwork::QueryState - use InferenceEngine::InferRequest::QueryState + * InferenceEngine::IVariableState::GetLastState - use InferenceEngine::IVariableState::GetState ## 2021.1 diff --git a/docs/IE_DG/Bfloat16Inference.md b/docs/IE_DG/Bfloat16Inference.md index 8e2028ea773a9d..e814a8948c44bb 100644 --- a/docs/IE_DG/Bfloat16Inference.md +++ b/docs/IE_DG/Bfloat16Inference.md @@ -20,7 +20,7 @@ There are two ways to check if CPU device can support bfloat16 computations for 1. Query the instruction set via system `lscpu | grep avx512_bf16` or `cat /proc/cpuinfo | grep avx512_bf16`. 2.
Use [Query API](InferenceEngine_QueryAPI.md) with `METRIC_KEY(OPTIMIZATION_CAPABILITIES)`, which should return `BF16` in the list of CPU optimization options: -@snippet openvino/docs/snippets/Bfloat16Inference0.cpp part0 +@snippet snippets/Bfloat16Inference0.cpp part0 Current Inference Engine solution for bfloat16 inference uses Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN) and supports inference of the following layers in BF16 computation mode: * Convolution @@ -46,11 +46,11 @@ Bfloat16 data usage provides the following benefits that increase performance: For default optimization on CPU, source model converts from FP32 or FP16 to BF16 and executes internally on platforms with native BF16 support. In that case, `KEY_ENFORCE_BF16` is set to `YES`. The code below demonstrates how to check if the key is set: -@snippet openvino/docs/snippets/Bfloat16Inference1.cpp part1 +@snippet snippets/Bfloat16Inference1.cpp part1 To disable BF16 internal transformations, set the `KEY_ENFORCE_BF16` to `NO`. In this case, the model infers AS IS without modifications with precisions that were set on each layer edge. -@snippet openvino/docs/snippets/Bfloat16Inference2.cpp part2 +@snippet snippets/Bfloat16Inference2.cpp part2 An exception with message `Platform doesn't support BF16 format` is formed in case of setting `KEY_ENFORCE_BF16` to `YES` on CPU without native BF16 support. diff --git a/docs/IE_DG/DynamicBatching.md b/docs/IE_DG/DynamicBatching.md index 3f4df0ce843300..a05c218b6193e3 100644 --- a/docs/IE_DG/DynamicBatching.md +++ b/docs/IE_DG/DynamicBatching.md @@ -18,7 +18,7 @@ The batch size that was set in passed CNNNetwork object will be use Here is a code example: -@snippet openvino/docs/snippets/DynamicBatching.cpp part0 +@snippet snippets/DynamicBatching.cpp part0 ## Limitations diff --git a/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md b/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md index 383ce0ec9f9cca..42eda8f83c0fa4 100644 --- a/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md +++ b/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md @@ -20,7 +20,7 @@ To add your custom nGraph operation, create a new class that extends `ngraph::Op Based on that, declaration of a operation class can look as follows: -@snippet op.hpp op:header +@snippet template_extension/op.hpp op:header ### Class Fields @@ -33,37 +33,37 @@ The provided implementation has several fields: nGraph operation contains two constructors: a default constructor, which allows to create operation without attributes and a constructor that creates and validates operation with specified inputs and attributes. -@snippet op.cpp op:ctor +@snippet template_extension/op.cpp op:ctor ### `validate_and_infer_types()` `ngraph::Node::validate_and_infer_types` method validates operation attributes and calculates output shapes using attributes of operation. -@snippet op.cpp op:validate +@snippet template_extension/op.cpp op:validate ### `clone_with_new_inputs()` `ngraph::Node::clone_with_new_inputs` method creates a copy of nGraph operation with new inputs. -@snippet op.cpp op:copy +@snippet template_extension/op.cpp op:copy ### `visit_attributes()` `ngraph::Node::visit_attributes` method allows to visit all operation attributes. -@snippet op.cpp op:visit_attributes +@snippet template_extension/op.cpp op:visit_attributes ### `evaluate()` `ngraph::Node::evaluate` method allows to apply constant folding to an operation. 
-@snippet op.cpp op:evaluate +@snippet template_extension/op.cpp op:evaluate ## Register Custom Operations in Extension Class To add custom operations to the [Extension](Extension.md) class, create an operation set with custom operations and implement the `InferenceEngine::IExtension::getOpSets` method: -@snippet extension.cpp extension:getOpSets +@snippet template_extension/extension.cpp extension:getOpSets This method returns a map of opsets that exist in the extension library. diff --git a/docs/IE_DG/Extensibility_DG/CPU_Kernel.md b/docs/IE_DG/Extensibility_DG/CPU_Kernel.md index d04e47858d1ea3..205ae64a6e1825 100644 --- a/docs/IE_DG/Extensibility_DG/CPU_Kernel.md +++ b/docs/IE_DG/Extensibility_DG/CPU_Kernel.md @@ -7,7 +7,7 @@ The primary vehicle for the performance of the CPU codepath in the Inference Eng All custom kernels for the CPU plugin should be inherited from the InferenceEngine::ILayerExecImpl interface. Based on that, declaration of a kernel implementation class can look as follows: -@snippet cpu_kernel.hpp cpu_implementation:header +@snippet template_extension/cpu_kernel.hpp cpu_implementation:header ### Class Fields @@ -22,25 +22,25 @@ The provided implementation has several fields: An implementation constructor checks parameters of nGraph operation, stores needed attributes, and stores an error message in the case of an error. -@snippet cpu_kernel.cpp cpu_implementation:ctor +@snippet template_extension/cpu_kernel.cpp cpu_implementation:ctor ### `getSupportedConfigurations` InferenceEngine::ILayerExecImpl::getSupportedConfigurations method returns all supported configuration formats (input/output tensor layouts) for your implementation. To specify formats of data, use InferenceEngine::TensorDesc. Refer to the [Memory Primitives](../Memory_primitives.md) section for instructions on how to do it. -@snippet cpu_kernel.cpp cpu_implementation:getSupportedConfigurations +@snippet template_extension/cpu_kernel.cpp cpu_implementation:getSupportedConfigurations ### `init` InferenceEngine::ILayerExecImpl::init method gets a runtime-selected configuration from a vector that is populated from the `getSupportedConfigurations` method and checks the parameters: -@snippet cpu_kernel.cpp cpu_implementation:init +@snippet template_extension/cpu_kernel.cpp cpu_implementation:init ### `execute` InferenceEngine::ILayerExecImpl::execute method accepts and processes the actual tenors as input/output blobs: -@snippet cpu_kernel.cpp cpu_implementation:execute +@snippet template_extension/cpu_kernel.cpp cpu_implementation:execute ## Register Implementation in `Extension` Class @@ -52,18 +52,18 @@ To register custom kernel implementation in the [Extension](Extension.md) class, InferenceEngine::IExtension::getImplTypes returns a vector of implementation types for an operation. -@snippet extension.cpp extension:getImplTypes +@snippet template_extension/extension.cpp extension:getImplTypes ### getImplementation InferenceEngine::IExtension::getImplementation returns the kernel implementation with a specified type for an operation. 
-@snippet extension.cpp extension:getImplementation +@snippet template_extension/extension.cpp extension:getImplementation ## Load Extension with Executable Kernels to Plugin Use the `AddExtension` method of the general plugin interface to load your primitives: -@snippet openvino/docs/snippets/CPU_Kernel.cpp part0 +@snippet snippets/CPU_Kernel.cpp part0 diff --git a/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md b/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md index 47d80ba8ca921a..0999679ae0caa2 100644 --- a/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md +++ b/docs/IE_DG/Extensibility_DG/Custom_ONNX_Ops.md @@ -38,12 +38,12 @@ If operator is no longer needed, it can be unregistered by calling `unregister_o The same principles apply when registering custom ONNX operator based on custom nGraph operations. This example shows how to register custom ONNX operator based on `Operation` presented in [this tutorial](AddingNGraphOps.md), which is used in [TemplateExtension](Extension.md). -@snippet extension.cpp extension:ctor +@snippet template_extension/extension.cpp extension:ctor Here, the `register_operator` function is called in Extension's constructor, which makes sure that it is called before InferenceEngine::Core::ReadNetwork (since InferenceEngine::Core::AddExtension must be called before a model with custom operator is read). The example below demonstrates how to unregister operator from Extension's destructor: -@snippet extension.cpp extension:dtor +@snippet template_extension/extension.cpp extension:dtor Note that it is mandatory to unregister custom ONNX operator if it is defined in dynamic shared library. ## Requirements for building with CMake diff --git a/docs/IE_DG/Extensibility_DG/Extension.md b/docs/IE_DG/Extensibility_DG/Extension.md index 3bc96f90376ce8..6df3a1424ec0e4 100644 --- a/docs/IE_DG/Extensibility_DG/Extension.md +++ b/docs/IE_DG/Extensibility_DG/Extension.md @@ -5,11 +5,11 @@ All extension libraries should be inherited from this interface. Based on that, declaration of an extension class can look as follows: -@snippet extension.hpp extension:header +@snippet template_extension/extension.hpp extension:header The extension library should contain and export the method InferenceEngine::CreateExtension, which creates an `Extension` class: -@snippet extension.cpp extension:CreateExtension +@snippet template_extension/extension.cpp extension:CreateExtension Also, an `Extension` object should implement the following methods: @@ -17,7 +17,7 @@ Also, an `Extension` object should implement the following methods: * InferenceEngine::IExtension::GetVersion returns information about version of the library -@snippet extension.cpp extension:GetVersion +@snippet template_extension/extension.cpp extension:GetVersion Implement the InferenceEngine::IExtension::getOpSets method if the extension contains custom layers. Read the [guide about custom operations](AddingNGraphOps.md) for more information. 
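The `@snippet` path changes in the documentation hunks above and below (dropping the `openvino/docs/` prefix, or pointing at `template_extension/...`) rely on how Doxygen resolves `@snippet`: the file argument is looked up in the directories listed in the EXAMPLE_PATH option of the Doxygen config. The configs themselves (`ie_docs.config` and friends) are not shown in this patch; as a minimal sketch, assuming they are templated via `configure_file(... @ONLY)` as in `docs/CMakeLists.txt` and point EXAMPLE_PATH at the `docs` folder, the relevant setting could look like:

    # Hypothetical excerpt of docs/doxygen/ie_docs.config; the value is assumed, not taken from this patch
    EXAMPLE_PATH = "@OpenVINO_MAIN_SOURCE_DIR@/docs"
    # With docs/ on EXAMPLE_PATH, references such as "@snippet snippets/CPU_Kernel.cpp part0" and
    # "@snippet template_extension/extension.cpp extension:getOpSets" resolve relative to docs/,
    # so the old "openvino/docs/" prefix is no longer needed.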
diff --git a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md index 7a0d794457c890..a918076e756112 100644 --- a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md +++ b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md @@ -7,7 +7,7 @@ There are two options of using custom layer configuration file: * Include a section with your kernels into the global automatically-loaded `cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file, which is hosted in the `/deployment_tools/inference_engine/bin/intel64/{Debug/Release}` folder * Call the `InferenceEngine::Core::SetConfig()` method from your application with the `InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE` key and the configuration file name as a value before loading the network that uses custom layers to the plugin: -@snippet openvino/docs/snippets/GPU_Kernel.cpp part0 +@snippet snippets/GPU_Kernel.cpp part0 All Inference Engine samples, except trivial `hello_classification`, feature a dedicated command-line option `-c` to load custom kernels. For example, to load custom layers for the classification sample, run the command below: @@ -227,7 +227,7 @@ floating-point, and integer kernel parameters. To get the dump, add the following line to your code that configures the GPU plugin to output the custom kernels: -@snippet openvino/docs/snippets/GPU_Kernel.cpp part1 +@snippet snippets/GPU_Kernel.cpp part1 When the Inference Engine compiles the kernels for the specific network, it also outputs the resulting code for the custom kernels. In the diff --git a/docs/IE_DG/GPU_Kernels_Tuning.md b/docs/IE_DG/GPU_Kernels_Tuning.md index 47ed958bf24063..4bbe315e42c2f3 100644 --- a/docs/IE_DG/GPU_Kernels_Tuning.md +++ b/docs/IE_DG/GPU_Kernels_Tuning.md @@ -30,7 +30,7 @@ File with tuned data is the result of this step. The example below shows how to set and use the key files: -@snippet openvino/docs/snippets/GPU_Kernels_Tuning.cpp part0 +@snippet snippets/GPU_Kernels_Tuning.cpp part0 --- diff --git a/docs/IE_DG/Glossary.md b/docs/IE_DG/Glossary.md index 047d4484a6682b..5a05757977a6eb 100644 --- a/docs/IE_DG/Glossary.md +++ b/docs/IE_DG/Glossary.md @@ -72,7 +72,7 @@ Glossary of terms used in the Inference Engine | InferenceEngineProfileInfo | Represents basic inference profiling information per layer | | Inference Engine | A C++ library with a set of classes that you can use in your application to infer input data (images) and get the result | | Inference Engine API | The basic default API for all supported devices, which allows you to load a model from Intermediate Representation, set input and output formats and execute the model on various devices | -| Inference Engine Core | Inference Engine Core is a software component that manages inference on certain Intel(R) hardware devices: CPU, GPU, MYRIAD, GNA, etc. | +| Inference Engine Core | Inference Engine Core is a software component that manages inference on certain Intel(R) hardware devices: CPU, GPU, MYRIAD, GNA, etc. | | Layer catalog or Operations specification | A list of supported layers or operations and its parameters. Sets of supported layers are different for different plugins, please check the documentation on plugins to verify if the Inference Engine supports certain layer on the dedicated hardware | | Layout | Image data layout refers to the representation of images batch. Layout shows a sequence of 4D or 5D tensor data in memory. 
A typical NCHW format represents pixel in horizontal direction, rows by vertical dimension, planes by channel and images into batch | | OutputsDataMap | Structure which contains information about output precisions and layouts | diff --git a/docs/IE_DG/InferenceEngine_QueryAPI.md b/docs/IE_DG/InferenceEngine_QueryAPI.md index 9ee5beaa479494..788c2d580324a9 100644 --- a/docs/IE_DG/InferenceEngine_QueryAPI.md +++ b/docs/IE_DG/InferenceEngine_QueryAPI.md @@ -23,7 +23,7 @@ The `InferenceEngine::ExecutableNetwork` class is also extended to support the Q ### GetAvailableDevices -@snippet openvino/docs/snippets/InferenceEngine_QueryAPI0.cpp part0 +@snippet snippets/InferenceEngine_QueryAPI0.cpp part0 The function returns list of available devices, for example: ``` @@ -46,7 +46,7 @@ Each device name can then be passed to: The code below demonstrates how to understand whether `HETERO` device dumps `.dot` files with split graphs during the split stage: -@snippet openvino/docs/snippets/InferenceEngine_QueryAPI1.cpp part1 +@snippet snippets/InferenceEngine_QueryAPI1.cpp part1 For documentation about common configuration keys, refer to `ie_plugin_config.hpp`. Device specific configuration keys can be found in corresponding plugin folders. @@ -54,7 +54,7 @@ For documentation about common configuration keys, refer to `ie_plugin_config.hp * To extract device properties such as available device, device name, supported configuration keys, and others, use the `InferenceEngine::Core::GetMetric` method: -@snippet openvino/docs/snippets/InferenceEngine_QueryAPI2.cpp part2 +@snippet snippets/InferenceEngine_QueryAPI2.cpp part2 A returned value looks as follows: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz`. @@ -66,17 +66,17 @@ A returned value looks as follows: `Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz`. The method is used to get executable network specific metric such as `METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)`: -@snippet openvino/docs/snippets/InferenceEngine_QueryAPI3.cpp part3 +@snippet snippets/InferenceEngine_QueryAPI3.cpp part3 Or the current temperature of `MYRIAD` device: -@snippet openvino/docs/snippets/InferenceEngine_QueryAPI4.cpp part4 +@snippet snippets/InferenceEngine_QueryAPI4.cpp part4 ### GetConfig() The method is used to get information about configuration values the executable network has been created with: -@snippet openvino/docs/snippets/InferenceEngine_QueryAPI5.cpp part5 +@snippet snippets/InferenceEngine_QueryAPI5.cpp part5 ### SetConfig() diff --git a/docs/IE_DG/Integrate_with_customer_application_new_API.md b/docs/IE_DG/Integrate_with_customer_application_new_API.md index e9909848f6533a..108c7cd06f3404 100644 --- a/docs/IE_DG/Integrate_with_customer_application_new_API.md +++ b/docs/IE_DG/Integrate_with_customer_application_new_API.md @@ -29,20 +29,20 @@ Integration process includes the following steps: 1) **Create Inference Engine Core** to manage available devices and read network objects: -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part0 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part0 2) **Read a model IR** created by the Model Optimizer (.xml is supported format): -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part1 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part1 **Or read the model from ONNX format** (.onnx and .prototxt are supported formats). 
You can find more information about the ONNX format support in the document [ONNX format support in the OpenVINO™](./ONNX_Support.md). -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part2 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part2 3) **Configure input and output**. Request input and output information using `InferenceEngine::CNNNetwork::getInputsInfo()`, and `InferenceEngine::CNNNetwork::getOutputsInfo()` methods: -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part3 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part3 Optionally, set the number format (precision) and memory layout for inputs and outputs. Refer to the [Supported configurations](supported_plugins/Supported_Devices.md) chapter to choose the relevant configuration. @@ -67,7 +67,7 @@ methods: You can use the following code snippet to configure input and output: -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part4 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part4 > **NOTE**: NV12 input color format pre-processing differs from other color conversions. In case of NV12, > Inference Engine expects two separate image planes (Y and UV). You must use a specific @@ -91,31 +91,31 @@ methods: 4) **Load the model** to the device using `InferenceEngine::Core::LoadNetwork()`: -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part5 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part5 It creates an executable network from a network object. The executable network is associated with single hardware device. It is possible to create as many networks as needed and to use them simultaneously (up to the limitation of the hardware resources). Third parameter is a configuration for plugin. It is map of pairs: (parameter name, parameter value). Choose device from [Supported devices](supported_plugins/Supported_Devices.md) page for more details about supported configuration parameters. -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part6 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part6 5) **Create an infer request**: -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part7 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part7 6) **Prepare input**. You can use one of the following options to prepare input: * **Optimal way for a single network.** Get blobs allocated by an infer request using `InferenceEngine::InferRequest::GetBlob()` and feed an image and the input data to the blobs. In this case, input data must be aligned (resized manually) with a given blob size and have a correct color format. -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part8 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part8 * **Optimal way for a cascade of networks (output of one network is input for another).** Get output blob from the first request using `InferenceEngine::InferRequest::GetBlob()` and set it as input for the second request using `InferenceEngine::InferRequest::SetBlob()`. 
-@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part9 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part9 * **Optimal way to handle ROI (a ROI object located inside of input of one network is input for another).** It is possible to re-use shared input by several networks. You do not need to allocate separate input blob for a network if @@ -126,7 +126,7 @@ methods: ROI without allocation of new memory using `InferenceEngine::make_shared_blob()` with passing of `InferenceEngine::Blob::Ptr` and `InferenceEngine::ROI` as parameters. -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part10 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part10 Make sure that shared input is kept valid during execution of each network. Otherwise, ROI blob may be corrupted if the original input blob (that ROI is cropped from) has already been rewritten. @@ -134,7 +134,7 @@ methods: * Allocate input blobs of the appropriate types and sizes, feed an image and the input data to the blobs, and call `InferenceEngine::InferRequest::SetBlob()` to set these blobs for an infer request: -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part11 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part11 A blob can be filled before and after `SetBlob()`. @@ -157,11 +157,11 @@ methods: 7) **Do inference** by calling the `InferenceEngine::InferRequest::StartAsync` and `InferenceEngine::InferRequest::Wait` methods for asynchronous request: -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part12 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part12 or by calling the `InferenceEngine::InferRequest::Infer` method for synchronous request: -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part13 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part13 `StartAsync` returns immediately and starts inference without blocking main thread, `Infer` blocks main thread and returns when inference is completed. @@ -185,7 +185,7 @@ exception. Note that casting `Blob` to `TBlob` via `std::dynamic_pointer_cast` is not recommended way, better to access data via `buffer()` and `as()` methods as follows: -@snippet openvino/docs/snippets/Integrate_with_customer_application_new_API.cpp part14 +@snippet snippets/Integrate_with_customer_application_new_API.cpp part14 ## Build Your Application diff --git a/docs/IE_DG/Migration_CoreAPI.md b/docs/IE_DG/Migration_CoreAPI.md index 5edac6052633f8..d49bd425bc87c6 100644 --- a/docs/IE_DG/Migration_CoreAPI.md +++ b/docs/IE_DG/Migration_CoreAPI.md @@ -27,44 +27,44 @@ Common migration process includes the following steps: 1. Migrate from the `InferenceEngine::InferencePlugin` initialization: -@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part0 +@snippet snippets/Migration_CoreAPI.cpp part0 to the `InferenceEngine::Core` class initialization: -@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part1 +@snippet snippets/Migration_CoreAPI.cpp part1 2. 
Instead of using `InferenceEngine::CNNNetReader` to read IR: -@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part2 +@snippet snippets/Migration_CoreAPI.cpp part2 read networks using the Core class: -@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part3 +@snippet snippets/Migration_CoreAPI.cpp part3 The Core class also allows reading models from the ONNX format (more information is [here](./ONNX_Support.md)): -@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part4 +@snippet snippets/Migration_CoreAPI.cpp part4 3. Instead of adding CPU device extensions to the plugin: -@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part5 +@snippet snippets/Migration_CoreAPI.cpp part5 add extensions to CPU device using the Core class: -@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part6 +@snippet snippets/Migration_CoreAPI.cpp part6 4. Instead of setting configuration keys to a particular plugin, set (key, value) pairs via `InferenceEngine::Core::SetConfig` -@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part7 +@snippet snippets/Migration_CoreAPI.cpp part7 > **NOTE**: If `deviceName` is omitted as the last argument, configuration is set for all Inference Engine devices. 5. Migrate from loading the network to a particular plugin: -@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part8 +@snippet snippets/Migration_CoreAPI.cpp part8 to `InferenceEngine::Core::LoadNetwork` to a particular device: -@snippet openvino/docs/snippets/Migration_CoreAPI.cpp part9 +@snippet snippets/Migration_CoreAPI.cpp part9 After you have an instance of `InferenceEngine::ExecutableNetwork`, all other steps are as usual. diff --git a/docs/IE_DG/OnnxImporterTutorial.md b/docs/IE_DG/OnnxImporterTutorial.md index 5f63e1754f0f71..f4538633a7e805 100644 --- a/docs/IE_DG/OnnxImporterTutorial.md +++ b/docs/IE_DG/OnnxImporterTutorial.md @@ -18,7 +18,7 @@ Two categories of API functions: To list all supported ONNX ops in a specific version and domain, use the `get_supported_operators` as shown in the example below: -@snippet openvino/docs/snippets/OnnxImporterTutorial0.cpp part0 +@snippet snippets/OnnxImporterTutorial0.cpp part0 The above code produces a list of all the supported operators for the `version` and `domain` you specified and outputs a list similar to this: ```cpp @@ -30,7 +30,7 @@ Xor To determine whether a specific ONNX operator in a particular version and domain is supported by the importer, use the `is_operator_supported` function as shown in the example below: -@snippet openvino/docs/snippets/OnnxImporterTutorial1.cpp part1 +@snippet snippets/OnnxImporterTutorial1.cpp part1 ## Import ONNX Model @@ -55,13 +55,13 @@ As it was shown in [Build a Model with nGraph Library](../nGraph_DG/build_functi The code below shows how to convert the ONNX ResNet50 model to the nGraph function using `import_onnx_model` with the stream as an input: -@snippet openvino/docs/snippets/OnnxImporterTutorial2.cpp part2 +@snippet snippets/OnnxImporterTutorial2.cpp part2 ### Filepath as Input The code below shows how to convert the ONNX ResNet50 model to the nGraph function using `import_onnx_model` with the filepath as an input: -@snippet openvino/docs/snippets/OnnxImporterTutorial3.cpp part3 +@snippet snippets/OnnxImporterTutorial3.cpp part3 [onnx_header]: https://github.com/NervanaSystems/ngraph/blob/master/src/ngraph/frontend/onnx_import/onnx.hpp [onnx_model_zoo]: https://github.com/onnx/models diff --git a/docs/IE_DG/ShapeInference.md b/docs/IE_DG/ShapeInference.md index f684b4a38cc1b5..a7cdddb784d676 100644 
--- a/docs/IE_DG/ShapeInference.md +++ b/docs/IE_DG/ShapeInference.md @@ -94,7 +94,7 @@ The algorithm for resizing network is the following: Here is a code example: -@snippet openvino/docs/snippets/ShapeInference.cpp part0 +@snippet snippets/ShapeInference.cpp part0 Shape Inference feature is used in [Smart classroom sample](@ref omz_demos_smart_classroom_demo_README). diff --git a/docs/IE_DG/inference_engine_intro.md b/docs/IE_DG/inference_engine_intro.md index dbcc3244a13a5f..41e1b1dd1b08e8 100644 --- a/docs/IE_DG/inference_engine_intro.md +++ b/docs/IE_DG/inference_engine_intro.md @@ -16,17 +16,17 @@ For complete API Reference, see the [Inference Engine API References](./api_refe Inference Engine uses a plugin architecture. Inference Engine plugin is a software component that contains complete implementation for inference on a certain Intel® hardware device: CPU, GPU, VPU, etc. Each plugin implements the unified API and provides additional hardware-specific APIs. Modules in the Inference Engine component ---------------------------------------- +----------------------------------------- ### Core Inference Engine Libraries ### Your application must link to the core Inference Engine libraries: * Linux* OS: - - `libinference_engine.so`, which depends on `libinference_engine_transformations.so` and `libngraph.so` - - `libinference_engine_legacy.so`, which depends on `libtbb.so` + - `libinference_engine.so`, which depends on `libinference_engine_transformations.so`, `libtbb.so`, `libtbbmalloc.so` and `libngraph.so` * Windows* OS: - - `inference_engine.dll`, which depends on `inference_engine_transformations.dll` and `ngraph.dll` - - `inference_engine_legacy.dll`, which depends on `tbb.dll` + - `inference_engine.dll`, which depends on `inference_engine_transformations.dll`, `tbb.dll`, `tbbmalloc.dll` and `ngraph.dll` +* macOS*: + - `libinference_engine.dylib`, which depends on `libinference_engine_transformations.dylib`, `libtbb.dylib`, `libtbbmalloc.dylib` and `libngraph.dylib` The required C++ header files are located in the `include` directory. @@ -49,26 +49,26 @@ Starting from 2020.4 release, Inference Engine introduced a concept of `CNNNetwo For each supported target device, Inference Engine provides a plugin — a DLL/shared library that contains complete implementation for inference on this particular device. 
The following plugins are available: -| Plugin | Device Type | -| ------------- | ------------- | -|CPU| Intel® Xeon® with Intel® AVX2 and AVX512, Intel® Core™ Processors with Intel® AVX2, Intel® Atom® Processors with Intel® SSE | -|GPU| Intel® Processor Graphics, including Intel® HD Graphics and Intel® Iris® Graphics -|MYRIAD| Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X| -|GNA| Intel® Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel® Pentium® Silver J5005 Processor, Intel® Pentium® Silver N5000 Processor, Intel® Celeron® J4005 Processor, Intel® Celeron® J4105 Processor, Intel® Celeron® Processor N4100, Intel® Celeron® Processor N4000, Intel® Core™ i3-8121U Processor, Intel® Core™ i7-1065G7 Processor, Intel® Core™ i7-1060G7 Processor, Intel® Core™ i5-1035G4 Processor, Intel® Core™ i5-1035G7 Processor, Intel® Core™ i5-1035G1 Processor, Intel® Core™ i5-1030G7 Processor, Intel® Core™ i5-1030G4 Processor, Intel® Core™ i3-1005G1 Processor, Intel® Core™ i3-1000G1 Processor, Intel® Core™ i3-1000G4 Processor -|HETERO|Automatic splitting of a network inference between several devices (for example if a device doesn't support certain layers| -|MULTI| Simultaneous inference of the same network on several devices in parallel| - -The table below shows the plugin libraries and additional dependencies for Linux and Windows platforms. - -| Plugin | Library name for Linux | Dependency libraries for Linux | Library name for Windows | Dependency libraries for Windows | -|--------|------------------------|-------------------------------------------------|--------------------------|--------------------------------------------------------------------------------------------------------| -| CPU | `libMKLDNNPlugin.so` | `libinference_engine_lp_transformations.so` | `MKLDNNPlugin.dll` | `inference_engine_lp_transformations.dll` | -| GPU | `libclDNNPlugin.so` | `libinference_engine_lp_transformations.so`, `libOpenCL.so` | `clDNNPlugin.dll` | `OpenCL.dll`, `inference_engine_lp_transformations.dll` | -| MYRIAD | `libmyriadPlugin.so` | `libusb.so`, `libinference_engine_lp_transformations.so` | `myriadPlugin.dll` | `usb.dll`, `inference_engine_lp_transformations.dll` | -| HDDL | `libHDDLPlugin.so` | `libbsl.so`, `libhddlapi.so`, `libmvnc-hddl.so`, `libinference_engine_lp_transformations.so`| `HDDLPlugin.dll` | `bsl.dll`, `hddlapi.dll`, `json-c.dll`, `libcrypto-1_1-x64.dll`, `libssl-1_1-x64.dll`, `mvnc-hddl.dll`, `inference_engine_lp_transformations.dll` | -| GNA | `libGNAPlugin.so` | `libgna.so`, `libinference_engine_lp_transformations.so` | `GNAPlugin.dll` | `gna.dll`, `inference_engine_lp_transformations.dll` | -| HETERO | `libHeteroPlugin.so` | Same as for selected plugins | `HeteroPlugin.dll` | Same as for selected plugins | -| MULTI | `libMultiDevicePlugin.so` | Same as for selected plugins | `MultiDevicePlugin.dll` | Same as for selected plugins | +| Plugin | Device Type | +| ------- | ----------------------------- | +|CPU | Intel® Xeon® with Intel® AVX2 and AVX512, Intel® Core™ Processors with Intel® AVX2, Intel® Atom® Processors with Intel® SSE | +|GPU | Intel® Processor Graphics, including Intel® HD Graphics and Intel® Iris® Graphics | +|MYRIAD | Intel® Neural Compute Stick 2 powered by the Intel® Movidius™ Myriad™ X | +|GNA | Intel® Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel® Pentium® Silver J5005 Processor, Intel® Pentium® Silver N5000 Processor, Intel® Celeron® J4005 Processor, Intel® Celeron® J4105 
Processor, Intel® Celeron® Processor N4100, Intel® Celeron® Processor N4000, Intel® Core™ i3-8121U Processor, Intel® Core™ i7-1065G7 Processor, Intel® Core™ i7-1060G7 Processor, Intel® Core™ i5-1035G4 Processor, Intel® Core™ i5-1035G7 Processor, Intel® Core™ i5-1035G1 Processor, Intel® Core™ i5-1030G7 Processor, Intel® Core™ i5-1030G4 Processor, Intel® Core™ i3-1005G1 Processor, Intel® Core™ i3-1000G1 Processor, Intel® Core™ i3-1000G4 Processor | +|HETERO | Automatic splitting of a network inference between several devices (for example if a device doesn't support certain layers| +|MULTI | Simultaneous inference of the same network on several devices in parallel| + +The table below shows the plugin libraries and additional dependencies for Linux, Windows and macOS platforms. + +| Plugin | Library name for Linux | Dependency libraries for Linux | Library name for Windows | Dependency libraries for Windows | Library name for macOS | Dependency libraries for macOS | +|--------|-----------------------------|-------------------------------------------------------------|--------------------------|--------------------------------------------------------------------------------------------------------|------------------------------|---------------------------------------------| +| CPU | `libMKLDNNPlugin.so` | `libinference_engine_lp_transformations.so` | `MKLDNNPlugin.dll` | `inference_engine_lp_transformations.dll` | `libMKLDNNPlugin.dylib` | `inference_engine_lp_transformations.dylib` | +| GPU | `libclDNNPlugin.so` | `libinference_engine_lp_transformations.so`, `libOpenCL.so` | `clDNNPlugin.dll` | `OpenCL.dll`, `inference_engine_lp_transformations.dll` | Is not supported | - | +| MYRIAD | `libmyriadPlugin.so` | `libusb.so`, | `myriadPlugin.dll` | `usb.dll` | `libmyriadPlugin.dylib` | `libusb.dylib` | +| HDDL | `libHDDLPlugin.so` | `libbsl.so`, `libhddlapi.so`, `libmvnc-hddl.so` | `HDDLPlugin.dll` | `bsl.dll`, `hddlapi.dll`, `json-c.dll`, `libcrypto-1_1-x64.dll`, `libssl-1_1-x64.dll`, `mvnc-hddl.dll` | Is not supported | - | +| GNA | `libGNAPlugin.so` | `libgna.so`, | `GNAPlugin.dll` | `gna.dll` | Is not supported | - | +| HETERO | `libHeteroPlugin.so` | Same as for selected plugins | `HeteroPlugin.dll` | Same as for selected plugins | `libHeteroPlugin.dylib` | Same as for selected plugins | +| MULTI | `libMultiDevicePlugin.so` | Same as for selected plugins | `MultiDevicePlugin.dll` | Same as for selected plugins | `libMultiDevicePlugin.dylib` | Same as for selected plugins | > **NOTE**: All plugin libraries also depend on core Inference Engine libraries. @@ -76,15 +76,16 @@ Make sure those libraries are in your computer's path or in the place you pointe * Linux: `LD_LIBRARY_PATH` * Windows: `PATH` +* macOS: `DYLD_LIBRARY_PATH` -On Linux, use the script `bin/setupvars.sh` to set the environment variables. +On Linux and macOS, use the script `bin/setupvars.sh` to set the environment variables. On Windows, run the `bin\setupvars.bat` batch file to set the environment variables. To learn more about supported devices and corresponding plugins, see the [Supported Devices](supported_plugins/Supported_Devices.md) chapter. Common Workflow for Using the Inference Engine API ---------------------------- +-------------------------------------------------- The common workflow contains the following steps: 1. **Create Inference Engine Core object** - Create an `InferenceEngine::Core` object to work with different devices, all device plugins are managed internally by the `Core` object. 
Register extensions with custom nGraph operations (`InferenceEngine::Core::AddExtension`). diff --git a/docs/IE_DG/protecting_model_guide.md b/docs/IE_DG/protecting_model_guide.md index 59ac3ba6ca2c03..99b7836b1b25d1 100644 --- a/docs/IE_DG/protecting_model_guide.md +++ b/docs/IE_DG/protecting_model_guide.md @@ -33,7 +33,7 @@ a temporary memory block for model decryption, and use For more information, see the `InferenceEngine::Core` Class Reference Documentation. -@snippet openvino/docs/snippets/protecting_model_guide.cpp part0 +@snippet snippets/protecting_model_guide.cpp part0 Hardware-based protection, such as Intel® Software Guard Extensions (Intel® SGX), can be utilized to protect decryption operation secrets and @@ -47,7 +47,7 @@ Currently there are no possibility to read external weights from memory for ONNX The `ReadNetwork(const std::string& model, const Blob::CPtr& weights)` function should be called with `weights` passed as an empty `Blob`. -@snippet openvino/docs/snippets/protecting_model_guide.cpp part1 +@snippet snippets/protecting_model_guide.cpp part1 [deploy_encrypted_model]: img/deploy_encrypted_model.png diff --git a/docs/IE_DG/supported_plugins/GPU_RemoteBlob_API.md b/docs/IE_DG/supported_plugins/GPU_RemoteBlob_API.md index 55169fb2aabfcb..227ce101723283 100644 --- a/docs/IE_DG/supported_plugins/GPU_RemoteBlob_API.md +++ b/docs/IE_DG/supported_plugins/GPU_RemoteBlob_API.md @@ -102,15 +102,15 @@ Refer to the sections below to see pseudo-code of usage examples. This example uses the OpenCL context obtained from an executable network object. -@snippet openvino/docs/snippets/GPU_RemoteBlob_API0.cpp part0 +@snippet snippets/GPU_RemoteBlob_API0.cpp part0 ### Running GPU Plugin Inference within User-Supplied Shared Context -@snippet openvino/docs/snippets/GPU_RemoteBlob_API1.cpp part1 +@snippet snippets/GPU_RemoteBlob_API1.cpp part1 ### Direct Consuming of the NV12 VAAPI Video Decoder Surface on Linux -@snippet openvino/docs/snippets/GPU_RemoteBlob_API2.cpp part2 +@snippet snippets/GPU_RemoteBlob_API2.cpp part2 ## See Also diff --git a/docs/IE_DG/supported_plugins/HETERO.md b/docs/IE_DG/supported_plugins/HETERO.md index e417907b0a7620..9b5f69ce687e95 100644 --- a/docs/IE_DG/supported_plugins/HETERO.md +++ b/docs/IE_DG/supported_plugins/HETERO.md @@ -28,17 +28,17 @@ Default fallback policy decides which layer goes to which device automatically a Another way to annotate a network is to set affinity manually using ngraph::Node::get_rt_info with key `"affinity"`: -@snippet openvino/docs/snippets/HETERO0.cpp part0 +@snippet snippets/HETERO0.cpp part0 The fallback policy does not work if even one layer has an initialized affinity. The sequence should be calling of automating affinity settings and then fix manually. > **NOTE**: If you set affinity manually, be careful at the current moment Inference Engine plugins don't support constant (`Constant`->`Result`) and empty (`Parameter`->`Result`) networks. Please avoid such subgraphs when you set affinity manually. -@snippet openvino/docs/snippets/HETERO1.cpp part1 +@snippet snippets/HETERO1.cpp part1 If you rely on the default affinity distribution, you can avoid calling InferenceEngine::Core::QueryNetwork and just call InferenceEngine::Core::LoadNetwork instead: -@snippet openvino/docs/snippets/HETERO2.cpp part2 +@snippet snippets/HETERO2.cpp part2 > **NOTE**: `InferenceEngine::Core::QueryNetwork` does not depend on affinities set by a user, but queries for layer support based on device capabilities. 
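For readers who prefer a single compilable view of the two options above, here is a minimal sketch assuming the 2021.x Inference Engine and nGraph APIs; the model path and the device string are placeholders.

```cpp
#include <inference_engine.hpp>
#include <ngraph/variant.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // hypothetical IR path

    // Option 1: pin every operation manually through the "affinity" runtime attribute.
    // Once any affinity is set, the automatic fallback policy is no longer applied.
    for (const auto& op : network.getFunction()->get_ops()) {
        op->get_rt_info()["affinity"] =
            std::make_shared<ngraph::VariantWrapper<std::string>>("CPU");
    }

    // Option 2 (default fallback policy): skip the loop above and let
    // LoadNetwork split the graph between the listed devices automatically.
    auto exec_network = core.LoadNetwork(network, "HETERO:GPU,CPU");
    return 0;
}
```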
@@ -74,7 +74,7 @@ Heterogeneous plugin can generate two files: * `hetero_affinity_.dot` - annotation of affinities per layer. This file is written to the disk only if default fallback policy was executed * `hetero_subgraphs_.dot` - annotation of affinities per graph. This file is written to the disk during execution of ICNNNetwork::LoadNetwork() for heterogeneous plugin -@snippet openvino/docs/snippets/HETERO3.cpp part3 +@snippet snippets/HETERO3.cpp part3 You can use GraphViz* utility or converters to `.png` formats. On Ubuntu* operating system, you can use the following utilities: * `sudo apt-get install xdot` diff --git a/docs/IE_DG/supported_plugins/MULTI.md b/docs/IE_DG/supported_plugins/MULTI.md index 32a9555b380f07..a3166c3de8e956 100644 --- a/docs/IE_DG/supported_plugins/MULTI.md +++ b/docs/IE_DG/supported_plugins/MULTI.md @@ -32,11 +32,11 @@ You can use name of the configuration directly as a string, or use MultiDeviceCo Basically, there are three ways to specify the devices to be use by the "MULTI": -@snippet openvino/docs/snippets/MULTI0.cpp part0 +@snippet snippets/MULTI0.cpp part0 Notice that the priorities of the devices can be changed in real-time for the executable network: -@snippet openvino/docs/snippets/MULTI1.cpp part1 +@snippet snippets/MULTI1.cpp part1 Finally, there is a way to specify number of requests that the multi-device will internally keep for each device. Say if your original app was running 4 cameras with 4 inference requests now you would probably want to share these 4 requests between 2 devices used in the MULTI. The easiest way is to specify a number of requests for each device using parentheses: "MULTI:CPU(2),GPU(2)" and use the same 4 requests in your app. However, such an explicit configuration is not performance portable and hence not recommended. Instead, the better way is to configure the individual devices and query the resulting number of requests to be used in the application level (see [Configuring the Individual Devices and Creating the Multi-Device On Top](#configuring-the-individual-devices-and-creating-the-multi-device-on-top)). @@ -55,7 +55,7 @@ Available devices: ``` Simple programmatic way to enumerate the devices and use with the multi-device is as follows: -@snippet openvino/docs/snippets/MULTI2.cpp part2 +@snippet snippets/MULTI2.cpp part2 Beyond trivial "CPU", "GPU", "HDDL" and so on, when multiple instances of a device are available the names are more qualified. For example this is how two Intel® Movidius™ Myriad™ X sticks are listed with the hello_query_sample: @@ -68,13 +68,13 @@ For example this is how two Intel® Movidius™ Myriad™ X sticks are listed wi So the explicit configuration to use both would be "MULTI:MYRIAD.1.2-ma2480,MYRIAD.1.4-ma2480". Accordingly, the code that loops over all available devices of "MYRIAD" type only is below: -@snippet openvino/docs/snippets/MULTI3.cpp part3 +@snippet snippets/MULTI3.cpp part3 ## Configuring the Individual Devices and Creating the Multi-Device On Top As discussed in the first section, you shall configure each individual device as usual and then just create the "MULTI" device on top: -@snippet openvino/docs/snippets/MULTI4.cpp part4 +@snippet snippets/MULTI4.cpp part4 Alternatively, you can combine all the individual device settings into single config and load that, allowing the multi-device plugin to parse and apply that to the right devices. See code example in the next section. 
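To make the device-priority mechanics above concrete, a minimal sketch follows. It assumes the 2021.x Inference Engine API; `"MULTI_DEVICE_PRIORITIES"` is used here as a plain string corresponding to the key exposed by the multi-device configuration header, and the model path is a placeholder.

```cpp
#include <inference_engine.hpp>
#include <string>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // hypothetical IR path

    // Load on MULTI with an initial device priority list (GPU first, then CPU).
    auto exec_network = core.LoadNetwork(network, "MULTI",
        {{"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}});

    // Priorities can be re-ordered at run time on the executable network.
    exec_network.SetConfig({{"MULTI_DEVICE_PRIORITIES", std::string("CPU,GPU")}});
    return 0;
}
```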
@@ -84,7 +84,7 @@ See section of the [Using the multi-device with OpenVINO samples and benchmarkin ## Querying the Optimal Number of Inference Requests Notice that until R2 you had to calculate number of requests in your application for any device, e.g. you had to know that Intel® Vision Accelerator Design with Intel® Movidius™ VPUs required at least 32 inference requests to perform well. Now you can use the new GetMetric API to query the optimal number of requests. Similarly, when using the multi-device you don't need to sum over included devices yourself, you can query metric directly: -@snippet openvino/docs/snippets/MULTI5.cpp part5 +@snippet snippets/MULTI5.cpp part5 ## Using the Multi-Device with OpenVINO Samples and Benchmarking the Performance Notice that every OpenVINO sample that supports "-d" (which stays for "device") command-line option transparently accepts the multi-device. diff --git a/docs/IE_PLUGIN_DG/Doxyfile b/docs/IE_PLUGIN_DG/Doxyfile index d72cbe5b9fcb40..3d66d22b4a2000 100644 --- a/docs/IE_PLUGIN_DG/Doxyfile +++ b/docs/IE_PLUGIN_DG/Doxyfile @@ -844,11 +844,7 @@ EXCLUDE_SYMLINKS = NO # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* -EXCLUDE_PATTERNS = cnn_network_ngraph_impl.hpp \ - ie_imemory_state_internal.hpp \ - ie_memory_state_internal.hpp \ - ie_memory_state_base.hpp \ - generic_ie.hpp \ +EXCLUDE_PATTERNS = generic_ie.hpp \ function_name.hpp \ macro_overload.hpp diff --git a/docs/IE_PLUGIN_DG/ExecutableNetwork.md b/docs/IE_PLUGIN_DG/ExecutableNetwork.md index a52872946c2bf3..2685c518a0ec58 100644 --- a/docs/IE_PLUGIN_DG/ExecutableNetwork.md +++ b/docs/IE_PLUGIN_DG/ExecutableNetwork.md @@ -92,7 +92,7 @@ Returns a metric value for a metric with the name `name`. A metric is a static @snippet src/template_executable_network.cpp executable_network:get_metric -The IE_SET_METRIC helper macro sets metric value and checks that the actual metric type matches a type of the specified value. +The IE_SET_METRIC_RETURN helper macro sets metric value and checks that the actual metric type matches a type of the specified value. ### `GetConfig()` diff --git a/docs/IE_PLUGIN_DG/LowPrecisionModelRepresentation.md b/docs/IE_PLUGIN_DG/LowPrecisionModelRepresentation.md index 9ff8088a366745..c00507d6c37453 100644 --- a/docs/IE_PLUGIN_DG/LowPrecisionModelRepresentation.md +++ b/docs/IE_PLUGIN_DG/LowPrecisionModelRepresentation.md @@ -1,11 +1,11 @@ -# Representation of low-precision models +# Representation of low-precision models {#lp_representation} The goal of this document is to describe how optimized models are represented in OpenVINO Intermediate Representation (IR) and provide guidance on interpretation rules for such models at runtime. Currently, there are two groups of optimization methods that can influence on the IR after applying them to the full-precision model: - **Sparsity**. It is represented by zeros inside the weights and this is up to the hardware plugin how to interpret these zeros (use weights as is or apply special compression algorithms and sparse arithmetic). No additional mask is provided with the model. - **Quantization**. The rest of this document is dedicated to the representation of quantized models. ## Representation of quantized models -The OpenVINO Toolkit represents all the quantized models using the so-called FakeQuantize operation (see the description in [this document](../MO_DG/prepare_model/convert_model/Legacy_IR_Layers_Catalog_Spec.md)). 
This operation is very expressive and allows mapping values from arbitrary input and output ranges. The whole idea behind that is quite simple: we project (discretize) the input values to the low-precision data type using affine transformation (with clamp and rounding) and then reproject discrete values back to the original range and data type. It can be considered as an emulation of the quantization process which happens at runtime. +The OpenVINO Toolkit represents all the quantized models using the so-called FakeQuantize operation (see the description in [this document](@ref openvino_docs_ops_quantization_FakeQuantize_1)). This operation is very expressive and allows mapping values from arbitrary input and output ranges. The whole idea behind that is quite simple: we project (discretize) the input values to the low-precision data type using affine transformation (with clamp and rounding) and then reproject discrete values back to the original range and data type. It can be considered as an emulation of the quantization process which happens at runtime. In order to be able to execute a particular DL operation in low-precision all its inputs should be quantized i.e. should have FakeQuantize between operation and data blobs. The figure below shows an example of quantized Convolution which contains two FakeQuantize nodes: one for weights and one for activations (bias is quantized using the same parameters). ![quantized_convolution]
Figure 1. Example of quantized Convolution operation.
diff --git a/docs/IE_PLUGIN_DG/QuantizedNetworks.md b/docs/IE_PLUGIN_DG/QuantizedNetworks.md index 6e6cdd337b118e..c327c3775fbfe5 100644 --- a/docs/IE_PLUGIN_DG/QuantizedNetworks.md +++ b/docs/IE_PLUGIN_DG/QuantizedNetworks.md @@ -3,13 +3,13 @@ One of the feature of Inference Engine is the support of quantized networks with different precisions: INT8, INT4, etc. However, it is up to the plugin to define what exact precisions are supported by the particular HW. All quantized networks which can be expressed in IR have a unified representation by means of *FakeQuantize* operation. -For more details about low-precision model representation please refer to this [document](LowPrecisionModelRepresentation.md). +For more details about low-precision model representation please refer to this [document](@ref lp_representation). ### Interpreting FakeQuantize at runtime During the model load each plugin can interpret quantization rules expressed in *FakeQuantize* operations: - Independently based on the definition of *FakeQuantize* operation. - Using a special library of low-precision transformations (LPT) which applies common rules for generic operations, -such as Convolution, Fully-Connected, Eltwise, etc., and translates "fake-quantized" models into the models with low-precision operations. For more information about low-precision flow please refer to the following [document](../IE_DG/Int8Inference.md). +such as Convolution, Fully-Connected, Eltwise, etc., and translates "fake-quantized" models into the models with low-precision operations. For more information about low-precision flow please refer to the following [document](@ref openvino_docs_IE_DG_Int8Inference). Here we provide only a high-level overview of the interpretation rules of FakeQuantize. At runtime each FakeQuantize can be split into two independent operations: **Quantize** and **Dequantize**. 
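In simplified form (the FakeQuantize specification remains the exact definition, including edge-case handling), the two stages for an input element \f$x\f$ are:

\f[
q = \mathrm{round}\left(\frac{\mathrm{clamp}(x,\ input\_low,\ input\_high) - input\_low}{input\_high - input\_low}\cdot(levels - 1)\right)
\f]

\f[
y = \frac{q}{levels - 1}\cdot(output\_high - output\_low) + output\_low
\f]

Quantize produces the integer code \f$q\f$ in \f$[0, levels-1]\f$, and Dequantize maps it back to the original floating-point range; roughly speaking, low-precision execution keeps \f$q\f$ on the device and folds the Dequantize scale and shift into downstream operations.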
diff --git a/docs/IE_PLUGIN_DG/layout.xml b/docs/IE_PLUGIN_DG/layout.xml index 667785db71c28c..3dc629d959c13b 100644 --- a/docs/IE_PLUGIN_DG/layout.xml +++ b/docs/IE_PLUGIN_DG/layout.xml @@ -17,8 +17,10 @@ - - + + + + diff --git a/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md b/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md index 0cdd936f189f10..98de8d014145c7 100644 --- a/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md +++ b/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md @@ -85,6 +85,7 @@ Model Optimizer produces an Intermediate Representation (IR) of the network, whi * [Converting a Style Transfer Model from MXNet](prepare_model/convert_model/mxnet_specific/Convert_Style_Transfer_From_MXNet.md) * [Converting Your Kaldi* Model](prepare_model/convert_model/Convert_Model_From_Kaldi.md) * [Converting Your ONNX* Model](prepare_model/convert_model/Convert_Model_From_ONNX.md) + * [Converting Faster-RCNN ONNX* Model](prepare_model/convert_model/onnx_specific/Convert_Faster_RCNN.md) * [Converting Mask-RCNN ONNX* Model](prepare_model/convert_model/onnx_specific/Convert_Mask_RCNN.md) * [Converting DLRM ONNX* Model](prepare_model/convert_model/onnx_specific/Convert_DLRM.md) * [Model Optimizations Techniques](prepare_model/Model_Optimization_Techniques.md) diff --git a/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md b/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md index 096254ce19e25d..b208a5f5b5c307 100644 --- a/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md +++ b/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md @@ -99,7 +99,7 @@ If you want to cut your model at the end, you have the following options: ```sh python3 mo.py --input_model=inception_v1.pb -b 1 --output=InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu ``` - The resulting Intermediate Representation has three layers: + The resulting Intermediate Representation has three layers: ```xml @@ -137,13 +137,13 @@ python3 mo.py --input_model=inception_v1.pb -b 1 --output=InceptionV1/InceptionV ``` - As you can see in the TensorBoard picture, the original model has more nodes than Intermediate Representation. Model Optimizer has fused batch normalization `InceptionV1/InceptionV1/Conv2d_1a_7x7/BatchNorm` to the convolution `InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution`, and it is not present in the final Intermediate Representation. This is not an effect of the `--output` option, it is usual behavior of the Model Optimizer for batch normalizations and convolutions. The effect of the `--output` is that the `ReLU` layer becomes the last one in the converted model. + As you can see in the TensorBoard picture, the original model has more nodes than Intermediate Representation. Model Optimizer has fused batch normalization `InceptionV1/InceptionV1/Conv2d_1a_7x7/BatchNorm` to the convolution `InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution`, and it is not present in the final Intermediate Representation. This is not an effect of the `--output` option, it is usual behavior of the Model Optimizer for batch normalizations and convolutions. The effect of the `--output` is that the `ReLU` layer becomes the last one in the converted model. 2. 
The following command cuts the edge that comes from 0 output port of the `InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu` and the rest of the model, making this node the last one in the model: ```sh python3 mo.py --input_model=inception_v1.pb -b 1 --output=InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu:0 ``` - The resulting Intermediate Representation has three layers, which are the same as in the previous case: + The resulting Intermediate Representation has three layers, which are the same as in the previous case: ```xml @@ -181,13 +181,13 @@ python3 mo.py --input_model=inception_v1.pb -b 1 --output=InceptionV1/InceptionV ``` - This type of cutting is useful to cut edges in case of multiple output edges. - + This type of cutting is useful to cut edges in case of multiple output edges. + 3. The following command cuts the edge that comes to 0 input port of the `InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu` and the rest of the model including `InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu`, deleting this node and making the previous node `InceptionV1/InceptionV1/Conv2d_1a_7x7/Conv2D` the last in the model: ```sh python3 mo.py --input_model=inception_v1.pb -b 1 --output=0:InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu ``` - The resulting Intermediate Representation has two layers, which are the same as the first two layers in the previous case: + The resulting Intermediate Representation has two layers, which are the same as the first two layers in the previous case: ```xml @@ -225,7 +225,7 @@ If you want to go further and cut the beginning of the model, leaving only the ` ```sh python3 mo.py --input_model=inception_v1.pb -b 1 --output=InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --input=InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu ``` - The resulting Intermediate Representation looks as follows: + The resulting Intermediate Representation looks as follows: ```xml @@ -249,16 +249,15 @@ python3 mo.py --input_model=inception_v1.pb -b 1 --output=InceptionV1/InceptionV ``` - `Input` layer is automatically created to feed the layer that is converted from the node specified in `--input`, which is `InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu` in this case. Model Optimizer does not replace the `ReLU` node by the `Input` layer, it produces such Intermediate Representation to make the node be the first executable node in the final Intermediate Representation. So the Model Optimizer creates enough `Inputs` to feed all input ports of the node that is passed in `--input`. - - Even though `--input_shape` is not specified in the command line, the shapes for layers are inferred from the beginning of the original TensorFlow* model to the point at which the new input is defined. It has the same shape [1,64,112,112] as the model converted as a whole or without cutting off the beginning. + `Input` layer is automatically created to feed the layer that is converted from the node specified in `--input`, which is `InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu` in this case. Model Optimizer does not replace the `ReLU` node by the `Input` layer, it produces such Intermediate Representation to make the node be the first executable node in the final Intermediate Representation. So the Model Optimizer creates enough `Inputs` to feed all input ports of the node that is passed in `--input`.
+Even though `--input_shape` is not specified in the command line, the shapes for layers are inferred from the beginning of the original TensorFlow* model to the point at which the new input is defined. It has the same shape [1,64,112,112] as the model converted as a whole or without cutting off the beginning. 2. You can cut edge incoming to layer by port number. To specify incoming port use notation `--input=port:input_node`. So, to cut everything before `ReLU` layer, cut edge incoming in port 0 of `InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu` node: ```sh python3 mo.py --input_model=inception_v1.pb -b 1 --input=0:InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu --output=InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu ``` - The resulting Intermediate Representation looks as follows: + The resulting Intermediate Representation looks as follows: ```xml @@ -282,16 +281,15 @@ python3 mo.py --input_model=inception_v1.pb -b 1 --input=0:InceptionV1/Inception ``` - `Input` layer is automatically created to feed the layer that is converted from the node specified in `--input`, which is `InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu` in this case. Model Optimizer does not replace the `ReLU` node by the `Input` layer, it produces such Intermediate Representation to make the node be the first executable node in the final Intermediate Representation. So the Model Optimizer creates enough `Inputs` to feed all input ports of the node that is passed in `--input`. - - Even though `--input_shape` is not specified in the command line, the shapes for layers are inferred from the beginning of the original TensorFlow* model to the point at which the new input is defined. It has the same shape [1,64,112,112] as the model converted as a whole or without cutting off the beginning. + `Input` layer is automatically created to feed the layer that is converted from the node specified in `--input`, which is `InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu` in this case. Model Optimizer does not replace the `ReLU` node by the `Input` layer, it produces such Intermediate Representation to make the node be the first executable node in the final Intermediate Representation. So the Model Optimizer creates enough `Inputs` to feed all input ports of the node that is passed in `--input`.
+Even though `--input_shape` is not specified in the command line, the shapes for layers are inferred from the beginning of the original TensorFlow* model to the point at which the new input is defined. It has the same shape [1,64,112,112] as the model converted as a whole or without cutting off the beginning. 3. You can cut edge outcoming from layer by port number. To specify outcoming port use notation `--input=input_node:port`. So, to cut everything before `ReLU` layer, cut edge from `InceptionV1/InceptionV1/Conv2d_1a_7x7/BatchNorm/batchnorm/add_1` node to `ReLU`: ```sh python3 mo.py --input_model=inception_v1.pb -b 1 --input=InceptionV1/InceptionV1/Conv2d_1a_7x7/BatchNorm/batchnorm/add_1:0 --output=InceptionV1/InceptionV1/Conv2d_1a_7x7/Relu ``` - The resulting Intermediate Representation looks as follows: + The resulting Intermediate Representation looks as follows: ```xml @@ -389,4 +387,4 @@ In this case, when `--input_shape` is specified and the node contains multiple i The correct command line is: ```sh python3 mo.py --input_model=inception_v1.pb --input=0:InceptionV1/InceptionV1/Conv2d_1a_7x7/convolution --input_shape=[1,224,224,3] -``` \ No newline at end of file +``` diff --git a/docs/MO_DG/prepare_model/convert_model/onnx_specific/Convert_Faster_RCNN.md b/docs/MO_DG/prepare_model/convert_model/onnx_specific/Convert_Faster_RCNN.md new file mode 100644 index 00000000000000..e749e37780da68 --- /dev/null +++ b/docs/MO_DG/prepare_model/convert_model/onnx_specific/Convert_Faster_RCNN.md @@ -0,0 +1,18 @@ +# Convert ONNX* Faster R-CNN Model to the Intermediate Representation {#openvino_docs_MO_DG_prepare_model_convert_model_onnx_specific_Convert_Faster_RCNN} + +These instructions are applicable only to the Faster R-CNN model converted to the ONNX* file format from the [facebookresearch/maskrcnn-benchmark model](https://github.com/facebookresearch/maskrcnn-benchmark). + +**Step 1**. Download the pre-trained model file from [onnx/models](https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/faster-rcnn) (commit-SHA: 8883e49e68de7b43e263d56b9ed156dfa1e03117). + +**Step 2**. To generate the Intermediate Representation (IR) of the model, change your current working directory to the Model Optimizer installation directory and run the Model Optimizer with the following parameters: +```sh +python3 ./mo_onnx.py +--input_model FasterRCNN-10.onnx \ +--input_shape [3,800,800] \ +--mean_values [102.9801,115.9465,122.7717] \ +--transformations_config ./extensions/front/onnx/faster_rcnn.json +``` + +Note that the height and width specified with the `input_shape` command line parameter could be different. Refer to the [documentation](https://github.com/onnx/models/tree/master/vision/object_detection_segmentation/faster-rcnn) for more information about supported input image dimensions and required pre- and post-processing steps. + +**Step 3**. Interpret the outputs. The generated IR file has several outputs: class indices, probabilities and box coordinates. These are outputs from the "DetectionOutput" layer. 
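A minimal sketch of consuming the generated IR with the Inference Engine C++ API is shown below; the file names and the device are placeholders, and the exact output tensor names and shapes depend on the converted model, so they are discovered through `getOutputsInfo()` rather than hard-coded.

```cpp
#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    // Hypothetical file names produced by Step 2.
    auto network = core.ReadNetwork("FasterRCNN-10.xml", "FasterRCNN-10.bin");
    auto exec_network = core.LoadNetwork(network, "CPU");
    auto request = exec_network.CreateInferRequest();

    // ... fill the input blob with a pre-processed 800x800 image here ...
    request.Infer();

    // Iterate over every network output (box coordinates, class indices, probabilities).
    for (const auto& output : network.getOutputsInfo()) {
        InferenceEngine::Blob::Ptr blob = request.GetBlob(output.first);
        // output.first is the tensor name, blob holds the raw result data.
    }
    return 0;
}
```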
diff --git a/docs/doxygen/ie_c_api.config b/docs/doxygen/ie_c_api.config index e9678615081fbc..541a21efe54254 100644 --- a/docs/doxygen/ie_c_api.config +++ b/docs/doxygen/ie_c_api.config @@ -1,25 +1,26 @@ -@INCLUDE = @IE_CONFIG_BINARY@ +@INCLUDE = "@IE_CONFIG_BUILD@" EXCLUDE_SYMBOLS = INFERENCE_ENGINE_C_API_EXTERN \ INFERENCE_ENGINE_C_API \ + INFERENCE_ENGINE_C_API_CALLBACK \ IE_NODISCARD PREDEFINED = "__attribute__(x)=" \ "__VA_ARGS__=" \ "INFERENCE_ENGINE_C_API_EXTERN=" \ + "INFERENCE_ENGINE_C_API_CALLBACK=" \ "INFERENCE_ENGINE_C_API=" \ "IE_NODISCARD=" \ "__cdecl=" \ "__declspec(x)=" \ - "__GNUC__=" \ "_WIN32" FILE_PATTERNS = *.h -LAYOUT_FILE = "@C_LAYOUT_BINARY@" +LAYOUT_FILE = "@C_LAYOUT_BUILD@" INPUT = "@C_API@" HTML_OUTPUT = ie_c_api -GENERATE_TAGFILE = "@DOCS_BINARY_DIR@/ie_c_api.tag" +GENERATE_TAGFILE = "@DOCS_BUILD_DIR@/ie_c_api.tag" diff --git a/docs/doxygen/ie_docs.config b/docs/doxygen/ie_docs.config index 48dca68bef85de..c6264fac258fbf 100644 --- a/docs/doxygen/ie_docs.config +++ b/docs/doxygen/ie_docs.config @@ -58,7 +58,7 @@ PROJECT_LOGO = # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. -OUTPUT_DIRECTORY = "@DOCS_BINARY_DIR@/html" +OUTPUT_DIRECTORY = "@DOCS_BUILD_DIR@/html" # If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and @@ -735,7 +735,7 @@ FILE_VERSION_FILTER = # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE # tag is left empty. -LAYOUT_FILE = @IE_LAYOUT_BINARY@ +LAYOUT_FILE = "@IE_LAYOUT_BUILD@" # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. This must be a list of .bib files. The .bib @@ -823,7 +823,7 @@ WARN_LOGFILE = # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. -INPUT = "@DOCS_BINARY_DIR@" \ +INPUT = "@DOCS_BUILD_DIR@" \ "@IE_SOURCE_DIR@/include" # This tag can be used to specify the character encoding of the source files @@ -903,8 +903,8 @@ EXCLUDE_PATTERNS = */temp/* \ # exclude all test directories use the pattern */test/* EXCLUDE_SYMBOLS = InferenceEngine::details \ + InferenceEngine::gpu::details \ PRECISION_NAME \ - TBLOB_TOP_RESULT \ CASE \ CASE2 \ _CONFIG_KEY \ @@ -929,24 +929,26 @@ EXCLUDE_SYMBOLS = InferenceEngine::details \ INFERENCE_ENGINE_API_CPP \ INFERENCE_ENGINE_API_CLASS \ INFERENCE_ENGINE_DEPRECATED \ - INFERENCE_ENGINE_NN_BUILDER_API_CLASS \ - INFERENCE_ENGINE_NN_BUILDER_DEPRECATED \ IE_SUPPRESS_DEPRECATED_START \ IE_SUPPRESS_DEPRECATED_END \ IE_SUPPRESS_DEPRECATED_START_WIN \ IE_SUPPRESS_DEPRECATED_END_WIN \ IE_SUPPRESS_DEPRECATED_END_WIN \ INFERENCE_ENGINE_INTERNAL \ - INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS \ IE_DO_PRAGMA \ - REG_VALIDATOR_FOR + parallel_* \ + for_* \ + splitter \ + InferenceEngine::parallel_* \ + NOMINMAX \ + TBB_PREVIEW_NUMA_SUPPORT \ + IE_THREAD_* # The EXAMPLE_PATH tag can be used to specify one or more files or directories # that contain example code fragments that are included (see the \include # command). -EXAMPLE_PATH = template_extension \ - ../inference-engine/samples +EXAMPLE_PATH = "@CMAKE_CURRENT_SOURCE_DIR@" # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and @@ -2256,13 +2258,13 @@ SKIP_FUNCTION_MACROS = YES # the path). 
If a tag file is not located in the directory in which doxygen is # run, you must also specify the path to the tagfile here. -TAGFILES = +TAGFILES = "@DOCS_BUILD_DIR@/ngraph_cpp_api.tag=ngraph_cpp_api" # When a file name is specified after GENERATE_TAGFILE, doxygen will create a # tag file that is based on the input files it reads. See section "Linking to # external documentation" for more information about the usage of tag files. -GENERATE_TAGFILE = "@DOCS_BINARY_DIR@/ie_api.tag" +GENERATE_TAGFILE = "@DOCS_BUILD_DIR@/ie_api.tag" # If the ALLEXTERNALS tag is set to YES, all external class will be listed in # the class index. If set to NO, only the inherited external classes will be diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml index a17dc4439ed9fd..3acfa3a1d8fedb 100644 --- a/docs/doxygen/ie_docs.xml +++ b/docs/doxygen/ie_docs.xml @@ -32,6 +32,7 @@ + diff --git a/docs/doxygen/ie_plugin_api.config b/docs/doxygen/ie_plugin_api.config index 4d6dea7992e203..9c933ff249f733 100644 --- a/docs/doxygen/ie_plugin_api.config +++ b/docs/doxygen/ie_plugin_api.config @@ -1,15 +1,20 @@ -@INCLUDE = @IE_CONFIG_BINARY@ +@INCLUDE = "@IE_CONFIG_BUILD@" -LAYOUT_FILE = "@PLUGIN_LAYOUT_BINARY@" +LAYOUT_FILE = "@PLUGIN_LAYOUT_BUILD@" HTML_OUTPUT = ie_plugin_api -GENERATE_TAGFILE = "@DOCS_BINARY_DIR@/ie_plugin_api.tag" +GENERATE_TAGFILE = "@DOCS_BUILD_DIR@/ie_plugin_api.tag" EXTRACT_LOCAL_CLASSES = NO -INPUT = "@DOCS_BINARY_DIR@/docs/IE_PLUGIN_DG" \ - "@IE_SOURCE_DIR@/src/plugin_api" +INPUT = "@DOCS_BUILD_DIR@/docs/IE_PLUGIN_DG" \ + "@IE_SOURCE_DIR@/src/plugin_api" \ + "@IE_SOURCE_DIR@/src/transformations/include" \ + "@OpenVINO_MAIN_SOURCE_DIR@/openvino/itt/include/openvino" + + +RECURSIVE = YES FILE_PATTERNS = *.c \ *.cpp \ @@ -18,21 +23,21 @@ FILE_PATTERNS = *.c \ *.hpp \ *.md -EXCLUDE_PATTERNS = cnn_network_ngraph_impl.hpp \ - ie_imemory_state_internal.hpp \ - ie_memory_state_internal.hpp \ - ie_memory_state_base.hpp \ - convert_function_to_cnn_network.hpp \ - generic_ie.hpp +EXCLUDE_PATTERNS = generic_ie.hpp + +EXCLUDE_SYMBOLS = InferenceEngine::details -EXCLUDE_SYMBOLS = +TAGFILES = "@DOCS_BUILD_DIR@/ie_api.tag=.." 
\ + "@DOCS_BUILD_DIR@/ngraph_cpp_api.tag=../ngraph_cpp_api" EXAMPLE_PATH = "@CMAKE_CURRENT_SOURCE_DIR@/template_plugin/src" \ "@CMAKE_CURRENT_SOURCE_DIR@/template_plugin/include" \ "@CMAKE_CURRENT_SOURCE_DIR@/template_plugin/src/CMakeLists.txt" \ - "@CMAKE_CURRENT_SOURCE_DIR@/template_plugin/tests/functional/" - CMakeLists.txt \ - "@CMAKE_CURRENT_SOURCE_DIR@/examples" + "@CMAKE_CURRENT_SOURCE_DIR@/template_plugin/tests/functional/CMakeLists.txt" \ + "@CMAKE_CURRENT_SOURCE_DIR@/template_plugin/tests/functional/transformations" \ + "@CMAKE_CURRENT_SOURCE_DIR@/template_plugin/tests/functional/shared_tests_instances/" \ + "@CMAKE_CURRENT_SOURCE_DIR@/snippets" + "@IE_SOURCE_DIR@/tests/functional/plugin/shared/include" \ EXAMPLE_PATTERNS = *.cpp \ *.hpp @@ -41,12 +46,17 @@ ENUM_VALUES_PER_LINE = 1 EXPAND_ONLY_PREDEF = YES -PREDEFINED = INFERENCE_ENGINE_API \ - INFERENCE_ENGINE_API_CPP \ - INFERENCE_ENGINE_API_CLASS \ - INFERENCE_ENGINE_DEPRECATED \ - IE_SUPPRESS_DEPRECATED_START \ - IE_SUPPRESS_DEPRECATED_END \ - IE_SUPPRESS_DEPRECATED_START_WIN \ - IE_SUPPRESS_DEPRECATED_END_WIN \ - IE_THREAD=IE_THREAD_TBB +PREDEFINED = "INFERENCE_ENGINE_API=" \ + "INFERENCE_ENGINE_API_CPP=" \ + "INFERENCE_ENGINE_API_CLASS=" \ + "INFERENCE_ENGINE_DEPRECATED=" \ + "inference_engine_transformations_EXPORTS" \ + "TRANSFORMATIONS_API=" \ + "NGRAPH_HELPER_DLL_EXPORT=" \ + "NGRAPH_HELPER_DLL_IMPORT=" \ + "IE_SUPPRESS_DEPRECATED_START=" \ + "IE_SUPPRESS_DEPRECATED_END=" \ + "IE_SUPPRESS_DEPRECATED_START_WIN=" \ + "IE_SUPPRESS_DEPRECATED_END_WIN=" \ + "IE_THREAD=IE_THREAD_TBB" \ + "NGRAPH_RTTI_DECLARATION=" diff --git a/docs/doxygen/ie_plugin_api.xml b/docs/doxygen/ie_plugin_api.xml index b2839444af8421..d7617c9a94bfa7 100644 --- a/docs/doxygen/ie_plugin_api.xml +++ b/docs/doxygen/ie_plugin_api.xml @@ -16,8 +16,10 @@ - - + + + + diff --git a/docs/doxygen/ie_py_api.config b/docs/doxygen/ie_py_api.config index caeb7fcac5b8e0..f89931421205b2 100644 --- a/docs/doxygen/ie_py_api.config +++ b/docs/doxygen/ie_py_api.config @@ -1,4 +1,4 @@ -@INCLUDE = @IE_CONFIG_BINARY@ +@INCLUDE = "@IE_CONFIG_BUILD@" EXCLUDE_SYMBOLS = ie_api::BlobBuffer \ *impl* \ @@ -26,10 +26,10 @@ EXTENSION_MAPPING = pyx=Python FILE_PATTERNS = *.pyx -LAYOUT_FILE = "@PY_LAYOUT_BINARY@" +LAYOUT_FILE = "@PY_LAYOUT_BUILD@" INPUT = "@PYTHON_API_OUT@" HTML_OUTPUT = ie_python_api -GENERATE_TAGFILE = "@DOCS_BINARY_DIR@/ie_python_api.tag" +GENERATE_TAGFILE = "@DOCS_BUILD_DIR@/ie_python_api.tag" diff --git a/docs/doxygen/linkchecker_filter.yaml b/docs/doxygen/linkchecker_filter.yaml new file mode 100644 index 00000000000000..fc5e941c82d974 --- /dev/null +++ b/docs/doxygen/linkchecker_filter.yaml @@ -0,0 +1,4 @@ +exclude_links: + - '.*?\@ref omz.*' + - '.*?\@ref pot.*' + - '.*?\@ref workbench.*' diff --git a/docs/doxygen/ngraph_cpp_api.config b/docs/doxygen/ngraph_cpp_api.config new file mode 100644 index 00000000000000..0fba49bb28cd61 --- /dev/null +++ b/docs/doxygen/ngraph_cpp_api.config @@ -0,0 +1,21 @@ +@INCLUDE = "@IE_CONFIG_BUILD@" + +EXCLUDE_SYMBOLS = + +PREDEFINED = + +EXCLUDE_PATTERNS = */python/* + +FILE_PATTERNS = *.cpp \ + *.c \ + *.hpp \ + *.h + +LAYOUT_FILE = "@NGRAPH_CPP_LAYOUT_BUILD@" + +INPUT = "@NGRAPH_DIR@/core/include/" \ + "@NGRAPH_DIR@/frontend/onnx_import/include" + +HTML_OUTPUT = ngraph_cpp_api + +GENERATE_TAGFILE = "@DOCS_BUILD_DIR@/ngraph_cpp_api.tag" diff --git a/docs/doxygen/ngraph_py_api.config b/docs/doxygen/ngraph_py_api.config new file mode 100644 index 00000000000000..0f5c087e3d8f83 --- /dev/null +++ 
b/docs/doxygen/ngraph_py_api.config @@ -0,0 +1,22 @@ +@INCLUDE = "@IE_CONFIG_BUILD@" + +PREDEFINED = + +EXTRACT_ALL = YES + +EXCLUDE_PATTERNS = */exceptions.py \ + */impl/* \ + */utils/* \ + */opset_utils.py + +EXCLUDE_SYMBOLS = ngraph::utils + +FILE_PATTERNS = *.py + +LAYOUT_FILE = "@NGRAPH_PY_LAYOUT_BUILD@" + +INPUT = "@NGRAPH_PY_DIR@" + +HTML_OUTPUT = ngraph_python_api + +PYTHON_DOCSTRING = NO diff --git a/docs/doxygen/openvino_docs.xml b/docs/doxygen/openvino_docs.xml index ba3e4c71764fc0..9cae58660f8364 100644 --- a/docs/doxygen/openvino_docs.xml +++ b/docs/doxygen/openvino_docs.xml @@ -123,6 +123,7 @@ + diff --git a/docs/install_guides/movidius-programming-guide.md b/docs/install_guides/movidius-programming-guide.md index b2b9ef0cb99078..184910a1471101 100644 --- a/docs/install_guides/movidius-programming-guide.md +++ b/docs/install_guides/movidius-programming-guide.md @@ -18,11 +18,11 @@ The structure should hold: 1. A pointer to an inference request. 2. An ID to keep track of the request. -@snippet openvino/docs/snippets/movidius-programming-guide.cpp part0 +@snippet snippets/movidius-programming-guide.cpp part0 ### Declare a Vector of Requests -@snippet openvino/docs/snippets/movidius-programming-guide.cpp part1 +@snippet snippets/movidius-programming-guide.cpp part1 Declare and initialize 2 mutex variables: 1. For each request @@ -34,9 +34,9 @@ Conditional variable indicates when at most 8 requests are done at a time. For inference requests, use the asynchronous IE API calls: -@snippet openvino/docs/snippets/movidius-programming-guide.cpp part2 +@snippet snippets/movidius-programming-guide.cpp part2 -@snippet openvino/docs/snippets/movidius-programming-guide.cpp part3 +@snippet snippets/movidius-programming-guide.cpp part3 ### Create a Lambda Function @@ -45,7 +45,7 @@ Lambda Function enables the parsing and display of results. Inside the Lambda body use the completion callback function: -@snippet openvino/docs/snippets/movidius-programming-guide.cpp part4 +@snippet snippets/movidius-programming-guide.cpp part4 ## Additional Resources diff --git a/docs/nGraph_DG/nGraphTransformation.md b/docs/nGraph_DG/nGraphTransformation.md index 5a17b3504d6a10..318aa50f43547d 100644 --- a/docs/nGraph_DG/nGraphTransformation.md +++ b/docs/nGraph_DG/nGraphTransformation.md @@ -14,10 +14,11 @@ Transformation library is independent from Inference Engine target library named and is located in the `inference-engine/src/transformations` directory. Transformations root directory contains two folders: -* `ngraph_ops` - Contains legacy opset operations needed for nGraph to CNNNetwork conversion. -> **NOTE**: This operation is prohibited inside new plugins until they are not moved to a separate directory with allowed operations. +* `ngraph_ops` - Contains internal opset operations that are common for plugins. * `transformations` - Includes all transformations, utils, runtime info attributes, and pass managers. -> **NOTE**: Do not use transformation that belongs to `ngraph::pass::ConvertOpSet1ToLegacy` transformations until they are not moved to a separate directory with allowed transformations. + +All internal operations and transformations located inside the [Transformation Library](group__ie__transformation__api.html) can be used inside plugins. +All legacy operations and transformations were moved to a legacy library and are not recommended to be used. 
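As a minimal sketch of that statement, a plugin can run one of the predefined pipelines from the transformation library on the `ngraph::Function` it receives; the include paths below reflect the usual layout of the transformation library and are assumptions rather than a normative API listing.

```cpp
#include <memory>

#include <ngraph/pass/manager.hpp>
#include <transformations/common_optimizations/common_optimizations.hpp>

// Run the predefined CommonOptimizations pipeline on a function inside a plugin.
void run_common_optimizations(std::shared_ptr<ngraph::Function> function) {
    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::CommonOptimizations>();
    manager.run_passes(function);
}
```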
### Transformation Flow Layers Transformation flow in the transformation library has several layers: @@ -32,15 +33,15 @@ But if some transformation parts can potentially be reused in other transformati To decide where to store your transformation code, please follow these rules: 1. If it is a plugin-specific transformation and cannot be reused by other plugins, keep source code inside plugin. -2. If this transformation relates to the OpSetXToOpSetY conversion or it is common optimization, keep sources inside the transformation library. +2. If this transformation relates to opset operation conversion or optimization, keep sources inside the transformation library. After you decide where to store your transformation code, you can start developing your own nGraph transformation. ## ngraph::Function and graph representation -An nGraph function is a simple thing: it stores shared pointers to `ngraph::op::Result` and `ngraph::op::Parameter` operations that are inputs and outputs of the graph. -All other operations hold each other via shared pointers: child operation holds its parent (hard link). If the operation has no consumers and it is not a Result operation -(shared pointer counter is zero), it is destructed and is not accessible anymore. Each operation in `ngraph::Function` has a `std::shared_ptr` type. +nGraph function is a very simple thing: it stores shared pointers to `ngraph::op::Parameter`, `ngraph::op::Result` and `ngraph::op::Sink` operations that are inputs, outputs and sinks of the graph. +Sinks of the graph have no consumers and not included into results vector. All other operations hold each other via shared pointers: child operation holds its parent (hard link). If operation has no consumers and it's not Result or Sink operation +(shared pointer counter is zero) then it will be destructed and won't be accessible anymore. Each operation in `ngraph::Function` has a `std::shared_ptr` type. For examples of how to build an nGraph function, see the [Build nGraph Function](./build_function.md) page. @@ -50,7 +51,7 @@ nGraph has three main transformation types: * `ngraph::pass::FunctionPass` - straightforward way to work with `ngraph::Function` directly * `ngraph::pass::MatcherPass` - pattern-based transformation approach -* `ngraph::pass::GraphRewrite` - container for matcher passes +* `ngraph::pass::GraphRewrite` - container for matcher passes needed for efficient execution ![transformations_structure] @@ -87,14 +88,15 @@ To use `ngraph::pass::MatcherPass`, you need to complete these steps: So let's go through each of these steps. ### Create a pattern -Pattern is a single root `ngraph::Function`. But the only difference is that you do not need to create a function object, you just need to create and connect nGraph or special pattern operations. Then you need to take the last created operation and put it as a root of the pattern. This root node will be used as a root node in pattern matching. +Pattern is a single root `ngraph::Function`. But the only difference is that you do not need to create a function object, you just need to create and connect opset or special pattern operations. +Then you need to take the last created operation and put it as a root of the pattern. This root node will be used as a root node in pattern matching. > **NOTE**: Any nodes in a pattern that have no consumers and are not registered as root will not be used in pattern matching. 
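Before looking at the full snippet below, here is a condensed, hypothetical sketch of the overall shape of a MatcherPass (RTTI declaration macros are omitted for brevity); it only matches an `opset3::Relu` and does nothing with it, so treat it as a template to adapt rather than a real transformation.

```cpp
#include <ngraph/pass/graph_rewrite.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/opsets/opset3.hpp>

class MyReluMatcher : public ngraph::pass::MatcherPass {
public:
    MyReluMatcher() {
        // Pattern root: any opset3::Relu, regardless of its producer.
        auto relu = ngraph::pattern::wrap_type<ngraph::opset3::Relu>();

        ngraph::matcher_pass_callback callback = [](ngraph::pattern::Matcher& m) {
            auto relu_node = m.get_match_root();
            // ... analyze or replace relu_node here ...
            return false;  // return true only when the graph was modified
        };

        auto matcher = std::make_shared<ngraph::pattern::Matcher>(relu, "MyReluMatcher");
        register_matcher(matcher, callback);
    }
};
```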
@snippet example_ngraph_utils.cpp pattern:simple_example The `Parameter` operation in the example above has type and shape specified. These attributes are needed only to create Parameter operation class and will not be used in pattern matching. -For instructions on how to match a pattern where `ShapeOf` takes any operation as an input, follow the [pattern matching](#pattern_matching) section. +For more pattern examples, refer to the [pattern matching](#pattern_matching) section. ### Implement callback Callback is an action applied to every pattern entrance. In general, callback is the lambda function that takes Matcher object with detected subgraph. @@ -153,6 +155,8 @@ And then creates map from registered MatcherPasses. That helps to avoid addition ![graph_rewrite_efficient_search] +> **NOTE**: GraphRewrite execution algorithm cannot be set manually and depends only on root nodes registered inside MatcherPasses. + ## Pattern Matching Sometimes patterns cannot be expressed via regular nGraph operations or it is too complicated. @@ -255,7 +259,7 @@ When developing a transformation, you need to follow these transformation rules: ###1. Operation Set (OpSet) -Use the latest version of OpSet in your transformation. An exception is ConvertOpSetXToOpSetY transformations, where you must use operations from OpSetX and OpSetY. +Use the latest version of OpSet in your transformation. An exception is op_conversion transformations, where different opsets can be used. @snippet example_ngraph_utils.cpp ngraph:include @@ -399,33 +403,22 @@ NGRAPH_ENABLE_VISUALIZE_TRACING=1 - enables visualization after each transforma ## Disabling/Enabling specific transformations for plugin X -This topic is mostly related to conversion to legacy opset and plugins that are based on CNNNetwork. But this mechanism still can be applied for other cases. -Let's suppose that plugin X enabled the `opset3::StridedSlice` operation support and you want to disable the `ngraph::pass::ConvertStridedSliceToCrop` transformation for plugin X. -To do this, you need to create a callback on plugin side and pass it to transformation. And also you need to update particular transformation to use this callback. +In transformation library, we provide plugins transformations like CommonOptimizations, which contains predefined sequence of transformations. +We also provide a tool that helps to disable or partially disable particular transformations in a transformation pipeline. +For example, if a plugin uses the CommonOptimization transformation and needs to disable the ConvertGELU transformation, then inside the plugin we have to take the PassConfig instance +from pass::Manger and call disable method. -```cpp -// Update callback to be able to use m_transformation_callback if this transformation based on GraphRewrite. -ngraph::graph_rewrite_callback callback = [this](pattern::Matcher &m) { - ... 
-} - -// Use transformation_callback not to execute transformation if callback returns true for given node -if (m_transformation_callback(node)) { - return false; -} - -// Implement transformation callback and pass it directly to transformation or pass::Manager -const auto transformations_callback = [](const std::shared_ptr &node) -> bool { - return std::dynamic_pointer_cast(node) != nullptr; -}; - -// Register transformation and pass callback to pass::Manager -ngraph::pass::Manager manager; -manager.register_pass(); -// pass::Manager will set callback to all reistered transformations automatically -manager.set_callback(transformations_callback); -manager.run_passes(f); -``` +@snippet example_ngraph_utils.cpp ngraph:disable_gelu + +In some cases, we need to disable transformation for some condition: + +@snippet example_ngraph_utils.cpp ngraph:disable_callback + +In some cases, pass::Manager pipelines inside transformations may have transformations disabled by default but enabled inside plugins. + +@snippet example_ngraph_utils.cpp ngraph:disabled_by_default + +PassConfig instance taken from pass::Manager is shared across all registered transformations including nested transformations. So it does not matter where we work with this object (before passes registration or after). ## Transformations testing diff --git a/docs/nGraph_DG/nGraph_basic_concepts.md b/docs/nGraph_DG/nGraph_basic_concepts.md index a8bd0446f9d1a2..2d6bed7027258f 100644 --- a/docs/nGraph_DG/nGraph_basic_concepts.md +++ b/docs/nGraph_DG/nGraph_basic_concepts.md @@ -4,8 +4,8 @@ The nGraph represents neural networks in uniform format. User can create differe ## nGraph Function and Graph Representation -nGraph function is a very simple thing: it stores shared pointers to `ngraph::op::Result` and `ngraph::op::Parameter` operations that are inputs and outputs of the graph. -All other operations hold each other via shared pointers: child operation holds its parent (hard link). If operation has no consumers and it's not Result operation +nGraph function is a very simple thing: it stores shared pointers to `ngraph::op::Parameter`, `ngraph::op::Result` and `ngraph::op::Sink` operations that are inputs, outputs and sinks of the graph. +Sinks of the graph have no consumers and not included into results vector. All other operations hold each other via shared pointers: child operation holds its parent (hard link). If operation has no consumers and it's not Result or Sink operation (shared pointer counter is zero) then it will be destructed and won't be accessible anymore. Each operation in `ngraph::Function` has a `std::shared_ptr` type. For details on how to build an nGraph Function, see the [Build nGraph Function](./build_function.md) page. diff --git a/docs/ops/activation/Clamp_1.md b/docs/ops/activation/Clamp_1.md index 4a4151a4d18291..8ee374fdf25254 100644 --- a/docs/ops/activation/Clamp_1.md +++ b/docs/ops/activation/Clamp_1.md @@ -30,7 +30,7 @@ **Outputs**: -* **1**: Multidimensional output tensor with shape and type matching the input tensor. Required. +* **1**: Multidimensional output tensor with shape and type matching the input tensor. **Detailed description**: diff --git a/docs/ops/activation/Elu_1.md b/docs/ops/activation/Elu_1.md index bc69b40de18a0f..8d5d424d02312b 100644 --- a/docs/ops/activation/Elu_1.md +++ b/docs/ops/activation/Elu_1.md @@ -33,4 +33,4 @@ elu(x) = \left\{\begin{array}{ll} **Outputs**: -* **1**: Result of Elu function applied to the input tensor *x*. 
Floating point tensor with shape and type matching the input tensor. Required. +* **1**: Result of Elu function applied to the input tensor *x*. Floating point tensor with shape and type matching the input tensor. diff --git a/docs/ops/activation/Exp_1.md b/docs/ops/activation/Exp_1.md index c3f05c72db96f9..b5815a271603cb 100644 --- a/docs/ops/activation/Exp_1.md +++ b/docs/ops/activation/Exp_1.md @@ -14,4 +14,4 @@ **Outputs**: -* **1**: Result of Exp function applied to the input tensor *x*. Floating point tensor with shape and type matching the input tensor. Required. +* **1**: Result of Exp function applied to the input tensor *x*. Floating point tensor with shape and type matching the input tensor. diff --git a/docs/ops/activation/GELU_2.md b/docs/ops/activation/GELU_2.md index c22e72d2b99821..461defb02c9e56 100644 --- a/docs/ops/activation/GELU_2.md +++ b/docs/ops/activation/GELU_2.md @@ -28,6 +28,10 @@ Similarly, the following Gelu approximation (typical for the TensorFlow*) is rec * **1**: Multidimensional input tensor. Required. +**Outputs**: + +* **1**: Floating point tensor with shape and type matching the input tensor. + **Example** ```xml @@ -46,4 +50,4 @@ Similarly, the following Gelu approximation (typical for the TensorFlow*) is rec -``` \ No newline at end of file +``` diff --git a/docs/ops/activation/HSwish_4.md b/docs/ops/activation/HSwish_4.md index a2bf8407ea34ec..bf572c39f43f27 100644 --- a/docs/ops/activation/HSwish_4.md +++ b/docs/ops/activation/HSwish_4.md @@ -9,9 +9,9 @@ **Detailed description**: For each element from the input tensor calculates corresponding element in the output tensor with the following formula: - \f[ - HSwish(x) = x \frac{min(max(x + 3, 0), 6)}{6} - \f] +\f[ +HSwish(x) = x \frac{min(max(x + 3, 0), 6)}{6} +\f] The HSwish operation is introduced in the following [article](https://arxiv.org/pdf/1905.02244.pdf). diff --git a/docs/ops/activation/Mish_4.md b/docs/ops/activation/Mish_4.md index de8397c188825a..6163131e11073f 100644 --- a/docs/ops/activation/Mish_4.md +++ b/docs/ops/activation/Mish_4.md @@ -16,7 +16,7 @@ **Outputs**: -* **1**: Floating point tensor with shape and type matching the input tensor. Required. +* **1**: Floating point tensor with shape and type matching the input tensor. **Types** @@ -47,4 +47,4 @@ -``` \ No newline at end of file +``` diff --git a/docs/ops/activation/Sigmoid_1.md b/docs/ops/activation/Sigmoid_1.md index f14e58e53b3115..17e012061f9c70 100644 --- a/docs/ops/activation/Sigmoid_1.md +++ b/docs/ops/activation/Sigmoid_1.md @@ -24,7 +24,7 @@ **Outputs**: -* **1**: Result of Sigmoid function applied to the input tensor *x*. Floating point tensor with shape and type matching the input tensor. Required. +* **1**: Result of Sigmoid function applied to the input tensor *x*. Floating point tensor with shape and type matching the input tensor. **Example** @@ -44,4 +44,4 @@ -``` \ No newline at end of file +``` diff --git a/docs/ops/activation/SoftPlus_4.md b/docs/ops/activation/SoftPlus_4.md index 112faa2873098e..135c4cb9dccae4 100644 --- a/docs/ops/activation/SoftPlus_4.md +++ b/docs/ops/activation/SoftPlus_4.md @@ -9,9 +9,9 @@ **Detailed description**: For each element from the input tensor calculates corresponding element in the output tensor with the following formula: - \f[ - SoftPlus(x) = ln(e^{x} + 1.0) - \f] +\f[ +SoftPlus(x) = ln(e^{x} + 1.0) +\f] **Attributes**: *SoftPlus* operation has no attributes. 
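As a quick sanity check of the activation formulas reflowed above, a minimal standalone sketch (illustrative only, not part of this patch; the sample inputs are arbitrary) that mirrors the documented SoftPlus and HSwish math:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

// Reference implementations of the documented formulas.
double softplus(double x) { return std::log(std::exp(x) + 1.0); }                    // SoftPlus(x) = ln(e^x + 1.0)
double hswish(double x) { return x * std::min(std::max(x + 3.0, 0.0), 6.0) / 6.0; }  // HSwish(x) = x * min(max(x + 3, 0), 6) / 6

int main() {
    for (double x : {-4.0, -1.0, 0.0, 1.0, 4.0}) {
        std::printf("x = %5.1f  softplus = %8.5f  hswish = %8.5f\n", x, softplus(x), hswish(x));
    }
    return 0;
}
```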
diff --git a/docs/ops/arithmetic/Sinh_1.md b/docs/ops/arithmetic/Sinh_1.md index a4a0264f31c59e..7bac2216a42af3 100644 --- a/docs/ops/arithmetic/Sinh_1.md +++ b/docs/ops/arithmetic/Sinh_1.md @@ -16,7 +16,7 @@ **Outputs** -* **1**: The result of element-wise sinh operation. A tensor of type T. +* **1**: The result of element-wise sinh operation. A tensor of type *T*. **Types** @@ -47,4 +47,4 @@ a_{i} = sinh(a_{i}) -``` \ No newline at end of file +``` diff --git a/docs/ops/arithmetic/Tanh_1.md b/docs/ops/arithmetic/Tanh_1.md index 9f6e2d8079fb2f..d50fe2fdade034 100644 --- a/docs/ops/arithmetic/Tanh_1.md +++ b/docs/ops/arithmetic/Tanh_1.md @@ -14,7 +14,7 @@ **Outputs**: -* **1**: Result of Tanh function applied to the input tensor *x*. Floating point tensor with shape and type matching the input tensor. Required. +* **1**: Result of Tanh function applied to the input tensor *x*. Floating point tensor with shape and type matching the input tensor. **Detailed description** @@ -22,4 +22,4 @@ For each element from the input tensor calculates corresponding element in the output tensor with the following formula: \f[ tanh ( x ) = \frac{2}{1+e^{-2x}} - 1 = 2sigmoid(2x) - 1 -\f] \ No newline at end of file +\f] diff --git a/docs/ops/convolution/BinaryConvolution_1.md b/docs/ops/convolution/BinaryConvolution_1.md index d6aabd0b20c11a..21d3a5b7fb5f2b 100644 --- a/docs/ops/convolution/BinaryConvolution_1.md +++ b/docs/ops/convolution/BinaryConvolution_1.md @@ -35,4 +35,5 @@ The operation has the same attributes as a regular *Convolution* layer and sever **Outputs**: -* **1**: output tensor containing float values. Required. +* **1**: output tensor containing float values. + diff --git a/docs/ops/detection/Proposal_4.md b/docs/ops/detection/Proposal_4.md index 8bf9dca4969bff..a22cd1684c64b1 100644 --- a/docs/ops/detection/Proposal_4.md +++ b/docs/ops/detection/Proposal_4.md @@ -153,7 +153,7 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil * **1**: tensor of type *T* and shape `[batch_size * post_nms_topn, 5]`. -* **2**: tensor of type *T* and shape `[batch_size * post_nms_topn]` with probabilities. *Optional*. +* **2**: tensor of type *T* and shape `[batch_size * post_nms_topn]` with probabilities. **Types** @@ -191,4 +191,4 @@ the second optional tensor of shape `[batch_size * post_nms_topn]` with probabil -``` \ No newline at end of file +``` diff --git a/docs/ops/detection/ROIPooling_1.md b/docs/ops/detection/ROIPooling_1.md index 4ab319875dc45f..7699b133f3c128 100644 --- a/docs/ops/detection/ROIPooling_1.md +++ b/docs/ops/detection/ROIPooling_1.md @@ -6,7 +6,18 @@ **Short description**: *ROIPooling* is a *pooling layer* used over feature maps of non-uniform input sizes and outputs a feature map of a fixed size. -**Detailed description**: [deepsense.io reference](https://blog.deepsense.ai/region-of-interest-pooling-explained/) +**Detailed description**: + +*ROIPooling* performs the following operations for each Region of Interest (ROI) over the input feature maps: +1. Produce box coordinates relative to the input feature map size, based on *method* attribute. +2. Calculate box height and width. +3. Divide the box into bins according to the pooled size attributes, `[pooled_h, pooled_w]`. +4. Apply maximum or bilinear interpolation pooling, for each bin, based on *method* attribute to produce output feature map element. + +The box height and width have different representation based on **method** attribute: + * *max*: Expressed in relative coordinates. 
The box height and width are calculated the following way: `roi_width = max(spatial_scale * (x_2 - x_1), 1.0)`, +`roi_height = max(spatial_scale * (y_2 - y_1), 1.0)`, so the malformed boxes are expressed as a box of size `1 x 1`. + * *bilinear*: Expressed in absolute coordinates and normalized to the `[0, 1]` interval. The box height and width are calculated the following way: `roi_width = (W - 1) * (x_2 - x_1)`, `roi_height = (H - 1) * (y_2 - y_1)`. **Attributes** @@ -44,13 +55,19 @@ **Inputs**: -* **1**: 4D input tensor of shape `[1, C, H, W]` with feature maps. Required. +* **1**: 4D input tensor of shape `[N, C, H, W]` with feature maps of type *T*. Required. + +* **2**: 2D input tensor of shape `[NUM_ROIS, 5]` describing region of interest box consisting of 5 element tuples of type *T*: `[batch_id, x_1, y_1, x_2, y_2]`. Required. +Batch indices must be in the range of `[0, N-1]`. -* **2**: 2D input tensor of shape `[NUM_ROIS, 5]` describing box consisting of 5 element tuples: `[batch_id, x_1, y_1, x_2, y_2]`. Required. **Outputs**: -* **1**: 4D output tensor of shape `[NUM_ROIS, C, pooled_h, pooled_w]` with feature maps. Required. +* **1**: 4D output tensor of shape `[NUM_ROIS, C, pooled_h, pooled_w]` with feature maps of type *T*. + +**Types** + +* *T*: any supported floating point type. **Example** @@ -60,4 +77,4 @@ ... ... -``` \ No newline at end of file +``` diff --git a/docs/ops/detection/ReorgYolo_1.md b/docs/ops/detection/ReorgYolo_1.md index 4801e5f750fbe2..156657a98ba32f 100644 --- a/docs/ops/detection/ReorgYolo_1.md +++ b/docs/ops/detection/ReorgYolo_1.md @@ -26,7 +26,7 @@ **Outputs**: -* **1**: 4D output tensor of the same type as input tensor and shape `[N, C*stride*stride, H/stride, W/stride]`. Required. +* **1**: 4D output tensor of the same type as input tensor and shape `[N, C*stride*stride, H/stride, W/stride]`. **Example** diff --git a/docs/ops/pooling/AvgPool_1.md b/docs/ops/pooling/AvgPool_1.md index dfa04c476b02ed..b8f0ecb2f31ff3 100644 --- a/docs/ops/pooling/AvgPool_1.md +++ b/docs/ops/pooling/AvgPool_1.md @@ -78,9 +78,9 @@ **Mathematical Formulation** - \f[ - output_{j} = \frac{\sum_{i = 0}^{n}x_{i}}{n} - \f] +\f[ +output_{j} = \frac{\sum_{i = 0}^{n}x_{i}}{n} +\f] **Example** diff --git a/docs/ops/pooling/MaxPool_1.md b/docs/ops/pooling/MaxPool_1.md index 6e705e49a22c8e..6c54d387913e12 100644 --- a/docs/ops/pooling/MaxPool_1.md +++ b/docs/ops/pooling/MaxPool_1.md @@ -52,15 +52,16 @@ * *floor* * **Type**: string * **Default value**: *floor* + * **Required**: *no* * *auto_pad* * **Description**: *auto_pad* how the padding is calculated. Possible values: - * None (not specified): use explicit padding values. + * *explicit*: use explicit padding values. * *same_upper (same_lower)* the input is padded to match the output size. In case of odd padding value an extra padding is added at the end (at the beginning). * *valid* - do not use padding. * **Type**: string - * **Default value**: None + * **Default value**: *explicit* * **Required**: *no* * **Note**: *pads_begin* and *pads_end* attributes are ignored when *auto_pad* is specified. @@ -71,7 +72,7 @@ **Mathematical Formulation** \f[ - output_{j} = MAX\{ x_{0}, ... x_{i}\} + output_{j} = max(x_{0}, ..., x_{i}) \f] **Example** diff --git a/docs/ops/shape/Squeeze_1.md b/docs/ops/shape/Squeeze_1.md index a17120e3137423..9dff893cd8a419 100644 --- a/docs/ops/shape/Squeeze_1.md +++ b/docs/ops/shape/Squeeze_1.md @@ -14,6 +14,10 @@ * **2**: 0D or 1D tensor of type *T_SHAPE* with dimensions indices to squeeze. 
Values could be negative. *Optional*. +**Outputs**: + +* **1**: Tensor with squeezed values of type *T*. + **Types** * *T*: supported type. @@ -65,4 +69,4 @@ -``` \ No newline at end of file +``` diff --git a/docs/ops/shape/Unsqueeze_1.md b/docs/ops/shape/Unsqueeze_1.md index 371882a2c92eda..7518605313116b 100644 --- a/docs/ops/shape/Unsqueeze_1.md +++ b/docs/ops/shape/Unsqueeze_1.md @@ -14,6 +14,10 @@ * **2**: OD or 1D tensor of type *T_SHAPE* with dimensions indices to be set to 1. Values could be negative. *Required*. +**Outputs**: + +* **1**: Tensor with unsqueezed values of type *T*. + **Types** * *T*: supported type. @@ -65,4 +69,4 @@ -``` \ No newline at end of file +``` diff --git a/docs/ops/sort/NonMaxSuppression_5.md b/docs/ops/sort/NonMaxSuppression_5.md index a60b0206392794..6fc70ed7424999 100644 --- a/docs/ops/sort/NonMaxSuppression_5.md +++ b/docs/ops/sort/NonMaxSuppression_5.md @@ -72,7 +72,7 @@ class must not exceed `max_output_boxes_per_class`. * **2**: `selected_scores` - tensor of type *T_THRESHOLDS* and shape `[number of selected boxes, 3]` containing information about scores for each selected box as triplets `[batch_index, class_index, box_score]`. -* **3**: `valid_outputs` - 1D tensor with 1 element of type *T_IND* representing the total number of selected boxes. Optional. +* **3**: `valid_outputs` - 1D tensor with 1 element of type *T_IND* representing the total number of selected boxes. Plugins which do not support dynamic output tensors produce `selected_indices` and `selected_scores` tensors of shape `[min(num_boxes, max_output_boxes_per_class) * num_batches * num_classes, 3]` which is an upper bound for the number of possible selected boxes. Output tensor elements following the really selected boxes are filled with value -1. diff --git a/docs/ops/sort/TopK_3.md b/docs/ops/sort/TopK_3.md index 7bf5c828cb62f8..d3a03ae2b6f58d 100644 --- a/docs/ops/sort/TopK_3.md +++ b/docs/ops/sort/TopK_3.md @@ -51,7 +51,7 @@ * **1**: Output tensor of type *T* with top *k* values from the input tensor along specified dimension *axis*. The shape of the tensor is `[input1.shape[0], ..., input1.shape[axis-1], k, input1.shape[axis+1], ...]`. -* **2**: Output tensor with top *k* indices for each slice along *axis* dimension of type *T_IND*. The shape of the tensor is the same as for the 1st output, that is `[input1.shape[0], ..., input1.shape[axis-1], k, input1.shape[axis+1], ...]` +* **2**: Output tensor with top *k* indices for each slice along *axis* dimension of type *T_IND*. The shape of the tensor is the same as for the 1st output, that is `[input1.shape[0], ..., input1.shape[axis-1], k, input1.shape[axis+1], ...]`. **Types** diff --git a/docs/optimization_guide/dldt_optimization_guide.md b/docs/optimization_guide/dldt_optimization_guide.md index d523ce66dace0d..73e99437ac8828 100644 --- a/docs/optimization_guide/dldt_optimization_guide.md +++ b/docs/optimization_guide/dldt_optimization_guide.md @@ -110,6 +110,26 @@ Also: The resulting IR precision, for instance, `FP16` or `FP32`, directly affects performance. As CPU now supports `FP16` (while internally upscaling to `FP32` anyway) and because this is the best precision for a GPU target, you may want to always convert models to `FP16`. Notice that this is the only precision that Intel® Movidius™ Myriad™ 2 and Intel® Myriad™ X VPUs support. +## Multi-Device Execution +OpenVINO™ toolkit supports automatic multi-device execution, please see [MULTI-Device plugin description](../IE_DG/supported_plugins/MULTI.md). 
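A minimal sketch of the multi-device flow this new section describes, assuming the 2021-era C++ Inference Engine API; the `model.xml` path and the `MULTI:GPU,CPU` device list are placeholders, and the recommendations that follow explain why the fastest device is listed first and why the optimal request count is queried from the ExecutableNetwork:

```cpp
#include <inference_engine.hpp>
#include <vector>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // placeholder IR path

    // Fastest device first, as recommended below.
    auto exec_network = core.LoadNetwork(network, "MULTI:GPU,CPU");

    // Ask the ExecutableNetwork itself how many requests are needed to keep all devices busy.
    auto nireq = exec_network.GetMetric(
        METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();

    // Create the requests in the order they will be started.
    std::vector<InferenceEngine::InferRequest> requests;
    for (unsigned int i = 0; i < nireq; ++i) {
        requests.push_back(exec_network.CreateInferRequest());
    }
    return 0;
}
```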
+In the next chapter you can find the device-specific tips, while this section covers a few recommendations +for the multi-device execution: +- MULTI usually performs best when the fastest device is specified first in the list of the devices. + This is particularly important when the parallelism is not sufficient + (e.g. the number of requests in flight is not enough to saturate all devices). +- It is highly recommended to query the optimal number of inference requests directly from the instance of the ExecutableNetwork + (resulting from the LoadNetwork call with the specific multi-device configuration as a parameter). +Please refer to the code of the [Benchmark App](../../inference-engine/samples/benchmark_app/README.md) sample for details. +- Notice that, for example, CPU+GPU execution performs better with certain knobs + which you can find in the code of the same [Benchmark App](../../inference-engine/samples/benchmark_app/README.md) sample. + One specific example is disabling GPU driver polling, which in turn requires multiple GPU streams (which is already a default for the GPU) to amortize slower + inference completion from the device to the host. +- Multi-device logic always attempts to save on the (e.g. inputs) data copies between device-agnostic, user-facing inference requests + and device-specific 'worker' requests that are being actually scheduled behind the scenes. + To facilitate the copy savings, it is recommended to start the requests in the order that they were created + (with ExecutableNetwork's CreateInferRequest). + + ## Device-Specific Optimizations The Inference Engine supports several target devices (CPU, GPU, Intel® Movidius™ Myriad™ 2 VPU, Intel® Movidius™ Myriad™ X VPU, Intel® Vision Accelerator Design with Intel® Movidius™ Vision Processing Units (VPU) and FPGA), and each of them has a corresponding plugin. If you want to optimize a specific device, you must keep in mind the following tips to increase the performance. @@ -123,7 +143,7 @@ The only hint you can get from that is how the major primitives are accelerated Internally, the Inference Engine has a threading abstraction level, which allows for compiling the [open source version](https://github.com/opencv/dldt) with either Intel® Threading Building Blocks (Intel® TBB) which is now default, or OpenMP* as an alternative parallelism solution. When using inference on the CPU, this is particularly important to align threading model with the rest of your application (and any third-party libraries that you use) to avoid oversubscription. For more information, see Note on the App-Level Threading section. Since R1 2019, the OpenVINO™ toolkit comes pre-compiled with Intel TBB, - so any OpenMP* API or environment settings (like `OMP_NUM_THREADS`) has no effect anymore. + so any OpenMP* API or environment settings (like `OMP_NUM_THREADS`) has no effect. Certain tweaks (like number of threads used for inference on the CPU) are still possible via [CPU configuration options](../IE_DG/supported_plugins/CPU.md). Finally, the OpenVINO CPU inference is NUMA-aware, please refer to the Tips for inference on NUMA systems section. @@ -332,7 +352,7 @@ In many cases, a network expects a pre-processed image, so make sure you do not - Model Optimizer can efficiently bake the mean and normalization (scale) values into the model (for example, weights of the first convolution). See Model Optimizer Knobs Related to Performance.
- If regular 8-bit per channel images are your native media (for instance, decoded frames), do not convert to the `FP32` on your side, as this is something that plugins can accelerate. Use the `InferenceEngine::Precision::U8` as your input format:
-@snippet openvino/docs/snippets/dldt_optimization_guide1.cpp part1 +@snippet snippets/dldt_optimization_guide1.cpp part1 Note that in many cases, you can directly share the (input) data with the Inference Engine. @@ -342,15 +362,15 @@ The general approach for sharing data between Inference Engine and media/graphic For Intel MSS, it is recommended to perform a viable pre-processing, for example, crop/resize, and then convert to RGB again with the [Video Processing Procedures (VPP)](https://software.intel.com/en-us/node/696108). Then lock the result and create an Inference Engine blob on top of that. The resulting pointer can be used for the `SetBlob`: -@snippet openvino/docs/snippets/dldt_optimization_guide2.cpp part2 +@snippet snippets/dldt_optimization_guide2.cpp part2 **WARNING**: The `InferenceEngine::NHWC` layout is not supported natively by most InferenceEngine plugins so internal conversion might happen. -@snippet openvino/docs/snippets/dldt_optimization_guide3.cpp part3 +@snippet snippets/dldt_optimization_guide3.cpp part3 Alternatively, you can use RGBP (planar RGB) output from Intel MSS. This allows to wrap the (locked) result as regular NCHW which is generally friendly for most plugins (unlike NHWC). Then you can use it with `SetBlob` just like in previous example: -@snippet openvino/docs/snippets/dldt_optimization_guide4.cpp part4 +@snippet snippets/dldt_optimization_guide4.cpp part4 The only downside of this approach is that VPP conversion to RGBP is not hardware accelerated (and performed on the GPU EUs). Also, it is available only on LInux. @@ -362,7 +382,7 @@ Again, if the OpenCV and Inference Engine layouts match, the data can be wrapped **WARNING**: The `InferenceEngine::NHWC` layout is not supported natively by most InferenceEngine plugins so internal conversion might happen. -@snippet openvino/docs/snippets/dldt_optimization_guide5.cpp part5 +@snippet snippets/dldt_optimization_guide5.cpp part5 Notice that original `cv::Mat`/blobs cannot be used simultaneously by the application and the Inference Engine. Alternatively, the data that the pointer references to can be copied to unlock the original data and return ownership to the original API. @@ -372,7 +392,7 @@ Infer Request based API offers two types of request: Sync and Async. The Sync is More importantly, an infer request encapsulates the reference to the “executable” network and actual inputs/outputs. Now, when you load the network to the plugin, you get a reference to the executable network (you may consider that as a queue). Actual infer requests are created by the executable network: -@snippet openvino/docs/snippets/dldt_optimization_guide6.cpp part6 +@snippet snippets/dldt_optimization_guide6.cpp part6 `GetBlob` is a recommend way to communicate with the network, as it internally allocates the data with right padding/alignment for the device. For example, the GPU inputs/outputs blobs are mapped to the host (which is fast) if the `GetBlob` is used. But if you called the `SetBlob`, the copy (from/to the blob you have set) into the internal GPU plugin structures will happen. @@ -383,7 +403,7 @@ If your application simultaneously executes multiple infer requests: - For the CPU, the best solution, you can use the CPU "throughput" mode. - If latency is of more concern, you can try the `EXCLUSIVE_ASYNC_REQUESTS` [configuration option](../IE_DG/supported_plugins/CPU.md) that limits the number of the simultaneously executed requests for all (executable) networks that share the specific device to just one:
-@snippet openvino/docs/snippets/dldt_optimization_guide7.cpp part7 +@snippet snippets/dldt_optimization_guide7.cpp part7
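Purely as an illustration of the option discussed here (a sketch, not the referenced snippet's actual content; it assumes the CPU plugin and a placeholder `model.xml` IR), the setting can be passed through the regular LoadNetwork config map:

```cpp
#include <inference_engine.hpp>
#include <map>
#include <string>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // placeholder IR path

    // Limit simultaneously executed requests to one for all executable networks
    // sharing this device; the key/value constants come from ie_plugin_config.hpp.
    std::map<std::string, std::string> config = {
        {CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(YES)}
    };
    auto exec_network = core.LoadNetwork(network, "CPU", config);

    auto request = exec_network.CreateInferRequest();
    request.Infer();
    return 0;
}
```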
For more information on the executable networks notation, see Request-Based API and “GetBlob” Idiom. @@ -407,13 +427,13 @@ You can compare the pseudo-codes for the regular and async-based approaches: - In the regular way, the frame is captured with OpenCV and then immediately processed:
-@snippet openvino/docs/snippets/dldt_optimization_guide8.cpp part8 +@snippet snippets/dldt_optimization_guide8.cpp part8 ![Intel® VTune™ screenshot](../img/vtune_regular.png) - In the "true" async mode, the `NEXT` request is populated in the main (application) thread, while the `CURRENT` request is processed:
-@snippet openvino/docs/snippets/dldt_optimization_guide9.cpp part9 +@snippet snippets/dldt_optimization_guide9.cpp part9 ![Intel® VTune™ screenshot](../img/vtune_async.png) diff --git a/docs/snippets/CMakeLists.txt b/docs/snippets/CMakeLists.txt index a5f1d238144823..25e7cadf7263a6 100644 --- a/docs/snippets/CMakeLists.txt +++ b/docs/snippets/CMakeLists.txt @@ -54,4 +54,4 @@ if(NGRAPH_ONNX_IMPORT_ENABLE) target_link_libraries(${TARGET_NAME} PRIVATE onnx_importer) endif() -target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api ngraph) +target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api ngraph inference_engine_transformations) diff --git a/docs/snippets/example_ngraph_utils.cpp b/docs/snippets/example_ngraph_utils.cpp index 1b17fc157e7d51..44780aa4615e2f 100644 --- a/docs/snippets/example_ngraph_utils.cpp +++ b/docs/snippets/example_ngraph_utils.cpp @@ -5,6 +5,11 @@ #include #include +#include +#include +#include +#include +#include // ! [ngraph:include] #include @@ -134,7 +139,7 @@ Output output = node->output(0); auto pshape = data.get_partial_shape(); auto el_type = data.get_element_type(); -// Ggetting parent for input port +// Getting parent for input port Output parent_output; parent_output = data.get_source_output(); @@ -249,3 +254,61 @@ void visualization_example(std::shared_ptr f) { manager.run_passes(f); } // ! [ngraph:visualize] + +void pass_manager_example1(std::shared_ptr f) { +// ! [ngraph:disable_gelu] +ngraph::pass::Manager manager; +manager.register_pass(); + +auto pass_config = manager.get_pass_config(); +pass_config->disable(); + +manager.run_passes(f); +// ! [ngraph:disable_gelu] +} + +void pass_manager_example2(std::shared_ptr f) { + ngraph::pass::Manager manager; + std::function)> transformation_callback; +// ! [ngraph:disable_callback] +// Set callback to particular transformation with specific condition +auto pass_config = manager.get_pass_config(); +pass_config->set_callback( + [](const std::shared_ptr &node) -> bool { + return node->input_value(0).get_shape().size() <= 5lu && + node->input_value(0).get_shape().size() == node->get_output_shape(0).size(); + }); + +// Update transformation to call callback +ngraph::matcher_pass_callback callback = [=](pattern::Matcher &m) { + auto node = m.get_match_root(); + if (transformation_callback(node)) { + return false; + } + // transformation code + return false; +}; +// ! [ngraph:disable_callback] +} + +void pass_manager_example3(std::shared_ptr f) { + std::function)> transformation_callback; +// ! [ngraph:disabled_by_default] +// Example of disabled by default transformation +{ + ngraph::pass::Manager manager; + manager.register_pass(); + manager.run_passes(f); +} + +// Enable disabled by default transformation inside plugin +{ + ngraph::pass::Manager manager; + manager.register_pass(); + auto pass_config = manager.get_pass_config(); + pass_config->enable(); + manager.run_passes(f); +} +// ! [ngraph:disabled_by_default] +} \ No newline at end of file diff --git a/docs/template_plugin/src/template_function_transformation.cpp b/docs/template_plugin/src/template_function_transformation.cpp index a33994effc542e..731ebdb4096d60 100644 --- a/docs/template_plugin/src/template_function_transformation.cpp +++ b/docs/template_plugin/src/template_function_transformation.cpp @@ -8,6 +8,8 @@ using namespace ngraph; // ! 
[function_pass:template_transformation_cpp] // template_function_transformation.cpp +NGRAPH_RTTI_DEFINITION(ngraph::pass::MyFunctionTransformation, "MyFunctionTransformation", 0); + bool pass::MyFunctionTransformation::run_on_function(std::shared_ptr f) { // Example transformation code NodeVector nodes; diff --git a/docs/template_plugin/src/template_function_transformation.hpp b/docs/template_plugin/src/template_function_transformation.hpp index 5691e8bb1a92c0..2876c6e8f53aa6 100644 --- a/docs/template_plugin/src/template_function_transformation.hpp +++ b/docs/template_plugin/src/template_function_transformation.hpp @@ -18,6 +18,7 @@ class MyFunctionTransformation; // template_function_transformation.hpp class ngraph::pass::MyFunctionTransformation: public ngraph::pass::FunctionPass { public: + NGRAPH_RTTI_DECLARATION; bool run_on_function(std::shared_ptr f) override; }; // ! [function_pass:template_transformation_hpp] diff --git a/docs/template_plugin/src/template_pattern_transformation.cpp b/docs/template_plugin/src/template_pattern_transformation.cpp index 0dc0cf6b51fc80..cda77d889c8533 100644 --- a/docs/template_plugin/src/template_pattern_transformation.cpp +++ b/docs/template_plugin/src/template_pattern_transformation.cpp @@ -14,6 +14,8 @@ using namespace ngraph; // ! [graph_rewrite:template_transformation_cpp] // template_pattern_transformation.cpp +NGRAPH_RTTI_DEFINITION(ngraph::pass::DecomposeDivideMatcher, "DecomposeDivideMatcher", 0); + ngraph::pass::DecomposeDivideMatcher::DecomposeDivideMatcher() { // Pattern example auto input0 = pattern::any_input(); @@ -54,6 +56,8 @@ ngraph::pass::DecomposeDivideMatcher::DecomposeDivideMatcher() { // ! [graph_rewrite:template_transformation_cpp] // ! [matcher_pass:relu_fusion] +NGRAPH_RTTI_DEFINITION(ngraph::pass::ReluReluFusionMatcher, "ReluReluFusionMatcher", 0); + ngraph::pass::ReluReluFusionMatcher::ReluReluFusionMatcher() { auto m_relu1 = ngraph::pattern::wrap_type(pattern::consumers_count(1)); auto m_relu2 = ngraph::pattern::wrap_type({m_relu1}); diff --git a/docs/template_plugin/src/template_pattern_transformation.hpp b/docs/template_plugin/src/template_pattern_transformation.hpp index b51a2330a51ded..68dc777ebb4334 100644 --- a/docs/template_plugin/src/template_pattern_transformation.hpp +++ b/docs/template_plugin/src/template_pattern_transformation.hpp @@ -23,11 +23,13 @@ class ReluReluFusionMatcher; */ class ngraph::pass::DecomposeDivideMatcher: public ngraph::pass::MatcherPass { public: + NGRAPH_RTTI_DECLARATION; DecomposeDivideMatcher(); }; // ! 
[graph_rewrite:template_transformation_hpp] class ngraph::pass::ReluReluFusionMatcher: public ngraph::pass::MatcherPass { public: + NGRAPH_RTTI_DECLARATION; ReluReluFusionMatcher(); }; diff --git a/docs/template_plugin/src/template_plugin.cpp b/docs/template_plugin/src/template_plugin.cpp index 0824c707c45c76..c66b22c46156f0 100644 --- a/docs/template_plugin/src/template_plugin.cpp +++ b/docs/template_plugin/src/template_plugin.cpp @@ -80,14 +80,11 @@ std::shared_ptr TransformNetwork(const std::shared_ptr &config) override; InferenceEngine::QueryNetworkResult - QueryNetwork(const InferenceEngine::ICNNNetwork &network, + QueryNetwork(const InferenceEngine::CNNNetwork &network, const std::map& config) const override; InferenceEngine::ExecutableNetworkInternal::Ptr - LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork &network, + LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std::map &config) override; void AddExtension(InferenceEngine::IExtensionPtr extension) override; InferenceEngine::Parameter GetConfig(const std::string& name, const std::map & options) const override; diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt index 5de49d128b0444..e8b50063529944 100644 --- a/inference-engine/CMakeLists.txt +++ b/inference-engine/CMakeLists.txt @@ -89,11 +89,6 @@ endfunction() # they must be built even if samples build is disabled (required for tests and tools). ie_build_samples() -file(GLOB_RECURSE SAMPLES_SOURCES samples/*.cpp samples/*.hpp samples/*.h) -add_cpplint_target(sample_cpplint - FOR_SOURCES ${SAMPLES_SOURCES} - EXCLUDE_PATTERNS "thirdparty/*" "pugixml/*") - if (ENABLE_PYTHON) add_subdirectory(ie_bridges/python) endif() diff --git a/inference-engine/cmake/add_ie_target.cmake b/inference-engine/cmake/add_ie_target.cmake index 35b96542f1e7aa..f6d4dd19ca6a5f 100644 --- a/inference-engine/cmake/add_ie_target.cmake +++ b/inference-engine/cmake/add_ie_target.cmake @@ -126,6 +126,11 @@ function(addIeTarget) ie_developer_export_targets(${ARG_NAME} ${ARG_EXPORT_DEPENDENCIES}) endif() endif() + if(WIN32) + # Provide default compile pdb name equal to target name + set_target_properties(${ARG_NAME} PROPERTIES COMPILE_PDB_NAME ${ARG_NAME}) + endif() + endfunction() #[[ diff --git a/inference-engine/cmake/dependencies.cmake b/inference-engine/cmake/dependencies.cmake index 1cf5464ac92f87..4c1a7ff18d1e31 100644 --- a/inference-engine/cmake/dependencies.cmake +++ b/inference-engine/cmake/dependencies.cmake @@ -126,6 +126,13 @@ if (THREADING STREQUAL "OMP") update_deps_cache(OMP "${OMP}" "Path to OMP root folder") log_rpath_from_dir(OMP "${OMP}/lib") debug_message(STATUS "intel_omp=" ${OMP}) + + ie_cpack_add_component(omp) + file(GLOB_RECURSE source_list "${OMP}/*${CMAKE_SHARED_LIBRARY_SUFFIX}*") + install(FILES ${source_list} + DESTINATION "deployment_tools/inference_engine/external/omp/lib" + COMPONENT omp) + endif () ## TBB package diff --git a/inference-engine/cmake/developer_package_ie.cmake b/inference-engine/cmake/developer_package_ie.cmake index 7a44bc117a5e15..86e9b111774b3c 100644 --- a/inference-engine/cmake/developer_package_ie.cmake +++ b/inference-engine/cmake/developer_package_ie.cmake @@ -2,9 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -include(cpplint) -include(clang_format) - set(TBB_FIND_RELEASE_ONLY ${ENABLE_TBB_RELEASE_ONLY}) include(plugins/plugins) diff --git a/inference-engine/cmake/features_ie.cmake b/inference-engine/cmake/features_ie.cmake index df4105ccf33bd9..1f65b910867f9f 100644 --- 
a/inference-engine/cmake/features_ie.cmake +++ b/inference-engine/cmake/features_ie.cmake @@ -98,14 +98,6 @@ ie_option (ENABLE_PYTHON "enables ie python bridge build" OFF) ie_option (ENABLE_V7_SERIALIZE "enables serialization to IR v7" OFF) -ie_option (ENABLE_JAVA "enables ie java bridge build" OFF) - -ie_dependent_option(ENABLE_CPPLINT "Enable cpplint checks during the build" ON "UNIX;NOT ANDROID" OFF) - -ie_dependent_option(ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF "ENABLE_CPPLINT" OFF) - -ie_option(ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" ON) - set(IE_EXTRA_MODULES "" CACHE STRING "Extra paths for extra modules to include into OpenVINO build") ie_dependent_option(ENABLE_TBB_RELEASE_ONLY "Only Release TBB libraries are linked to the Inference Engine binaries" ON "THREADING MATCHES TBB;LINUX" OFF) diff --git a/inference-engine/cmake/ie_parallel.cmake b/inference-engine/cmake/ie_parallel.cmake index de39ac7cfb4743..f8988aa1057634 100644 --- a/inference-engine/cmake/ie_parallel.cmake +++ b/inference-engine/cmake/ie_parallel.cmake @@ -7,6 +7,7 @@ function(set_ie_threading_interface_for TARGET_NAME) find_package(TBB COMPONENTS tbb tbbmalloc) set("TBB_FOUND" ${TBB_FOUND} PARENT_SCOPE) set("TBB_IMPORTED_TARGETS" ${TBB_IMPORTED_TARGETS} PARENT_SCOPE) + set("TBB_VERSION" ${TBB_VERSION} PARENT_SCOPE) if (TBB_FOUND) if (TBB_VERSION VERSION_LESS 2020) ext_message(WARNING "TBB version is less than OpenVINO recommends to use.\ diff --git a/inference-engine/cmake/plugins/plugins.cmake b/inference-engine/cmake/plugins/plugins.cmake index 52b033d87b3970..683f02ff0a8fe4 100644 --- a/inference-engine/cmake/plugins/plugins.cmake +++ b/inference-engine/cmake/plugins/plugins.cmake @@ -106,7 +106,6 @@ function(ie_add_plugin) install(TARGETS ${IE_PLUGIN_NAME} RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ${install_component} - ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT ${install_component} LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ${install_component}) endif() endfunction() diff --git a/inference-engine/cmake/vpu_dependencies.cmake b/inference-engine/cmake/vpu_dependencies.cmake index 51a1a1be6329d0..a20f2b6d0c6674 100644 --- a/inference-engine/cmake/vpu_dependencies.cmake +++ b/inference-engine/cmake/vpu_dependencies.cmake @@ -13,16 +13,16 @@ endif() include(dependency_solver) -set(VPU_SUPPORTED_FIRMWARES usb-ma2x8x pcie-ma248x) +set(VPU_SUPPORTED_FIRMWARES usb-ma2x8x pcie-ma2x8x) set(VPU_SUPPORTED_FIRMWARES_HASH - "67f91ad33170ac6304772f8f7bbb9ea92bf41a86c080980644a12f66a5ef956c" - "bfb1b2e465e4b3c7d003a54f2d910c872a042c5b09e77a0fb12913fe253d53ae") + "becaeea32805cc59a59fced0ed08235255a43a3c8535a36fa376351607b24ad6" + "fa0303c0c073c68076190cb71ce8bf1cc04ade74ca9a7b5a538ceb99d24d3289") # # Default packages # -set(FIRMWARE_PACKAGE_VERSION 1508) +set(FIRMWARE_PACKAGE_VERSION 1521) set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.09.2") # @@ -37,7 +37,7 @@ foreach(idx RANGE 0 ${num_firmwares}) set(firmware_name_full ${firmware_name}.mvcmd) # Handle PCIe elf firmware for Windows - if (WIN32 AND "${firmware_name}" STREQUAL "pcie-ma248x") + if (WIN32 AND "${firmware_name}" STREQUAL "pcie-ma2x8x") set(firmware_name_full ${firmware_name}.elf) endif () @@ -76,7 +76,7 @@ foreach(firmware_name IN LISTS VPU_SUPPORTED_FIRMWARES) set(firmware_out_file "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_CFG_INTDIR}/${firmware_name}.mvcmd") # Handle PCIe elf firmware for Windows - if (WIN32 AND "${firmware_name}" STREQUAL "pcie-ma248x") + if 
(WIN32 AND "${firmware_name}" STREQUAL "pcie-ma2x8x") set(firmware_out_file "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_CFG_INTDIR}/${firmware_name}.elf") endif () diff --git a/inference-engine/ie_bridges/c/docs/api_overview.md b/inference-engine/ie_bridges/c/docs/api_overview.md index 4f69111a91d6fe..bad0d4ebc4d5cb 100644 --- a/inference-engine/ie_bridges/c/docs/api_overview.md +++ b/inference-engine/ie_bridges/c/docs/api_overview.md @@ -120,6 +120,10 @@ enum precision_e{ ​ FP16 = 11, /**< 16bit floating point value */ + BF16 = 12, /**< 16bit floating point value, 8 bit for exponent, 7 bit for mantisa*/ + + FP64 = 13, /**< 64bit floating point value */ + ​ Q78 = 20, /**< 16bit specific signed fixed point precision */ ​ I16 = 30, /**< 16bit signed integer value */ diff --git a/inference-engine/ie_bridges/c/include/c_api/ie_c_api.h b/inference-engine/ie_bridges/c/include/c_api/ie_c_api.h index 4801d6e2ea5d62..fca7493be1c2bc 100644 --- a/inference-engine/ie_bridges/c/include/c_api/ie_c_api.h +++ b/inference-engine/ie_bridges/c/include/c_api/ie_c_api.h @@ -45,7 +45,7 @@ #endif #ifndef INFERENCE_ENGINE_C_API_CALLBACK -#define INFERENCE_ENGINE_C_API_CALLBACK + #define INFERENCE_ENGINE_C_API_CALLBACK #endif typedef struct ie_core ie_core_t; @@ -59,39 +59,39 @@ typedef struct ie_blob ie_blob_t; * @brief Represents an API version information that reflects the set of supported features */ typedef struct ie_version { - char *api_version; -}ie_version_t; + char *api_version; //!< A string representing Inference Engine version +} ie_version_t; /** * @struct ie_core_version * @brief Represents version information that describes devices and the inference engine runtime library */ typedef struct ie_core_version { - size_t major; - size_t minor; - const char *device_name; - const char *build_number; - const char *description; -}ie_core_version_t; + size_t major; //!< A major version + size_t minor; //!< A minor version + const char *device_name; //!< A device name + const char *build_number; //!< A build number + const char *description; //!< A device description +} ie_core_version_t; /** * @struct ie_core_versions * @brief Represents all versions information that describes all devices and the inference engine runtime library */ typedef struct ie_core_versions { - ie_core_version_t *versions; - size_t num_vers; -}ie_core_versions_t; + ie_core_version_t *versions; //!< An array of device versions + size_t num_vers; //!< A number of versions in the array +} ie_core_versions_t; /** * @struct ie_config * @brief Represents configuration information that describes devices */ typedef struct ie_config { - const char *name; - const char *value; - struct ie_config *next; -}ie_config_t; + const char *name; //!< A configuration key + const char *value; //!< A configuration value + struct ie_config *next; //!< A pointer to the next configuration value +} ie_config_t; /** * @struct ie_param @@ -99,12 +99,12 @@ typedef struct ie_config { */ typedef struct ie_param { union { - char *params; - unsigned int number; - unsigned int range_for_async_infer_request[3]; - unsigned int range_for_streams[2]; + char *params; + unsigned int number; + unsigned int range_for_async_infer_request[3]; + unsigned int range_for_streams[2]; }; -}ie_param_t; +} ie_param_t; /** * @struct ie_param_config @@ -113,57 +113,57 @@ typedef struct ie_param { typedef struct ie_param_config { char *name; ie_param_t *param; -}ie_param_config_t; +} ie_param_config_t; /** * @struct desc * @brief Represents detailed information for an error */ typedef struct 
desc { - char msg[256]; -}desc_t; + char msg[256]; //!< A description message +} desc_t; /** * @struct dimensions * @brief Represents dimensions for input or output data */ typedef struct dimensions { - size_t ranks; - size_t dims[8]; -}dimensions_t; + size_t ranks; //!< A runk representing a number of dimensions + size_t dims[8]; //!< An array of dimensions +} dimensions_t; /** * @enum layout_e * @brief Layouts that the inference engine supports */ typedef enum { - ANY = 0, // "any" layout + ANY = 0, //!< "ANY" layout // I/O data layouts - NCHW = 1, - NHWC = 2, - NCDHW = 3, - NDHWC = 4, + NCHW = 1, //!< "NCHW" layout + NHWC = 2, //!< "NHWC" layout + NCDHW = 3, //!< "NCDHW" layout + NDHWC = 4, //!< "NDHWC" layout // weight layouts - OIHW = 64, + OIHW = 64, //!< "OIHW" layout // Scalar - SCALAR = 95, + SCALAR = 95, //!< "SCALAR" layout // bias layouts - C = 96, + C = 96, //!< "C" layout // Single image layout (for mean image) - CHW = 128, + CHW = 128, //!< "CHW" layout // 2D - HW = 192, - NC = 193, - CN = 194, + HW = 192, //!< "HW" layout + NC = 193, //!< "NC" layout + CN = 194, //!< "CN" layout - BLOCKED = 200, -}layout_e; + BLOCKED = 200, //!< "BLOCKED" layout +} layout_e; /** * @enum precision_e @@ -174,6 +174,7 @@ typedef enum { MIXED = 0, /**< Mixed value. Can be received from network. No applicable for tensors */ FP32 = 10, /**< 32bit floating point value */ FP16 = 11, /**< 16bit floating point value */ + FP64 = 13, /**< 64bit floating point value */ Q78 = 20, /**< 16bit specific signed fixed point precision */ I16 = 30, /**< 16bit signed integer value */ U8 = 40, /**< 8bit unsigned integer value */ @@ -185,7 +186,7 @@ typedef enum { U32 = 74, /**< 32bit unsigned integer value */ BIN = 71, /**< 1bit integer value */ CUSTOM = 80 /**< custom precision has it's own name and size of elements */ -}precision_e; +} precision_e; /** * @struct tensor_desc @@ -195,31 +196,31 @@ typedef struct tensor_desc { layout_e layout; dimensions_t dims; precision_e precision; -}tensor_desc_t; +} tensor_desc_t; /** * @enum colorformat_e * @brief Extra information about input color format for preprocessing */ typedef enum { - RAW = 0u, ///< Plain blob (default), no extra color processing required - RGB, ///< RGB color format - BGR, ///< BGR color format, default in DLDT - RGBX, ///< RGBX color format with X ignored during inference - BGRX, ///< BGRX color format with X ignored during inference - NV12, ///< NV12 color format represented as compound Y+UV blob - I420, ///< I420 color format represented as compound Y+U+V blob -}colorformat_e; + RAW = 0u, //!< Plain blob (default), no extra color processing required + RGB, //!< RGB color format + BGR, //!< BGR color format, default in DLDT + RGBX, //!< RGBX color format with X ignored during inference + BGRX, //!< BGRX color format with X ignored during inference + NV12, //!< NV12 color format represented as compound Y+UV blob + I420, //!< I420 color format represented as compound Y+U+V blob +} colorformat_e; /** * @enum resize_alg_e * @brief Represents the list of supported resize algorithms. */ typedef enum { - NO_RESIZE = 0, - RESIZE_BILINEAR, - RESIZE_AREA -}resize_alg_e; + NO_RESIZE = 0, //!< "No resize" mode + RESIZE_BILINEAR, //!< "Bilinear resize" mode + RESIZE_AREA //!< "Area resize" mode +} resize_alg_e; /** * @enum IEStatusCode @@ -242,19 +243,19 @@ typedef enum { NOT_ALLOCATED = -10, INFER_NOT_STARTED = -11, NETWORK_NOT_READ = -12 -}IEStatusCode; +} IEStatusCode; /** * @struct roi_t * @brief This structure describes roi data. 
*/ typedef struct roi { - size_t id; // ID of a roi - size_t posX; // W upper left coordinate of roi - size_t posY; // H upper left coordinate of roi - size_t sizeX; // W size of roi - size_t sizeY; // H size of roi -}roi_t; + size_t id; //!< ID of a roi + size_t posX; //!< W upper left coordinate of roi + size_t posY; //!< H upper left coordinate of roi + size_t sizeX; //!< W size of roi + size_t sizeY; //!< H size of roi +} roi_t; /** * @struct input_shape @@ -263,7 +264,7 @@ typedef struct roi { typedef struct input_shape { char *name; dimensions_t shape; -}input_shape_t; +} input_shape_t; /** * @struct input_shapes @@ -272,7 +273,7 @@ typedef struct input_shape { typedef struct input_shapes { input_shape_t *shapes; size_t shape_num; -}input_shapes_t; +} input_shapes_t; /** * @struct ie_blob_buffer @@ -280,10 +281,10 @@ typedef struct input_shapes { */ typedef struct ie_blob_buffer { union { - void *buffer; // buffer can be written - const void *cbuffer; // cbuffer is read-only + void *buffer; //!< buffer can be written + const void *cbuffer; //!< cbuffer is read-only }; -}ie_blob_buffer_t; +} ie_blob_buffer_t; /** * @struct ie_complete_call_back @@ -292,7 +293,7 @@ typedef struct ie_blob_buffer { typedef struct ie_complete_call_back { void (INFERENCE_ENGINE_C_API_CALLBACK *completeCallBackFunc)(void *args); void *args; -}ie_complete_call_back_t; +} ie_complete_call_back_t; /** * @struct ie_available_devices @@ -301,7 +302,7 @@ typedef struct ie_complete_call_back { typedef struct ie_available_devices { char **devices; size_t num_devices; -}ie_available_devices_t; +} ie_available_devices_t; /** * @brief Returns number of version that is exported. Use the ie_version_free() to free memory. @@ -317,7 +318,7 @@ INFERENCE_ENGINE_C_API(void) ie_version_free(ie_version_t *version); /** * @brief Release the memory allocated by ie_param_t. - * @param version A pointer to the ie_param_t to free memory. + * @param param A pointer to the ie_param_t to free memory. */ INFERENCE_ENGINE_C_API(void) ie_param_free(ie_param_t *param); @@ -662,6 +663,7 @@ INFERENCE_ENGINE_C_API(void) ie_network_free(ie_network_t **network); /** * @brief Get name of network. * @ingroup Network + * @param network A pointer to the instance of the ie_network_t to get a name from. * @param name Name of the network. * @return Status code of the operation: OK(0) for success. */ @@ -729,7 +731,7 @@ INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_input_layout(co INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_set_input_layout(ie_network_t *network, const char *input_name, const layout_e l); /** - * @Gets dimensions/shape of the input data with reversed order. + * @brief Gets dimensions/shape of the input data with reversed order. * @ingroup Network * @param network A pointer to ie_network_t instance. * @param input_name Name of input data. @@ -743,11 +745,10 @@ INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_input_dims(cons * @ingroup Network * @param network A pointer to ie_network_t instance. * @param input_name Name of input data. - * @parm resize_alg_result The pointer to the resize algorithm used for input blob creation. + * @param resize_alg_result The pointer to the resize algorithm used for input blob creation. * @return Status code of the operation: OK(0) for success. 
*/ -INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_input_resize_algorithm(const ie_network_t *network, const char *input_name, \ - resize_alg_e *resize_alg_result); +INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_network_get_input_resize_algorithm(const ie_network_t *network, const char *input_name, resize_alg_e *resize_alg_result); /** * @brief Sets resize algorithm to be used during pre-processing @@ -1014,7 +1015,7 @@ INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_get_layout(const ie_bl INFERENCE_ENGINE_C_API(IE_NODISCARD IEStatusCode) ie_blob_get_precision(const ie_blob_t *blob, precision_e *prec_result); /** - * @Releases the memory occupied by the ie_blob_t pointer. + * @brief Releases the memory occupied by the ie_blob_t pointer. * @ingroup Blob * @param blob A pointer to the blob pointer to release memory. */ diff --git a/inference-engine/ie_bridges/c/src/ie_c_api.cpp b/inference-engine/ie_bridges/c/src/ie_c_api.cpp index d4eb6eb09c5b2e..9e276a080f166d 100644 --- a/inference-engine/ie_bridges/c/src/ie_c_api.cpp +++ b/inference-engine/ie_bridges/c/src/ie_c_api.cpp @@ -80,6 +80,7 @@ std::map precision_map = {{IE::Precision::UNSPECIFIE {IE::Precision::MIXED, precision_e::MIXED}, {IE::Precision::FP32, precision_e::FP32}, {IE::Precision::FP16, precision_e::FP16}, + {IE::Precision::FP64, precision_e::FP64}, {IE::Precision::Q78, precision_e::Q78}, {IE::Precision::I16, precision_e::I16}, {IE::Precision::U8, precision_e::U8}, @@ -1433,6 +1434,8 @@ IEStatusCode ie_blob_make_memory(const tensor_desc_t *tensorDesc, ie_blob_t **bl _blob->object = IE::make_shared_blob(tensor); } else if (prec == IE::Precision::FP32) { _blob->object = IE::make_shared_blob(tensor); + } else if (prec == IE::Precision::FP64) { + _blob->object = IE::make_shared_blob(tensor); } else { _blob->object = IE::make_shared_blob(tensor); } @@ -1505,6 +1508,9 @@ IEStatusCode ie_blob_make_memory_from_preallocated(const tensor_desc_t *tensorDe } else if (prec == IE::Precision::FP32) { float *p = reinterpret_cast(ptr); _blob->object = IE::make_shared_blob(tensor, p, size); + } else if (prec == IE::Precision::FP64) { + double *p = reinterpret_cast(ptr); + _blob->object = IE::make_shared_blob(tensor, p, size); } else { uint8_t *p = reinterpret_cast(ptr); _blob->object = IE::make_shared_blob(tensor, p, size); diff --git a/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/README.md b/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/README.md index 11fe655de0ddf5..75b05f78c5f5df 100644 --- a/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/README.md +++ b/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/README.md @@ -1,4 +1,4 @@ -# nGraph Function Python* Sample {#openvino_inference_engine_samples_ngraph_function_creation_sample_README} +# nGraph Function Python* Sample {#openvino_inference_engine_ie_bridges_python_samples_ngraph_function_creation_sample_README} This sample demonstrates how to execute an inference using ngraph::Function to create a network. The sample uses the LeNet classifications network as an example. 
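The FP64 support added in the C API hunks above (and in the Python bridge changes that follow) can also be exercised from the C++ API; a small sketch, assuming FP64 blobs are acceptable to the consumer and using an arbitrary 1x3x2x2 shape:

```cpp
#include <inference_engine.hpp>

int main() {
    using namespace InferenceEngine;

    // Describe a small NCHW tensor with 64-bit floating point precision.
    TensorDesc desc(Precision::FP64, {1, 3, 2, 2}, Layout::NCHW);

    // Allocate an FP64 blob, mirroring the make_shared_blob<double> calls above.
    auto blob = make_shared_blob<double>(desc);
    blob->allocate();

    // Fill it through the writable mapping.
    double* data = blob->rwmap().as<double*>();
    for (size_t i = 0; i < blob->size(); ++i) {
        data[i] = 1.0;
    }
    return 0;
}
```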
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/constants.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/constants.pyx index 225103322bbb62..188d38940bd422 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/constants.pyx +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/constants.pyx @@ -18,7 +18,7 @@ from .cimport ie_api_impl_defs as C import numpy as np from enum import Enum -supported_precisions = ["FP32", "FP16", "I64", "U64", "I32", "U32", "I16", "I8", "U16", "U8"] +supported_precisions = ["FP32", "FP64", "FP16", "I64", "U64", "I32", "U32", "I16", "I8", "U16", "U8"] known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL', 'MULTI'] @@ -27,6 +27,7 @@ layout_int_to_str_map = {0: "ANY", 1: "NCHW", 2: "NHWC", 3: "NCDHW", 4: "NDHWC", format_map = { 'FP32' : np.float32, + 'FP64' : np.float64, 'I32' : np.int32, 'FP16' : np.float16, 'I16' : np.int16, diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx index a883b463d4904f..d9bd6120a9e10a 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx @@ -114,6 +114,7 @@ cdef class Blob: def __cinit__(self, TensorDesc tensor_desc = None, array : np.ndarray = None): cdef CTensorDesc c_tensor_desc cdef float[::1] fp32_array_memview + cdef double[::1] fp64_array_memview cdef int16_t[::1] I16_array_memview cdef uint16_t[::1] U16_array_memview cdef uint8_t[::1] U8_array_memview @@ -137,6 +138,8 @@ cdef class Blob: precision = tensor_desc.precision if precision == "FP32": self._ptr = C.make_shared_blob[float](c_tensor_desc) + elif precision == "FP64": + self._ptr = C.make_shared_blob[double](c_tensor_desc) elif precision == "FP16" or precision == "I16": self._ptr = C.make_shared_blob[int16_t](c_tensor_desc) elif precision == "Q78" or precision == "U16": @@ -168,6 +171,9 @@ cdef class Blob: if precision == "FP32": fp32_array_memview = self._array_data self._ptr = C.make_shared_blob[float](c_tensor_desc, &fp32_array_memview[0], fp32_array_memview.shape[0]) + elif precision == "FP64": + fp64_array_memview = self._array_data + self._ptr = C.make_shared_blob[double](c_tensor_desc, &fp64_array_memview[0], fp64_array_memview.shape[0]) elif precision == "FP16": raise RuntimeError("Currently, it's impossible to set_blob with FP16 precision") elif precision == "I16": @@ -264,10 +270,8 @@ cdef class IECore: cdef string model_ cdef IENetwork net = IENetwork() if init_from_buffer: - bin_buffer = malloc(len(weights)) - memcpy(bin_buffer, weights, len(weights)) model_ = bytes(model) - net.impl = self.impl.readNetwork(model_, bin_buffer, len(weights)) + net.impl = self.impl.readNetwork(model_, weights, len(weights)) else: weights_ = "".encode() @@ -1487,6 +1491,7 @@ cdef class BlobBuffer: # todo: half floats precision_to_format = { 'FP32': 'f', # float + 'FP64': 'd', # double 'FP16': 'h', # signed short 'U8': 'B', # unsigned char 'U16': 'H', # unsigned short diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp index da37e6cd8c4ea9..226cc73bc2ee42 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp +++ 
b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp @@ -8,6 +8,7 @@ const std::string EXPORTED_NETWORK_NAME = "undefined"; std::map precision_map = {{"FP32", InferenceEngine::Precision::FP32}, + {"FP64", InferenceEngine::Precision::FP64}, {"FP16", InferenceEngine::Precision::FP16}, {"I8", InferenceEngine::Precision::I8}, {"I16", InferenceEngine::Precision::I16}, @@ -530,12 +531,14 @@ InferenceEnginePython::IECore::readNetwork(const std::string& modelPath, const s } InferenceEnginePython::IENetwork -InferenceEnginePython::IECore::readNetwork(const std::string& model, uint8_t *bin, size_t bin_size) { - InferenceEngine::Blob::Ptr weights_blob; +InferenceEnginePython::IECore::readNetwork(const std::string& model, const uint8_t *bin, size_t bin_size) { + InferenceEngine::MemoryBlob::Ptr weights_blob; if(bin_size!=0) { InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bin_size }, InferenceEngine::Layout::C); - weights_blob = InferenceEngine::make_shared_blob(tensorDesc, bin, bin_size); + weights_blob = InferenceEngine::make_shared_blob(tensorDesc); + weights_blob->allocate(); + memcpy(weights_blob->rwmap().as(), bin, bin_size); } InferenceEngine::CNNNetwork net = actual.ReadNetwork(model, weights_blob); return IENetwork(std::make_shared(net)); diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp index 167091b618f4da..5534d1ddb53215 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp @@ -156,7 +156,7 @@ struct IECore { explicit IECore(const std::string & xmlConfigFile = std::string()); std::map getVersions(const std::string & deviceName); InferenceEnginePython::IENetwork readNetwork(const std::string& modelPath, const std::string& binPath); - InferenceEnginePython::IENetwork readNetwork(const std::string& model, uint8_t *bin, size_t bin_size); + InferenceEnginePython::IENetwork readNetwork(const std::string& model, const uint8_t *bin, size_t bin_size); std::unique_ptr loadNetwork(IENetwork network, const std::string & deviceName, const std::map & config, int num_requests); std::unique_ptr importNetwork(const std::string & modelFIle, const std::string & deviceName, diff --git a/inference-engine/ie_bridges/python/tests/test_Blob.py b/inference-engine/ie_bridges/python/tests/test_Blob.py index 4a382c58218c70..7220f87cbd8adf 100644 --- a/inference-engine/ie_bridges/python/tests/test_Blob.py +++ b/inference-engine/ie_bridges/python/tests/test_Blob.py @@ -46,6 +46,15 @@ def test_write_to_buffer_fp32(): assert np.array_equal(blob.buffer, ones_arr) +def test_write_to_buffer_fp64(): + tensor_desc = TensorDesc("FP64", [1, 3, 127, 127], "NCHW") + array = np.zeros(shape=(1, 3, 127, 127), dtype=np.float64) + blob = Blob(tensor_desc, array) + ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=np.float64) + blob.buffer[:] = ones_arr + assert np.array_equal(blob.buffer, ones_arr) + + @pytest.mark.skip(reason="Need to figure out how to implement right conversion") def test_write_to_buffer_fp16(): tensor_desc = TensorDesc("FP16", [1, 3, 127, 127], "NCHW") diff --git a/inference-engine/include/cpp/ie_cnn_network.h b/inference-engine/include/cpp/ie_cnn_network.h index 85980fba238c5c..6f021ad42fd744 100644 --- a/inference-engine/include/cpp/ie_cnn_network.h +++ b/inference-engine/include/cpp/ie_cnn_network.h @@ -124,7 
+124,6 @@ class INFERENCE_ENGINE_API_CLASS(CNNNetwork) { * Wraps ICNNNetwork::setBatchSize * * @param size Size of batch to set - * @return Status code of the operation */ virtual void setBatchSize(const size_t size) { CALL_STATUS_FNC(setBatchSize, size); diff --git a/inference-engine/include/cpp/ie_infer_request.hpp b/inference-engine/include/cpp/ie_infer_request.hpp index c750a5d4c901f8..8cae1255188fc6 100644 --- a/inference-engine/include/cpp/ie_infer_request.hpp +++ b/inference-engine/include/cpp/ie_infer_request.hpp @@ -83,7 +83,7 @@ class InferRequest { /** * constructs InferRequest from the initialized shared_pointer * @param request Initialized shared pointer to IInferRequest interface - * @param plg Plugin to use. This is required to ensure that InferRequest can work properly even if plugin object is destroyed. + * @param splg Plugin to use. This is required to ensure that InferRequest can work properly even if plugin object is destroyed. */ explicit InferRequest(IInferRequest::Ptr request, InferenceEngine::details::SharedObjectLoader::Ptr splg = {}): diff --git a/inference-engine/include/cpp/ie_memory_state.hpp b/inference-engine/include/cpp/ie_memory_state.hpp index 24fd4d7fa1ff4f..cb45a159e2a19e 100644 --- a/inference-engine/include/cpp/ie_memory_state.hpp +++ b/inference-engine/include/cpp/ie_memory_state.hpp @@ -3,7 +3,9 @@ // /** - * @file + * @brief A header file that provides wrapper classes for IVariableState + * + * @file ie_memory_state.hpp */ #pragma once @@ -25,8 +27,9 @@ class VariableState { public: /** - * constructs VariableState from the initialized shared_pointer + * @brief constructs VariableState from the initialized shared_pointer * @param pState Initialized shared pointer + * @param plg Optional: Plugin to use. This is required to ensure that VariableState can work properly even if plugin object is destroyed. */ explicit VariableState(IVariableState::Ptr pState, details::SharedObjectLoader::Ptr plg = {}) : actual(pState), plugin(plg) { if (actual == nullptr) { @@ -59,7 +62,7 @@ class VariableState { * @copybrief IVariableState::GetState * * Wraps IVariableState::GetState - * @return A blob representing a last state + * @return A blob representing a state */ Blob::CPtr GetState() const { Blob::CPtr stateBlob; @@ -67,7 +70,14 @@ class VariableState { return stateBlob; } - INFERENCE_ENGINE_DEPRECATED("Use GetState function instead") + /** + * @copybrief IVariableState::GetLastState + * @deprecated Use IVariableState::SetState instead + * + * Wraps IVariableState::GetLastState + * @return A blob representing a last state + */ + INFERENCE_ENGINE_DEPRECATED("Use VariableState::GetState function instead") Blob::CPtr GetLastState() const { return GetState(); } @@ -83,8 +93,9 @@ class VariableState { } }; -/* +/** * @brief For compatibility reasons. */ using MemoryState = VariableState; + } // namespace InferenceEngine diff --git a/inference-engine/include/gpu/gpu_context_api_dx.hpp b/inference-engine/include/gpu/gpu_context_api_dx.hpp index 03d284b8c22db9..cbf959b941524a 100644 --- a/inference-engine/include/gpu/gpu_context_api_dx.hpp +++ b/inference-engine/include/gpu/gpu_context_api_dx.hpp @@ -22,17 +22,17 @@ namespace InferenceEngine { namespace gpu { /** -* @brief This class represents an abstraction for GPU plugin remote context -* which is shared with Direct3D 11 device. -* The plugin object derived from this class can be obtained either with -* GetContext() method of Executable network or using CreateContext() Core call. 
-* @note User can also obtain OpenCL context handle from this class. -*/ + * @brief This class represents an abstraction for GPU plugin remote context + * which is shared with Direct3D 11 device. + * The plugin object derived from this class can be obtained either with + * GetContext() method of Executable network or using CreateContext() Core call. + * @note User can also obtain OpenCL context handle from this class. + */ class D3DContext : public ClContext { public: /** - * @brief A smart pointer to the D3DContext object - */ + * @brief A smart pointer to the D3DContext object + */ using Ptr = std::shared_ptr; /** @@ -47,16 +47,16 @@ class D3DContext : public ClContext { }; /** -* @brief This class represents an abstraction for GPU plugin remote blob -* which is shared with Direct3D 11 buffer. -* The plugin object derived from this class can be obtained with CreateBlob() call. -* @note User can also obtain OpenCL buffer handle from this class. -*/ + * @brief This class represents an abstraction for GPU plugin remote blob + * which is shared with Direct3D 11 buffer. + * The plugin object derived from this class can be obtained with CreateBlob() call. + * @note User can also obtain OpenCL buffer handle from this class. + */ class D3DBufferBlob : public ClBufferBlob { public: /** - * @brief A smart pointer to the D3DBufferBlob object - */ + * @brief A smart pointer to the D3DBufferBlob object + */ using Ptr = std::shared_ptr; /** @@ -77,16 +77,16 @@ class D3DBufferBlob : public ClBufferBlob { }; /** -* @brief This class represents an abstraction for GPU plugin remote blob -* which is shared with Direct3D 11 2D texture. -* The plugin object derived from this class can be obtained with CreateBlob() call. -* @note User can also obtain OpenCL 2D image handle from this class. -*/ + * @brief This class represents an abstraction for GPU plugin remote blob + * which is shared with Direct3D 11 2D texture. + * The plugin object derived from this class can be obtained with CreateBlob() call. + * @note User can also obtain OpenCL 2D image handle from this class. + */ class D3DSurface2DBlob : public ClImage2DBlob { public: /** - * @brief A smart pointer to the D3DSurface2DBlob object - */ + * @brief A smart pointer to the D3DSurface2DBlob object + */ using Ptr = std::shared_ptr; /** @@ -117,9 +117,14 @@ class D3DSurface2DBlob : public ClImage2DBlob { }; /** -* @brief This function is used to obtain a NV12 compound blob object from NV12 DXGI video decoder output. -* The resulting compound contains two remote blobs for Y and UV planes of the surface. -*/ + * @brief This function is used to obtain a NV12 compound blob object from NV12 DXGI video decoder output. + * The resulting compound contains two remote blobs for Y and UV planes of the surface. 
+ * @param height Height of Y plane + * @param width Width of Y plane + * @param ctx A pointer to remote context + * @param nv12_surf An ID3D11Texture2D instance to create NV12 blob from + * @return NV12 remote blob + */ static inline Blob::Ptr make_shared_blob_nv12(size_t height, size_t width, RemoteContext::Ptr ctx, ID3D11Texture2D* nv12_surf) { auto casted = std::dynamic_pointer_cast(ctx); if (nullptr == casted) { @@ -145,8 +150,12 @@ static inline Blob::Ptr make_shared_blob_nv12(size_t height, size_t width, Remot } /** -* @brief This function is used to obtain remote context object from ID3D11Device -*/ + * @brief This function is used to obtain remote context object from ID3D11Device + * @param core Inference Engine Core object instance + * @param deviceName A name of a device to create a remote context for + * @param device A pointer to ID3D11Device to be used to create a remote context + * @return A shared remote context instance + */ static inline D3DContext::Ptr make_shared_context(Core& core, std::string deviceName, ID3D11Device* device) { ParamMap contextParams = { { GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(VA_SHARED) }, @@ -156,8 +165,12 @@ static inline D3DContext::Ptr make_shared_context(Core& core, std::string device } /** -* @brief This function is used to obtain remote blob object from ID3D11Buffer -*/ + * @brief This function is used to obtain remote blob object from ID3D11Buffer + * @param desc A tensor description which describes blob configuration + * @param ctx A shared pointer to a remote context + * @param buffer A pointer to ID3D11Buffer instance to create remote blob based on + * @return A remote blob instance + */ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx, ID3D11Buffer* buffer) { auto casted = std::dynamic_pointer_cast(ctx); if (nullptr == casted) { @@ -172,14 +185,14 @@ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext:: } /** -* @brief This function is used to obtain remote blob object from ID3D11Texture2D -* @param desc Tensor description -* @param ctx the RemoteContext object whuch owns context for the blob to be created -* @param surface Pointer to ID3D11Texture2D interface of the objects that owns NV12 texture -* @param plane ID of the plane to be shared (0 or 1) -* @return Smart pointer to created RemoteBlob object cast to base class -* @note The underlying ID3D11Texture2D can also be a plane of output surface of DXGI video decoder -*/ + * @brief This function is used to obtain remote blob object from ID3D11Texture2D + * @param desc Tensor description + * @param ctx the RemoteContext object which owns context for the blob to be created + * @param surface Pointer to ID3D11Texture2D interface of the object that owns NV12 texture + * @param plane ID of the plane to be shared (0 or 1) + * @return Smart pointer to created RemoteBlob object cast to base class + * @note The underlying ID3D11Texture2D can also be a plane of output surface of DXGI video decoder + */ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx, ID3D11Texture2D* surface, uint32_t plane = 0) { auto casted = std::dynamic_pointer_cast(ctx); if (nullptr == casted) { diff --git a/inference-engine/include/gpu/gpu_context_api_ocl.hpp b/inference-engine/include/gpu/gpu_context_api_ocl.hpp index 489daa143a0f6d..9bcdf0adbedba0 100644 --- a/inference-engine/include/gpu/gpu_context_api_ocl.hpp +++ b/inference-engine/include/gpu/gpu_context_api_ocl.hpp @@ -25,16 +25,16 @@ namespace InferenceEngine {
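The newly documented D3D11 helpers (make_shared_context and make_shared_blob_nv12) are typically used together. A hedged sketch, Windows-only, assuming a valid ID3D11Device and an NV12 decoder output surface supplied by the application; wrapDecoderSurface is an illustrative name:

#include <gpu/gpu_context_api_dx.hpp>
#include <inference_engine.hpp>

// Sketch only: device and nv12Surface come from the application's D3D11 pipeline.
InferenceEngine::Blob::Ptr wrapDecoderSurface(InferenceEngine::Core& core,
                                              ID3D11Device* device,
                                              ID3D11Texture2D* nv12Surface,
                                              size_t height, size_t width) {
    // Remote GPU context shared with the user's D3D11 device
    auto context = InferenceEngine::gpu::make_shared_context(core, "GPU", device);
    // Compound blob holding remote Y and UV planes of the shared surface
    return InferenceEngine::gpu::make_shared_blob_nv12(height, width, context, nv12Surface);
}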
namespace gpu { /** -* @brief This class represents an abstraction for GPU plugin remote context -* which is shared with OpenCL context object. -* The plugin object derived from this class can be obtained either with -* GetContext() method of Executable network or using CreateContext() Core call. -*/ + * @brief This class represents an abstraction for GPU plugin remote context + * which is shared with OpenCL context object. + * The plugin object derived from this class can be obtained either with + * GetContext() method of Executable network or using CreateContext() Core call. + */ class ClContext : public RemoteContext, public details::param_map_obj_getter { public: /** - * @brief A smart pointer to the ClContext object - */ + * @brief A smart pointer to the ClContext object + */ using Ptr = std::shared_ptr; /** @@ -63,14 +63,14 @@ class ClContext : public RemoteContext, public details::param_map_obj_getter { }; /** -* @brief The basic class for all GPU plugin remote blob objects. -* The OpenCL memory object handle (cl_mem) can be obtained from this class object. -*/ + * @brief The basic class for all GPU plugin remote blob objects. + * The OpenCL memory object handle (cl_mem) can be obtained from this class object. + */ class ClBlob : public RemoteBlob { public: /** - * @brief A smart pointer to the ClBlob object - */ + * @brief A smart pointer to the ClBlob object + */ using Ptr = std::shared_ptr; /** @@ -81,16 +81,16 @@ class ClBlob : public RemoteBlob { }; /** -* @brief This class represents an abstraction for GPU plugin remote blob -* which can be shared with user-supplied OpenCL buffer. -* The plugin object derived from this class can be obtained with CreateBlob() call. -* @note User can obtain OpenCL buffer handle from this class. -*/ + * @brief This class represents an abstraction for GPU plugin remote blob + * which can be shared with user-supplied OpenCL buffer. + * The plugin object derived from this class can be obtained with CreateBlob() call. + * @note User can obtain OpenCL buffer handle from this class. + */ class ClBufferBlob : public ClBlob, public details::param_map_obj_getter { public: /** - * @brief A smart pointer to the ClBufferBlob object - */ + * @brief A smart pointer to the ClBufferBlob object + */ using Ptr = std::shared_ptr; /** @@ -124,16 +124,16 @@ class ClBufferBlob : public ClBlob, public details::param_map_obj_getter { }; /** -* @brief This class represents an abstraction for GPU plugin remote blob -* which can be shared with user-supplied OpenCL 2D Image. -* The plugin object derived from this class can be obtained with CreateBlob() call. -* @note User can obtain OpenCL image handle from this class. -*/ + * @brief This class represents an abstraction for GPU plugin remote blob + * which can be shared with user-supplied OpenCL 2D Image. + * The plugin object derived from this class can be obtained with CreateBlob() call. + * @note User can obtain OpenCL image handle from this class. + */ class ClImage2DBlob : public ClBlob, public details::param_map_obj_getter { public: /** - * @brief A smart pointer to the ClImage2DBlob object - */ + * @brief A smart pointer to the ClImage2DBlob object + */ using Ptr = std::shared_ptr; /** @@ -167,13 +167,13 @@ class ClImage2DBlob : public ClBlob, public details::param_map_obj_getter { }; /** -* @brief This function is used to construct a NV12 compound blob object from two cl::Image2D wrapper objects. -* The resulting compound contains two remote blobs for Y and UV planes of the surface. 
-* @param ctx RemoteContext plugin object derived from ClContext class. -* @param nv12_image_plane_y cl::Image2D object containing Y plane data. -* @param nv12_image_plane_uv cl::Image2D object containing UV plane data. -* @return Pointer to plugin-specific context class object, which is derived from RemoteContext. -*/ + * @brief This function is used to construct a NV12 compound blob object from two cl::Image2D wrapper objects. + * The resulting compound contains two remote blobs for Y and UV planes of the surface. + * @param ctx RemoteContext plugin object derived from ClContext class. + * @param nv12_image_plane_y cl::Image2D object containing Y plane data. + * @param nv12_image_plane_uv cl::Image2D object containing UV plane data. + * @return A shared remote blob instance + */ static inline Blob::Ptr make_shared_blob_nv12(RemoteContext::Ptr ctx, cl::Image2D& nv12_image_plane_y, cl::Image2D& nv12_image_plane_uv) { auto casted = std::dynamic_pointer_cast(ctx); if (nullptr == casted) { @@ -201,8 +201,12 @@ static inline Blob::Ptr make_shared_blob_nv12(RemoteContext::Ptr ctx, cl::Image2 } /** -* @brief This function is used to obtain remote context object from user-supplied OpenCL context handle -*/ + * @brief This function is used to obtain remote context object from user-supplied OpenCL context handle + * @param core A reference to Inference Engine Core object + * @param deviceName A name of device to create a remote context for + * @param ctx A OpenCL context to be used to create shared remote context + * @return A shared remote context instance + */ static inline RemoteContext::Ptr make_shared_context(Core& core, std::string deviceName, cl_context ctx) { ParamMap contextParams = { { GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(OCL) }, @@ -212,15 +216,22 @@ static inline RemoteContext::Ptr make_shared_context(Core& core, std::string dev } /** -* @brief This function is used to create remote blob object within default GPU plugin OpenCL context -*/ + * @brief This function is used to create remote blob object within default GPU plugin OpenCL context + * @param desc A tensor descriptor object representing remote blob configuration + * @param ctx A remote context used to create remote blob + * @return A remote blob instance + */ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, ClContext::Ptr ctx) { return std::dynamic_pointer_cast(ctx->CreateBlob(desc)); } /** -* @brief This function is used to obtain remote blob object from user-supplied cl::Buffer wrapper object -*/ + * @brief This function is used to obtain remote blob object from user-supplied cl::Buffer wrapper object + * @param desc A tensor descriptor object representing remote blob configuration + * @param ctx A remote context used to create remote blob + * @param buffer A cl::Buffer object wrapped by a remote blob + * @return A remote blob instance + */ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx, cl::Buffer& buffer) { auto casted = std::dynamic_pointer_cast(ctx); if (nullptr == casted) { @@ -235,8 +246,12 @@ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext:: } /** -* @brief This function is used to obtain remote blob object from user-supplied OpenCL buffer handle -*/ + * @brief This function is used to obtain remote blob object from user-supplied OpenCL buffer handle + * @param desc A tensor descriptor object representing remote blob configuration + * @param ctx A remote context used to create remote blob + * @param buffer A cl_mem object 
wrapped by a remote blob + * @return A remote blob instance + */ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx, cl_mem buffer) { auto casted = std::dynamic_pointer_cast(ctx); if (nullptr == casted) { @@ -251,8 +266,12 @@ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext:: } /** -* @brief This function is used to obtain remote blob object from user-supplied cl::Image2D wrapper object -*/ + * @brief This function is used to obtain remote blob object from user-supplied cl::Image2D wrapper object + * @param desc A tensor descriptor object representing remote blob configuration + * @param ctx A remote context used to create remote blob + * @param buffer A cl::Image2D object wrapped by a remote blob + * @return A remote blob instance + */ static inline Blob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx, cl::Image2D& image) { auto casted = std::dynamic_pointer_cast(ctx); if (nullptr == casted) { diff --git a/inference-engine/include/ie_blob.h b/inference-engine/include/ie_blob.h index 6a6514e80c95c5..234a13528ebdd1 100644 --- a/inference-engine/include/ie_blob.h +++ b/inference-engine/include/ie_blob.h @@ -125,6 +125,7 @@ class INFERENCE_ENGINE_API_CLASS(Blob) { /** * @brief Returns the tensor description + * @return A const reference to a tensor descriptor */ virtual const TensorDesc& getTensorDesc() const noexcept { return tensorDesc; @@ -132,6 +133,7 @@ class INFERENCE_ENGINE_API_CLASS(Blob) { /** * @brief Returns the tensor description + * @return A reference to a tensor descriptor */ virtual TensorDesc& getTensorDesc() noexcept { return tensorDesc; @@ -141,6 +143,8 @@ class INFERENCE_ENGINE_API_CLASS(Blob) { * @brief By default, returns the total number of elements (a product of all the dims or 1 for scalar) * * Return value and its interpretation heavily depend on the blob type + * + * @return The total number of elements */ virtual size_t size() const noexcept { if (tensorDesc.getLayout() == Layout::SCALAR) return 1; @@ -149,6 +153,7 @@ class INFERENCE_ENGINE_API_CLASS(Blob) { /** * @brief Returns the size of the current Blob in bytes. + * @return Blob's size in bytes */ virtual size_t byteSize() const noexcept { return size() * element_size(); @@ -158,9 +163,11 @@ class INFERENCE_ENGINE_API_CLASS(Blob) { * @deprecated Cast to MemoryBlob and use its API instead. * Blob class can represent compound blob, which do not refer to the only solid memory. * - * @brief Returns the number of bytes per element. + * @brief Provides the number of bytes per element. * * The overall Blob capacity is size() * element_size(). Abstract method. + * + * @return Returns the number of bytes per element */ virtual size_t element_size() const noexcept = 0; @@ -175,6 +182,8 @@ class INFERENCE_ENGINE_API_CLASS(Blob) { * @brief Releases previously allocated data. * * Abstract method. + * + * @return `True` if deallocation happens successfully, `false` otherwise. */ virtual bool deallocate() noexcept = 0; @@ -243,13 +252,14 @@ class INFERENCE_ENGINE_API_CLASS(Blob) { */ virtual void* getHandle() const noexcept = 0; + /// private template friend class TBlobProxy; }; /** * @brief Helper cast function to work with shared Blob objects - * + * @param blob A blob to cast * @return shared_ptr to the type T. 
Returned shared_ptr shares ownership of the object with the * input Blob::Ptr */ @@ -262,7 +272,7 @@ std::shared_ptr as(const Blob::Ptr& blob) noexcept { /** * @brief Helper cast function to work with shared Blob objects - * + * @param blob A blob to cast * @return shared_ptr to the type const T. Returned shared_ptr shares ownership of the object with * the input Blob::Ptr */ @@ -320,6 +330,7 @@ class INFERENCE_ENGINE_API_CLASS(MemoryBlob): public Blob { /** * @brief Returns the total number of elements, which is a product of all the dimensions + * @return The total number of elements */ size_t size() const noexcept override { if (tensorDesc.getLayout() == Layout::SCALAR) return 1; @@ -464,6 +475,7 @@ class INFERENCE_ENGINE_API_CLASS(MemoryBlob): public Blob { */ void* getHandle() const noexcept override = 0; + /// private template friend class TBlobProxy; }; @@ -779,6 +791,11 @@ class TBlob : public MemoryBlob { return _handle.get(); } + /** + * @brief Creates a blob from the existing blob with a given ROI + * @param origBlob An original blob + * @param roi A ROI object + */ TBlob(const TBlob& origBlob, const ROI& roi) : MemoryBlob(make_roi_desc(origBlob.getTensorDesc(), roi, true)), _allocator(origBlob._allocator) { diff --git a/inference-engine/include/ie_common.h b/inference-engine/include/ie_common.h index 79f9a88b790d8d..cdd757103a415a 100644 --- a/inference-engine/include/ie_common.h +++ b/inference-engine/include/ie_common.h @@ -91,6 +91,13 @@ enum Layout : uint8_t { BLOCKED = 200, //!< A blocked layout }; + +/** + * @brief Prints a string representation of InferenceEngine::Layout to a stream + * @param out An output stream to send to + * @param p A layout value to print to a stream + * @return A reference to the `out` stream + */ inline std::ostream& operator<<(std::ostream& out, const Layout& p) { switch (p) { #define PRINT_LAYOUT(name) \ @@ -131,6 +138,13 @@ enum ColorFormat : uint32_t { NV12, ///< NV12 color format represented as compound Y+UV blob I420, ///< I420 color format represented as compound Y+U+V blob }; + +/** + * @brief Prints a string representation of InferenceEngine::ColorFormat to a stream + * @param out An output stream to send to + * @param fmt A color format value to print to a stream + * @return A reference to the `out` stream + */ inline std::ostream& operator<<(std::ostream& out, const ColorFormat& fmt) { switch (fmt) { #define PRINT_COLOR_FORMAT(name) \ @@ -235,7 +249,6 @@ struct ResponseDesc { char msg[4096] = {}; }; - /** * @brief Response structure encapsulating information about supported layer */ @@ -312,13 +325,14 @@ class NotAllocated : public std::logic_error { class InferNotStarted : public std::logic_error { using std::logic_error::logic_error; }; -} // namespace InferenceEngine /** @brief This class represents StatusCode::NETWORK_NOT_READ exception */ class NetworkNotRead : public std::logic_error { using std::logic_error::logic_error; }; +} // namespace InferenceEngine + #if defined(_WIN32) #define __PRETTY_FUNCTION__ __FUNCSIG__ #else diff --git a/inference-engine/include/ie_compound_blob.h b/inference-engine/include/ie_compound_blob.h index ff5d71e4078eb5..526402b9dfd85e 100644 --- a/inference-engine/include/ie_compound_blob.h +++ b/inference-engine/include/ie_compound_blob.h @@ -49,12 +49,14 @@ class INFERENCE_ENGINE_API_CLASS(CompoundBlob): public Blob { explicit CompoundBlob(std::vector&& blobs); /** - * @brief Always returns 0 + * @brief Always returns `0` + * @return Returns `0` */ size_t byteSize() const noexcept override; /** - * 
@brief Always returns 0 + * @brief Always returns `0` + * @return Returns `0` */ size_t element_size() const noexcept override; @@ -65,7 +67,7 @@ class INFERENCE_ENGINE_API_CLASS(CompoundBlob): public Blob { /** * @brief No operation is performed. Compound blob does not allocate/deallocate any data - * @return false + * @return Returns `false` */ bool deallocate() noexcept override; diff --git a/inference-engine/include/ie_iexecutable_network.hpp b/inference-engine/include/ie_iexecutable_network.hpp index 8e7c5fa0bef4c1..f919547295c748 100644 --- a/inference-engine/include/ie_iexecutable_network.hpp +++ b/inference-engine/include/ie_iexecutable_network.hpp @@ -46,7 +46,7 @@ class IExecutableNetwork : public details::IRelease { * This method need to be called to find output names for using them later * when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob * - * @param out Reference to the ::ConstOutputsDataMap object + * @param out Reference to the InferenceEngine::ConstOutputsDataMap object * @param resp Optional: pointer to an already allocated object to contain information in case of failure * @return Status code of the operation: InferenceEngine::OK (0) for success */ @@ -55,11 +55,11 @@ class IExecutableNetwork : public details::IRelease { /** * @brief Gets the executable network input Data node information. * - * The received info is stored in the given ::ConstInputsDataMap object. + * The received info is stored in the given InferenceEngine::ConstInputsDataMap object. * This method need to be called to find out input names for using them later * when calling InferenceEngine::InferRequest::SetBlob * - * @param inputs Reference to ::ConstInputsDataMap object. + * @param inputs Reference to InferenceEngine::ConstInputsDataMap object. * @param resp Optional: pointer to an already allocated object to contain information in case of failure * @return Status code of the operation: InferenceEngine::OK (0) for success */ diff --git a/inference-engine/include/ie_imemory_state.hpp b/inference-engine/include/ie_imemory_state.hpp index 2e44350b5fa719..a5f52ae82514b2 100644 --- a/inference-engine/include/ie_imemory_state.hpp +++ b/inference-engine/include/ie_imemory_state.hpp @@ -20,7 +20,7 @@ namespace InferenceEngine { /** * @interface IVariableState - * @brief manages data for reset operations + * @brief Manages data for reset operations */ class IVariableState : public details::no_copy { public: @@ -30,8 +30,8 @@ class IVariableState : public details::no_copy { using Ptr = std::shared_ptr; /** - * @brief Gets name of current memory state, if length of array is not enough name is truncated by len, null - * terminator is inserted as well. As memory state name variable_id from according ReadValue used. + * @brief Gets name of current variable state, if length of array is not enough name is truncated by len, null + * terminator is inserted as well. As variable state name `variable_id` from according `ReadValue` used. 
* * @param name preallocated buffer for receiving name * @param len Length of the buffer @@ -41,7 +41,7 @@ class IVariableState : public details::no_copy { virtual StatusCode GetName(char* name, size_t len, ResponseDesc* resp) const noexcept = 0; /** - * @brief Reset internal memory state for relevant infer request, to a value specified as default for according ReadValue node + * @brief Reset internal variable state for relevant infer request, to a value specified as default for according ReadValue node * * @param resp Optional: pointer to an already allocated object to contain information in case of failure * @return Status code of the operation: InferenceEngine::OK (0) for success* @@ -53,26 +53,37 @@ class IVariableState : public details::no_copy { * * This method can fail if Blob size does not match the internal state size or precision * - * @param newState is the data to use as new state + * @param newState The data to use as new state * @param resp Optional: pointer to an already allocated object to contain information in case of failure * @return Status code of the operation: InferenceEngine::OK (0) for success */ virtual StatusCode SetState(Blob::Ptr newState, ResponseDesc* resp) noexcept = 0; /** - * @brief Returns the value of the memory state. + * @brief Returns the value of the variable state. * - * @param lastState + * @param state A reference to a blob containing a variable state * @param resp Optional: pointer to an already allocated object to contain information in case of failure * @return Status code of the operation: InferenceEngine::OK (0) for success - * */ + */ INFERENCE_ENGINE_DEPRECATED("Use GetState function instead") - virtual StatusCode GetLastState(Blob::CPtr& state, ResponseDesc* resp) const noexcept {return GetState(state, resp);} + virtual StatusCode GetLastState(Blob::CPtr& state, ResponseDesc* resp) const noexcept { + return GetState(state, resp); + } + + /** + * @brief Returns the value of the variable state. + * + * @param state A reference to a blob containing a variable state + * @param resp Optional: pointer to an already allocated object to contain information in case of failure + * @return Status code of the operation: InferenceEngine::OK (0) for success + */ virtual StatusCode GetState(Blob::CPtr& state, ResponseDesc* resp) const noexcept = 0; }; -/* +/** * @brief For compatibility reasons. 
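A hedged sketch of how the variable-state API documented above is typically driven from the C++ wrapper; it assumes a network with stateful ReadValue/Assign nodes and uses ExecutableNetwork::QueryState() purely for illustration:

#include <string>
#include <inference_engine.hpp>

// Sketch: reset every variable state before a new sequence and read one value back.
void resetStates(InferenceEngine::ExecutableNetwork& execNet) {
    for (auto&& state : execNet.QueryState()) {
        std::string id = state.GetName();                       // variable_id of the ReadValue node
        state.Reset();                                          // back to the default initial value
        InferenceEngine::Blob::CPtr value = state.GetState();   // GetLastState() is deprecated
        (void)id;
        (void)value;
    }
}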
*/ using IMemoryState = IVariableState; + } // namespace InferenceEngine \ No newline at end of file diff --git a/inference-engine/include/ie_input_info.hpp b/inference-engine/include/ie_input_info.hpp index a1760d8a009993..5d6b8f8680383b 100644 --- a/inference-engine/include/ie_input_info.hpp +++ b/inference-engine/include/ie_input_info.hpp @@ -125,6 +125,7 @@ class InputInfo { /** * @brief Returns the tensor descriptor + * @return A const reference to a tensor descriptor */ const TensorDesc& getTensorDesc() const { if (!_inputData) { diff --git a/inference-engine/include/ie_layouts.h b/inference-engine/include/ie_layouts.h index a544231092b7ee..219dd6b9d1f3b3 100644 --- a/inference-engine/include/ie_layouts.h +++ b/inference-engine/include/ie_layouts.h @@ -130,6 +130,11 @@ class INFERENCE_ENGINE_API_CLASS(BlockingDesc) { bool operator!=(const BlockingDesc& rhs) const; protected: + /** + * @brief Fills tensor descriptor based on blocking dimensions and specific order + * @param blocked_dims A vector representing blocking dimensions + * @param order A vector with specific dims order + */ void fillDesc(const SizeVector& blocked_dims, const SizeVector& order); private: @@ -330,6 +335,14 @@ struct ROI { ROI() = default; + /** + * @brief Creates a ROI objects with given parameters + * @param id ID of a ROI (offset over batch dimension) + * @param posX W upper left coordinate of ROI + * @param posY H upper left coordinate of ROI + * @param sizeX W size of ROI + * @param sizeY H size of ROI + */ ROI(size_t id, size_t posX, size_t posY, size_t sizeX, size_t sizeY) : id(id), posX(posX), posY(posY), sizeX(sizeX), sizeY(sizeY) { } diff --git a/inference-engine/include/ie_locked_memory.hpp b/inference-engine/include/ie_locked_memory.hpp index c031f498366c12..111169ac3217f8 100644 --- a/inference-engine/include/ie_locked_memory.hpp +++ b/inference-engine/include/ie_locked_memory.hpp @@ -168,7 +168,7 @@ class LockedMemory : public details::LockedMemoryBase { /** * @brief Compares stored object with the given one * @param pointer An pointer to compare with. - * @return true if objects are equal, false otherwise + * @return `true` if objects are equal, `false` otherwise */ bool operator==(const T* pointer) const { // special case with nullptr @@ -177,8 +177,9 @@ class LockedMemory : public details::LockedMemoryBase { /** * @brief Compares the object with the one stored in the memory. 
- * - * @return true if objects are equal, false otherwise + * @param pointer A pointer to compare with + * @param lm A compared LockedMemory object + * @return `true` if objects are equal, `false` otherwise */ friend bool operator==(const T* pointer, const LockedMemory& lm) { return lm.operator==(pointer); @@ -266,8 +267,8 @@ class LockedMemory : public details::LockedMemoryBase { /** * @brief Compares stored object with the given one - * - * @return true if objects are equal, false otherwise + * @param pointer A pointer to compare with + * @return `true` if objects are equal, `false` otherwise */ bool operator==(const void* pointer) const { // special case with nullptr @@ -276,8 +277,9 @@ class LockedMemory : public details::LockedMemoryBase { /** * @brief Compares the object with the one stored in the memory - * - * @return true if objects are equal, false otherwise + * @param pointer A pointer to compare with + * @param lm A compared LockedMemory object + * @return `true` if objects are equal, `false` otherwise */ friend bool operator==(const void* pointer, const LockedMemory& lm) { return lm.operator==(pointer); @@ -362,8 +364,8 @@ class LockedMemory : public details::LockedMemoryBase { /** * @brief Compares stored object with the given one - * - * @return true if objects are equal, false otherwise + * @param pointer A pointer to compare with + * @return `true` if objects are equal, `false` otherwise */ bool operator==(const T* pointer) const { // special case with nullptr @@ -372,8 +374,9 @@ class LockedMemory : public details::LockedMemoryBase { /** * @brief Compares the object with the one stored in the memory - * - * @return true if objects are equal, false otherwise + * @param pointer A pointer to compare with + * @param lm A compared LockedMemory object + * @return `true` if objects are equal, `false` otherwise */ friend bool operator==(const T* pointer, const LockedMemory& lm) { return lm.operator==(pointer); diff --git a/inference-engine/include/ie_parallel.hpp b/inference-engine/include/ie_parallel.hpp index 67286f3e165efb..bc33c92e01266a 100644 --- a/inference-engine/include/ie_parallel.hpp +++ b/inference-engine/include/ie_parallel.hpp @@ -58,6 +58,15 @@ inline int parallel_get_env_threads() { } #if IE_THREAD == IE_THREAD_TBB #define PARTITIONING , tbb::static_partitioner() + +// The TBB version less than 2018u1 has no static_partitioner argument for +// tbb::parallel_deterministic_reduce. So will fallback to non deterministic version. 
+#if (TBB_INTERFACE_VERSION >= 10001) +#define _TBB_REDUCE_FUNC tbb::parallel_deterministic_reduce +#else +#define _TBB_REDUCE_FUNC tbb::parallel_reduce +#endif + #else #define PARTITIONING #endif @@ -186,7 +195,7 @@ void parallel_sort(I begin, I end, const F& comparator) { template R parallel_sum(const T0& D0, const R& input, const F& func) { #if (IE_THREAD == IE_THREAD_TBB || IE_THREAD == IE_THREAD_TBB_AUTO) - return tbb::parallel_deterministic_reduce( + return _TBB_REDUCE_FUNC( tbb::blocked_range(0, D0), input, [&](const tbb::blocked_range& r, R init) -> R { R sum = init; @@ -218,7 +227,7 @@ R parallel_sum(const T0& D0, const R& input, const F& func) { template R parallel_sum2d(const T0& D0, const T1& D1, const R& input, const F& func) { #if (IE_THREAD == IE_THREAD_TBB || IE_THREAD == IE_THREAD_TBB_AUTO) - return tbb::parallel_deterministic_reduce( + return _TBB_REDUCE_FUNC( tbb::blocked_range2d(0, D0, 0, D1), input, [&](const tbb::blocked_range2d& r, R init) -> R { R sum = init; @@ -257,7 +266,7 @@ R parallel_sum2d(const T0& D0, const T1& D1, const R& input, const F& func) { template R parallel_sum3d(const T0& D0, const T1& D1, const T2& D2, const R& input, const F& func) { #if (IE_THREAD == IE_THREAD_TBB || IE_THREAD == IE_THREAD_TBB_AUTO) - return tbb::parallel_deterministic_reduce( + return _TBB_REDUCE_FUNC( tbb::blocked_range3d(0, D0, 0, D1, 0, D2), input, [&](const tbb::blocked_range3d& r, R init) -> R { R sum = init; diff --git a/inference-engine/include/ie_precision.hpp b/inference-engine/include/ie_precision.hpp index 8d13a4bab0445a..2d13bacef5da15 100644 --- a/inference-engine/include/ie_precision.hpp +++ b/inference-engine/include/ie_precision.hpp @@ -29,6 +29,7 @@ class Precision { FP32 = 10, /**< 32bit floating point value */ FP16 = 11, /**< 16bit floating point value, 5 bit for exponent, 10 bit for mantisa */ BF16 = 12, /**< 16bit floating point value, 8 bit for exponent, 7 bit for mantisa*/ + FP64 = 13, /**< 64bit floating point value */ Q78 = 20, /**< 16bit specific signed fixed point precision */ I16 = 30, /**< 16bit signed integer value */ U8 = 40, /**< 8bit unsigned integer value */ @@ -60,7 +61,10 @@ class Precision { /** @brief Default constructor */ Precision() = default; - /** @brief Constructor with specified precision */ + /** + * @brief Constructor with specified precision + * @param value A value of ePrecision to create an object from + */ Precision(const Precision::ePrecision value) { // NOLINT precisionInfo = getPrecisionInfo(value); } @@ -69,7 +73,7 @@ class Precision { * @brief Custom precision constructor * * @param bitsSize size of elements - * @param name optional name string, used in serialisation + * @param name optional: name string, used in serialisation */ explicit Precision(size_t bitsSize, const char* name = nullptr) { if (bitsSize == 0) { @@ -108,6 +112,7 @@ class Precision { switch (precisionInfo.value) { CASE(FP32, float); + CASE(FP64, double); CASE2(FP16, int16_t, uint16_t); CASE2(BF16, int16_t, uint16_t); CASE(I8, int8_t); @@ -131,39 +136,64 @@ class Precision { } } - /** @brief Equality operator with Precision object */ + /** + * @brief Equality operator with Precision object + * @param p A value of Precision to compare with + * @return `true` if values represent the same precisions, `false` otherwise + */ bool operator==(const Precision& p) const noexcept { return precisionInfo.value == p && precisionInfo.bitsSize == p.precisionInfo.bitsSize && areSameStrings(precisionInfo.name, p.precisionInfo.name); } - /** @brief Equality operator with 
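The ie_parallel.hpp change above selects tbb::parallel_deterministic_reduce only when the TBB interface version provides the static_partitioner overload and otherwise falls back to tbb::parallel_reduce. A hedged usage sketch of parallel_sum, whose backend is picked by the _TBB_REDUCE_FUNC macro when IE_THREAD == IE_THREAD_TBB; sumAll is an illustrative name and assumes ie_parallel.hpp is on the include path:

#include <cstddef>
#include <vector>
#include <ie_parallel.hpp>

// Sketch: sum func(i) over the index range [0, data.size()).
float sumAll(const std::vector<float>& data) {
    return InferenceEngine::parallel_sum(data.size(), 0.0f, [&](size_t i) -> float {
        return data[i];
    });
}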
ePrecision enum value */ + /** + * @brief Equality operator with ePrecision enum value + * @param p A value of ePrecision to compare with + * @return `true` if values represent the same precisions, `false` otherwise + */ bool operator==(const ePrecision p) const noexcept { return precisionInfo.value == p; } - /** @brief Inequality operator with ePrecision enum value */ + /** + * @brief Inequality operator with ePrecision enum value + * @param p A value of ePrecision to compare with + * @return `true` if values represent different precisions, `false` otherwise + */ bool operator!=(const ePrecision p) const noexcept { return precisionInfo.value != p; } - /** @brief Assignment operator with ePrecision enum value */ + /** + * @brief Assignment operator with ePrecision enum value + * @param p A value of ePrecision enumeration + * @return A Precision instance + */ Precision& operator=(const ePrecision p) noexcept { precisionInfo = getPrecisionInfo(p); return *this; } - /** @brief Cast operator to a bool */ + /** + * @brief Cast operator to a bool + * @return `true` if precision is specified, `false` otherwise + */ explicit operator bool() const noexcept { return precisionInfo.value != UNSPECIFIED; } - /** @brief Logical negation operator */ + /** + * @brief Logical negation operator + * @return `true` if precision is NOT specified, `false` otherwise + */ bool operator!() const noexcept { return precisionInfo.value == UNSPECIFIED; } - /** @brief Cast operator to a ePrecision */ + /** + * @brief Cast operator to a ePrecision + * @return A casted value of Precision::ePrecision enumeration + */ operator Precision::ePrecision() const noexcept { return precisionInfo.value; } @@ -176,19 +206,27 @@ class Precision { return precisionInfo.value; } - /** @brief Getter of precision name */ + /** + * @brief Getter of precision name + * @return A string representing precision name + */ const char* name() const noexcept { return precisionInfo.name; } - /** @brief Creates from string with precision name */ + /** + * @brief Creates Precision from string with precision name + * @param str A string representing precision + * @return Precision created from string representation + */ static Precision FromStr(const std::string& str) { static std::unordered_map names = { #define PRECISION_NAME(s) {#s, s} PRECISION_NAME(Q78), PRECISION_NAME(BOOL), PRECISION_NAME(BF16), PRECISION_NAME(I8), PRECISION_NAME(I16), PRECISION_NAME(I32), PRECISION_NAME(I64), PRECISION_NAME(U8), PRECISION_NAME(U16), PRECISION_NAME(U32), PRECISION_NAME(U64), - PRECISION_NAME(FP32), PRECISION_NAME(FP16), PRECISION_NAME(MIXED), PRECISION_NAME(BIN), + PRECISION_NAME(FP32), PRECISION_NAME(FP64), PRECISION_NAME(FP16), PRECISION_NAME(MIXED), + PRECISION_NAME(BIN), #undef PRECISION_NAME }; auto i = names.find(str); @@ -220,11 +258,12 @@ class Precision { */ bool isSigned() const noexcept { return (precisionInfo.value == Precision::UNSPECIFIED) || (precisionInfo.value == Precision::MIXED) || - (precisionInfo.value == Precision::FP32) || (precisionInfo.value == Precision::FP16) || - (precisionInfo.value == Precision::Q78) || (precisionInfo.value == Precision::I16) || - (precisionInfo.value == Precision::I8) || (precisionInfo.value == Precision::I32) || - (precisionInfo.value == Precision::I64) || (precisionInfo.value == Precision::BIN) || - (precisionInfo.value == Precision::BF16) || (precisionInfo.value == Precision::CUSTOM); + (precisionInfo.value == Precision::FP32) || (precisionInfo.value == Precision::FP64) || + (precisionInfo.value == 
Precision::FP16) || (precisionInfo.value == Precision::Q78) || + (precisionInfo.value == Precision::I16) || (precisionInfo.value == Precision::I8) || + (precisionInfo.value == Precision::I32) || (precisionInfo.value == Precision::I64) || + (precisionInfo.value == Precision::BIN) || (precisionInfo.value == Precision::BF16) || + (precisionInfo.value == Precision::CUSTOM); } protected: @@ -256,7 +295,9 @@ class Precision { } /** - * @brief Return PrecisionInfo + * @brief Creates PrecisionInfo based on ePrecision + * @param v A value of ePrecision emuneration + * @return Precision info object */ static PrecisionInfo getPrecisionInfo(ePrecision v) { #define CASE(x) \ @@ -264,6 +305,7 @@ class Precision { return makePrecisionInfo(#x); switch (v) { CASE(FP32); + CASE(FP64); CASE(FP16); CASE(BF16); CASE(I8); @@ -297,6 +339,11 @@ struct PrecisionTrait { using value_type = float; }; +template <> +struct PrecisionTrait { + using value_type = double; +}; + template <> struct PrecisionTrait { using value_type = int16_t; diff --git a/inference-engine/include/ie_unicode.hpp b/inference-engine/include/ie_unicode.hpp index 5f1583df5cd6e9..7a23f48bb25fd2 100644 --- a/inference-engine/include/ie_unicode.hpp +++ b/inference-engine/include/ie_unicode.hpp @@ -29,6 +29,8 @@ namespace InferenceEngine { /** * @deprecated Use OS-native conversion utilities * @brief Conversion from possibly-wide character string to a single-byte chain. + * @param str A possibly-wide character string + * @return A single-byte character string */ INFERENCE_ENGINE_DEPRECATED("Use OS-native conversion utilities") inline std::string fileNameToString(const file_name_t& str) { @@ -47,6 +49,8 @@ inline std::string fileNameToString(const file_name_t& str) { /** * @deprecated Use OS-native conversion utilities * @brief Conversion from single-byte character string to a possibly-wide one + * @param str A single-byte character string + * @return A possibly-wide character string */ INFERENCE_ENGINE_DEPRECATED("Use OS-native conversion utilities") inline file_name_t stringToFileName(const std::string& str) { diff --git a/inference-engine/include/ie_version.hpp b/inference-engine/include/ie_version.hpp index 89835ba7932643..e3773d6e1f1643 100644 --- a/inference-engine/include/ie_version.hpp +++ b/inference-engine/include/ie_version.hpp @@ -11,6 +11,9 @@ #include "ie_api.h" +/** + * @brief Inference Engine C++ API + */ namespace InferenceEngine { /** @@ -23,8 +26,8 @@ struct Version { * @brief An API version reflects the set of supported features */ struct { - int major; - int minor; + int major; //!< A major version + int minor; //!< A minor version } apiVersion; /** * @brief A null terminated string with build number diff --git a/inference-engine/samples/CMakeLists.txt b/inference-engine/samples/CMakeLists.txt index c85590d034dc98..3665310b97113a 100644 --- a/inference-engine/samples/CMakeLists.txt +++ b/inference-engine/samples/CMakeLists.txt @@ -121,7 +121,7 @@ set (BUILD_TESTING OFF) if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/gflags") function(add_gflags) set(BUILD_SHARED_LIBS OFF) - add_subdirectory(thirdparty/gflags) + add_subdirectory(thirdparty/gflags EXCLUDE_FROM_ALL) set_target_properties(gflags_nothreads_static PROPERTIES FOLDER thirdparty) endfunction() add_gflags() diff --git a/inference-engine/samples/common/samples/classification_results.h b/inference-engine/samples/common/samples/classification_results.h index 98ea8135b4512e..bda0ae29a1f035 100644 --- a/inference-engine/samples/common/samples/classification_results.h +++ 
b/inference-engine/samples/common/samples/classification_results.h @@ -97,6 +97,7 @@ class ClassificationResultT { switch (input.getTensorDesc().getPrecision()) { TBLOB_TOP_RESULT(FP32); + TBLOB_TOP_RESULT(FP64); TBLOB_TOP_RESULT(FP16); TBLOB_TOP_RESULT(Q78); TBLOB_TOP_RESULT(I16); diff --git a/inference-engine/scripts/run_code_checks.sh b/inference-engine/scripts/run_code_checks.sh deleted file mode 100755 index ef7b64cfd87308..00000000000000 --- a/inference-engine/scripts/run_code_checks.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash -# Copyright (C) 2018-2020 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -command -v realpath >/dev/null 2>&1 || { echo >&2 "cpplint require realpath executable but it's not installed. Aborting."; exit 1; } -SOURCE_DIR=$(realpath "${CURRENT_DIR}/..") -REPORT_DIR="${SOURCE_DIR}/report" -CPPLINT_REPORT_DIR="${REPORT_DIR}/cpplint" -PROJECT_NAME="Inference Engine" - -function run_cpplint() { - echo "-> CppLint started..." - if [ -d "${CPPLINT_REPORT_DIR}" ]; then - rm -Rf "${CPPLINT_REPORT_DIR}" - fi - - mkdir -p "${CPPLINT_REPORT_DIR}" - python "${CURRENT_DIR}/cpplint.py" --linelength=160 --counting=detailed --quiet --filter=" - -build/header_guard, - -build/include, - -build/include_order, - -build/include_subdir, - -build/include_what_you_use, - -build/namespaces, - -build/c++11, - -whitespace/indent, - -whitespace/comments, - -whitespace/ending_newline, - -runtime/references, - -runtime/int, - -runtime/explicit, - -readability/todo, - -readability/fn_size - " $(find "${SOURCE_DIR}" -name '*.h' -or -name '*.cc' -or -name '*.c' -or -name '*.cpp' -or -name '*.hpp' | - grep -v 'inference-engine/bin\|inference-engine/build\|inference-engine/report\|inference-engine/scripts\|inference-engine/temp\|inference-engine/tests_deprecated/\|gtest\|inference-engine/ie_bridges\|pugixml\|inference-engine/tools/vpu_perfcheck\|thirdparty/gflags\|thirdparty/ade\|thirdparty/fluid\|thirdparty/mkl-dnn\|thirdparty/movidius\|thirdparty/ocv\|thirdparty/plugixml\|thirdparty/std_lib\|thirdparty/clDNN/common\|thirdparty/clDNN/tutorial\|thirdparty/clDNN/utils' | - grep 'include\|src\|inference-engine/samples\|thirdparty/clDNN/kernel_selector\|thirdparty/clDNN/api\|thirdparty/clDNN/api_extension\|inference-engine/tests_' ) 2>&1 | - sed 's/"/\"/g' >&1| sed 's/&1| sed 's/>/\>/g' >&1| sed "s/'/\'/g" >&1| - sed 's/\&/\&/g' >&1| python "${CURRENT_DIR}/cpplint_to_cppcheckxml.py" &> "${CPPLINT_REPORT_DIR}/cpplint-cppcheck-result.xml" - - # Generate html from it - "${CURRENT_DIR}/cppcheck-htmlreport.py" --file="${CPPLINT_REPORT_DIR}/cpplint-cppcheck-result.xml" --report-dir="${CPPLINT_REPORT_DIR}" --source-dir="${SOURCE_DIR}" --title="${PROJECT_NAME}" - - # Change Cppcheck things to cpplint - sed -i.bak 's/Cppcheck/cpplint/g' "${CPPLINT_REPORT_DIR}/index.html" - sed -i.bak 's/a\ tool\ for\ static\ C\/C++\ code\ analysis/an\ open\ source\ lint\-like\ tool\ from\ Google/g' "${CPPLINT_REPORT_DIR}/index.html" - sed -i.bak 's/http:\/\/cppcheck.sourceforge.net/http:\/\/google\-styleguide.googlecode.com\/svn\/trunk\/cpplint\/cpplint.py/g' "${CPPLINT_REPORT_DIR}/index.html" - sed -i.bak 's/IRC: irc:\/\/irc.freenode.net\/cppcheck<\/a>/\ /g' "${CPPLINT_REPORT_DIR}/index.html" - - echo "-> CppLint finished..." -} - -function run_cpp_check() { - echo "-> Cppcheck started..." 
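With FP64 added to the Python precision map, Precision::ePrecision, PrecisionTrait, and the classification sample above, a double-precision blob is created like any other. A minimal sketch mirroring the new Python test; makeFp64Blob is an illustrative name:

#include <inference_engine.hpp>

// Sketch: an FP64 tensor descriptor backed by a TBlob<double>.
InferenceEngine::Blob::Ptr makeFp64Blob() {
    InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP64,
                                     {1, 3, 127, 127}, InferenceEngine::Layout::NCHW);
    auto blob = InferenceEngine::make_shared_blob<double>(desc);
    blob->allocate();
    blob->buffer().as<double*>()[0] = 1.0;   // write through the locked memory view
    return blob;
}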
- CPPCHECK_REPORT_DIR="${REPORT_DIR}/cppcheck" - if [ -d "${CPPCHECK_REPORT_DIR}" ]; then - rm -Rf "${CPPCHECK_REPORT_DIR}" - fi - - mkdir -p "${CPPCHECK_REPORT_DIR}" - - # Generate cppcheck xml - cppcheck -v --enable=all --suppress=missingIncludeSystem --std=c++11 "${SOURCE_DIR}" -i"${SOURCE_DIR}/thirdparty" -i"${SOURCE_DIR}/tests/libs" -i"${SOURCE_DIR}/temp" -i"${SOURCE_DIR}/build" \ - -i"${SOURCE_DIR}/bin" -i"${SOURCE_DIR}/report" -I"${SOURCE_DIR}/include" -I"${SOURCE_DIR}/src" -I"${SOURCE_DIR}/thirdparty/pugixml/src" -I"${SOURCE_DIR}/thirdparty/gflags/src" -I"${SOURCE_DIR}/samples/scoring_agent/HTTPClient" -I"${SOURCE_DIR}/src/inference_engine" --xml-version=2 2> "${CPPCHECK_REPORT_DIR}/cppcheck-only-result.xml" - - # Generate html from it - python "${CURRENT_DIR}/cppcheck-htmlreport.py" \ - --file="${CPPCHECK_REPORT_DIR}/cppcheck-only-result.xml" \ - --report-dir="${CPPCHECK_REPORT_DIR}" \ - --source-dir="${SOURCE_DIR}" \ - --title="${PROJECT_NAME}" - echo "-> Cppcheck finished..." -} - -if [ ! -d "${REPORT_DIR}" ]; then - mkdir -p "${REPORT_DIR}" -fi - -run_cpplint - -out_cpp_lint=`cat "${CPPLINT_REPORT_DIR}/cpplint-cppcheck-result.xml"` -if [[ "${out_cpp_lint}" == *"error"* ]]; then - exit 1 -fi -#run_cpp_check diff --git a/inference-engine/src/cldnn_engine/cldnn_engine.cpp b/inference-engine/src/cldnn_engine/cldnn_engine.cpp index 39a801254d98dc..bcfa794c704709 100644 --- a/inference-engine/src/cldnn_engine/cldnn_engine.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_engine.cpp @@ -326,11 +326,10 @@ auto check_inputs = [](InferenceEngine::InputsDataMap _networkInputs) { } }; -ExecutableNetworkInternal::Ptr clDNNEngine::LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork &network, +ExecutableNetworkInternal::Ptr clDNNEngine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std::map &config) { // verification of supported input - InferenceEngine::InputsDataMap _networkInputs; - network.getInputsInfo(_networkInputs); + InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo(); check_inputs(_networkInputs); CLDNNPlugin::Config conf = _impl->m_config; @@ -373,14 +372,14 @@ ExecutableNetworkInternal::Ptr clDNNEngine::LoadExeNetworkImpl(const InferenceEn context = m_defaultContext; - return std::make_shared(*CloneAndTransformNetwork(network, conf), context, conf); + InferenceEngine::CNNNetwork transformedNetwork(CloneAndTransformNetwork(network, conf)); + return std::make_shared(transformedNetwork, context, conf); } -ExecutableNetworkInternal::Ptr clDNNEngine::LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork &network, +ExecutableNetworkInternal::Ptr clDNNEngine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, RemoteContext::Ptr context, const std::map &config) { - InferenceEngine::InputsDataMap _networkInputs; - network.getInputsInfo(_networkInputs); + InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo(); check_inputs(_networkInputs); auto casted = std::dynamic_pointer_cast(context); @@ -397,7 +396,8 @@ ExecutableNetworkInternal::Ptr clDNNEngine::LoadExeNetworkImpl(const InferenceEn conf.max_dynamic_batch = static_cast(network.getBatchSize()); } - return std::make_shared(*CloneAndTransformNetwork(network, conf), casted, conf); + InferenceEngine::CNNNetwork transformedNetwork(CloneAndTransformNetwork(network, conf)); + return std::make_shared(transformedNetwork, casted, conf); } RemoteContext::Ptr clDNNEngine::CreateContext(const ParamMap& params) { @@ -430,7 +430,7 @@ void clDNNEngine::SetConfig(const std::map 
&config) { _impl->m_config.UpdateFromMap(config); } -QueryNetworkResult clDNNEngine::QueryNetwork(const ICNNNetwork& network, +QueryNetworkResult clDNNEngine::QueryNetwork(const CNNNetwork& network, const std::map& config) const { QueryNetworkResult res; GetDeviceInfo(config); // Verify device id diff --git a/inference-engine/src/cldnn_engine/cldnn_engine.h b/inference-engine/src/cldnn_engine/cldnn_engine.h index b54698269201ff..31a502a3b031f9 100644 --- a/inference-engine/src/cldnn_engine/cldnn_engine.h +++ b/inference-engine/src/cldnn_engine/cldnn_engine.h @@ -32,17 +32,17 @@ class clDNNEngine : public InferenceEngine::InferencePluginInternal, public: clDNNEngine(); - InferenceEngine::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork &network, + InferenceEngine::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std::map &config) override; - InferenceEngine::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork &network, + InferenceEngine::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, InferenceEngine::RemoteContext::Ptr context, const std::map &config) override; void SetConfig(const std::map &config) override; InferenceEngine::Parameter GetConfig(const std::string& name, const std::map& options) const override; InferenceEngine::Parameter GetMetric(const std::string& name, const std::map& options) const override; - InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::ICNNNetwork& network, + InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network, const std::map& config) const override; InferenceEngine::RemoteContext::Ptr CreateContext(const InferenceEngine::ParamMap& params) override; diff --git a/inference-engine/src/cldnn_engine/cldnn_executable_network.cpp b/inference-engine/src/cldnn_engine/cldnn_executable_network.cpp index ca1724ba25051d..03ea0b73bec9b3 100644 --- a/inference-engine/src/cldnn_engine/cldnn_executable_network.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_executable_network.cpp @@ -34,7 +34,7 @@ using namespace InferenceEngine::details; namespace CLDNNPlugin { -CLDNNExecNetwork::CLDNNExecNetwork(InferenceEngine::ICNNNetwork &network, RemoteContext::Ptr context, Config config) : +CLDNNExecNetwork::CLDNNExecNetwork(InferenceEngine::CNNNetwork &network, RemoteContext::Ptr context, Config config) : InferenceEngine::ExecutableNetworkThreadSafeDefault{[&]()->InferenceEngine::ITaskExecutor::Ptr { if (config.throughput_streams > 1) { return std::make_shared( diff --git a/inference-engine/src/cldnn_engine/cldnn_executable_network.h b/inference-engine/src/cldnn_engine/cldnn_executable_network.h index 6c7497ba928351..9ab84fd52e41c7 100644 --- a/inference-engine/src/cldnn_engine/cldnn_executable_network.h +++ b/inference-engine/src/cldnn_engine/cldnn_executable_network.h @@ -24,7 +24,7 @@ class CLDNNExecNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefa public: typedef std::shared_ptr Ptr; - explicit CLDNNExecNetwork(InferenceEngine::ICNNNetwork &network, RemoteContext::Ptr context, Config config); + CLDNNExecNetwork(InferenceEngine::CNNNetwork &network, RemoteContext::Ptr context, Config config); InferenceEngine::CNNNetwork GetExecGraphInfo() override; InferenceEngine::IInferRequest::Ptr CreateInferRequest() override; diff --git a/inference-engine/src/cldnn_engine/cldnn_graph.cpp b/inference-engine/src/cldnn_engine/cldnn_graph.cpp index 
702e988de7b018..0c487f23644a8a 100644 --- a/inference-engine/src/cldnn_engine/cldnn_graph.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_graph.cpp @@ -35,7 +35,7 @@ using namespace InferenceEngine::details; namespace CLDNNPlugin { -CLDNNGraph::CLDNNGraph(InferenceEngine::ICNNNetwork& network, gpu::ClContext::Ptr context, Config config, uint16_t stream_id) +CLDNNGraph::CLDNNGraph(InferenceEngine::CNNNetwork& network, gpu::ClContext::Ptr context, Config config, uint16_t stream_id) : m_context(context) , m_networkName(network.getName()) , m_config(config) diff --git a/inference-engine/src/cldnn_engine/cldnn_graph.h b/inference-engine/src/cldnn_engine/cldnn_graph.h index 86e65db4e80e94..c4dc89473752fa 100644 --- a/inference-engine/src/cldnn_engine/cldnn_graph.h +++ b/inference-engine/src/cldnn_engine/cldnn_graph.h @@ -39,7 +39,7 @@ class CLDNNGraph { public: typedef std::shared_ptr Ptr; - explicit CLDNNGraph(InferenceEngine::ICNNNetwork& network, gpu::ClContext::Ptr context, Config config, uint16_t stream_id = 0); + CLDNNGraph(InferenceEngine::CNNNetwork& network, gpu::ClContext::Ptr context, Config config, uint16_t stream_id = 0); explicit CLDNNGraph(std::shared_ptr graph, uint16_t stream_id = 0); InferenceEngine::CNNNetwork GetExecGraphInfo(); diff --git a/inference-engine/src/cldnn_engine/cldnn_program.cpp b/inference-engine/src/cldnn_engine/cldnn_program.cpp index dc06a6ba7d774a..43b89630349bbc 100644 --- a/inference-engine/src/cldnn_engine/cldnn_program.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_program.cpp @@ -385,7 +385,7 @@ bool Program::CanProcessDynBatch(InferenceEngine::ICNNNetwork &network) const { return check_result; } -Program::Program(InferenceEngine::ICNNNetwork& network, std::shared_ptr engine, const Config& config) +Program::Program(InferenceEngine::CNNNetwork& network, std::shared_ptr engine, const Config& config) : m_config(config) , m_defaultFormat(cldnn::format::bfyx) , m_engine(engine) @@ -396,8 +396,7 @@ Program::Program(InferenceEngine::ICNNNetwork& network, std::shared_ptrsecond->getInputData()); if (!input0.empty() && (input0.begin()->second->params.count("lpt_back_to_fp16") != 0)) { @@ -406,6 +405,8 @@ Program::Program(InferenceEngine::ICNNNetwork& network, std::shared_ptroutData.empty() || layer->insData.empty()) continue; - auto canReduceOutputPrecision = [](const CNNLayerPtr& l) -> bool { + auto isOutputLayer = [](const CNNLayerPtr& l, const OutputsDataMap& networkOutputs) -> bool { + bool is_output = false; + + if (GetNextLayers(l).empty()) + is_output = true; + + // Condition above is not enough, as network output layer + // can still be used in other parts of the graph + // (e.g. 1st output form TopK primitive may become network output + // while 2nd output from the same primitive may still be used + // in the graph). 
+ if (!is_output) { + for (auto layerOutput : l->outData) { + for (auto networkOutput : networkOutputs) { + if (layerOutput->getName() == networkOutput.second->getName()) { + is_output = true; + break; + } + } + + if (is_output) + break; + } + } + + return is_output; + }; + + auto canReduceOutputPrecision = [](const CNNLayerPtr& l, const bool isNetworkOutput) -> bool { + // Don't do the conversion for network outputs + if (isNetworkOutput) + return false; + auto type = LayerTypeFromStr(l->type); - // Don't do conversion for outputs auto next = GetNextLayers(l); - if (next.empty()) { - return false; - } if (type == LayerType::ScaleShift) { // ScaleShift is supposed to return Dequantized values, so in most of the cases we can convert it's output to FP16 @@ -463,9 +492,11 @@ Program::Program(InferenceEngine::ICNNNetwork& network, std::shared_ptr(layer, GetNextLayers(layer).empty()); - } else if (canReduceOutputPrecision(layer)) { + convertLayerPrecision(layer, is_network_output); + } else if (canReduceOutputPrecision(layer, is_network_output)) { for (auto &out_data : layer->outData) { if (out_data->getPrecision() == Precision::FP32) out_data->setPrecision(Precision::FP16); @@ -575,7 +606,7 @@ void Program::InitFormat(InferenceEngine::ICNNNetwork &network) { m_defaultFormat = FormatFromLayout(InferenceEngine::Layout::NCHW); } -std::shared_ptr Program::BuildProgram(InferenceEngine::ICNNNetwork &network) { +std::shared_ptr Program::BuildProgram(InferenceEngine::CNNNetwork &network) { cldnn::build_options options; if (!m_config.graph_dumps_dir.empty()) { options.set_option(cldnn::build_option::graph_dumps_dir(m_config.graph_dumps_dir)); @@ -586,11 +617,8 @@ std::shared_ptr Program::BuildProgram(InferenceEngine::ICNNNetwo cldnn::topology topology; // 1. create inputs - InferenceEngine::InputsDataMap networkInputs; - network.getInputsInfo(networkInputs); - - InferenceEngine::OutputsDataMap networkOutputs; - network.getOutputsInfo(networkOutputs); + InferenceEngine::InputsDataMap networkInputs = network.getInputsInfo(); + InferenceEngine::OutputsDataMap networkOutputs = network.getOutputsInfo(); p_currentOutputs = networkOutputs; if (networkInputs.empty()) { diff --git a/inference-engine/src/cldnn_engine/cldnn_program.h b/inference-engine/src/cldnn_engine/cldnn_program.h index 8276944404c082..2d3439dcde1766 100644 --- a/inference-engine/src/cldnn_engine/cldnn_program.h +++ b/inference-engine/src/cldnn_engine/cldnn_program.h @@ -85,7 +85,7 @@ struct PerfCounter { class Program { public: - Program(InferenceEngine::ICNNNetwork &network, std::shared_ptr engine, const Config& config); + Program(InferenceEngine::CNNNetwork &network, std::shared_ptr engine, const Config& config); std::shared_ptr getCompiledProgram(int program_id = 0); std::map primitiveIDs; @@ -241,7 +241,7 @@ class Program { std::shared_ptr m_engine; Config m_config; - std::shared_ptr BuildProgram(InferenceEngine::ICNNNetwork &network); + std::shared_ptr BuildProgram(InferenceEngine::CNNNetwork &network); void InitProfileInfo(const std::string& layerName, const std::string& layerType, diff --git a/inference-engine/src/gna_plugin/CMakeLists.txt b/inference-engine/src/gna_plugin/CMakeLists.txt index f5625436e6663a..c01325c06cd3ad 100644 --- a/inference-engine/src/gna_plugin/CMakeLists.txt +++ b/inference-engine/src/gna_plugin/CMakeLists.txt @@ -69,6 +69,7 @@ set_target_properties(${TARGET_NAME} ${TARGET_NAME}_test_static # install -install(FILES "${GNA_KERNEL_LIBRARY}" - DESTINATION ${IE_CPACK_IE_DIR}/external/gna/lib +file(GLOB_RECURSE 
source_list "${libGNA_LIBRARIES_BASE_PATH}/*${CMAKE_SHARED_LIBRARY_SUFFIX}*") +install(FILES ${source_list} + DESTINATION ${IE_CPACK_IE_DIR}/external/gna/lib COMPONENT gna) diff --git a/inference-engine/src/gna_plugin/frontend/layer_quantizer.hpp b/inference-engine/src/gna_plugin/frontend/layer_quantizer.hpp index f50aa9007f29ae..dba694b8055ac5 100644 --- a/inference-engine/src/gna_plugin/frontend/layer_quantizer.hpp +++ b/inference-engine/src/gna_plugin/frontend/layer_quantizer.hpp @@ -286,7 +286,7 @@ inline void quantizeWeightsBiases(const QuantDesc & quantDesc, THROW_IE_EXCEPTION << "Unsupported input dims size for " << wl->name << ", should be > 1, but " << wl->insData[0].lock().get()->getDims().size(); } uint32_t num_rows = isDiagonal ? 1 : wl->outData[0]->getDims()[oIdx]; - uint32_t num_columns = wl->insData[0].lock().get()->getDims()[iIdx]; + uint32_t num_columns = isDiagonal ? wl->_weights->size() : wl->insData[0].lock().get()->getDims()[iIdx]; if (LayerInfo(wl).isAffineFilter() || LayerInfo(wl).isConcatAlignFilter()) { // for affine filter layer insdata size not equal to actual coded in input layer @@ -476,7 +476,8 @@ class DataQuantizer : public DataQuantizerBas if (LayerInfo(*cnnLayer).isActivation() || LayerInfo(*cnnLayer).isCopy() || LayerInfo(*cnnLayer).isNonFunctional() || - LayerInfo(*cnnLayer).isPermute()) { + LayerInfo(*cnnLayer).isPermute() || + LayerInfo(*cnnLayer).isConst()) { // precision of activation layers is always equal input precision for (auto &&outData : cnnLayer->outData) { outData->setPrecision(Desc::mandatory().getInputPrecision()); @@ -485,8 +486,12 @@ class DataQuantizer : public DataQuantizerBas } cnnLayer->precision = Desc::mandatory().getInputPrecision(); - if (cnnLayer->type == "Const") { - if (cnnLayer->blobs["custom"]->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP16) { + if (LayerInfo(*cnnLayer).isConst()) { + auto initial_precision = cnnLayer->blobs["custom"]->getTensorDesc().getPrecision(); + // TODO I32 must be handled separately when it'll be supported + IE_ASSERT(initial_precision != InferenceEngine::Precision::I32); + + if (initial_precision == InferenceEngine::Precision::FP16) { cnnLayer->blobs["custom"] = make_fp32_blob(cnnLayer->blobs["custom"]); } auto const_scale_factor = InferenceEngine::getInjectedData(*cnnLayer)->_dst_quant.GetScale(); diff --git a/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp b/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp index d53403cd7bcccc..2cacd280be2e94 100644 --- a/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp +++ b/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp @@ -423,7 +423,12 @@ class ScaleFactorPerLayer { auto quantData = InferenceEngine::getInjectedData(*concatLayer); std::vector inputLayers; for (auto input_idx = 0; input_idx != concatLayer->insData.size(); input_idx++) { - inputLayers.push_back(InferenceEngine::CNNNetPrevLayer(concatLayer, input_idx)); + auto prev_layer = InferenceEngine::CNNNetPrevLayer(concatLayer, input_idx); + // FlattenConcat inserts reshape between concat and its inputs, which results in taking wrong layers as inputs for scale factor calculation + if (prev_layer->type == "reshape" && prev_layer->insData.size() == 1 && prev_layer->outData.size() == 1) { + prev_layer = InferenceEngine::CNNNetPrevLayer(prev_layer, 0); + } + inputLayers.push_back(prev_layer); } // if all inputs have same quant value - trivial propagation diff --git a/inference-engine/src/gna_plugin/gna_executable_network.hpp 
b/inference-engine/src/gna_plugin/gna_executable_network.hpp index d240c7863eda58..1b10e92faff012 100644 --- a/inference-engine/src/gna_plugin/gna_executable_network.hpp +++ b/inference-engine/src/gna_plugin/gna_executable_network.hpp @@ -40,7 +40,7 @@ class GNAExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafe _networkOutputs = plg->GetOutputs(); } - GNAExecutableNetwork(InferenceEngine::ICNNNetwork &network, std::shared_ptr plg) + GNAExecutableNetwork(InferenceEngine::CNNNetwork &network, std::shared_ptr plg) : plg(plg) { plg->LoadNetwork(network); } @@ -49,7 +49,7 @@ class GNAExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafe : GNAExecutableNetwork(aotFileName, std::make_shared(config)) { } - GNAExecutableNetwork(InferenceEngine::ICNNNetwork &network, const std::map &config) + GNAExecutableNetwork(InferenceEngine::CNNNetwork &network, const std::map &config) : GNAExecutableNetwork(network, std::make_shared(config)) { } diff --git a/inference-engine/src/gna_plugin/gna_graph_compiler.cpp b/inference-engine/src/gna_plugin/gna_graph_compiler.cpp index f0dfbb8470e083..705df14fdd8869 100644 --- a/inference-engine/src/gna_plugin/gna_graph_compiler.cpp +++ b/inference-engine/src/gna_plugin/gna_graph_compiler.cpp @@ -34,6 +34,7 @@ #include "layers/gna_fake_quantize_layer.hpp" #include "round_float_define.hpp" #include "gna_plugin_policy.hpp" +#include "gna_groups.hpp" using namespace InferenceEngine; using namespace std; @@ -522,23 +523,12 @@ void GNAGraphCompiler::PowerPrimitive(InferenceEngine::CNNLayerPtr layer) { auto input = layer->insData[0].lock(); auto outputs = *layer->outData.begin(); - uint32_t num_rows_in = InferenceEngine::details::product(begin(input->getDims()), end(input->getDims())); - uint32_t num_columns_in = 1; + auto reshaped_dims = Get2DReshapedData(input, 8)->getDims(); + uint32_t num_rows_in = reshaped_dims[1]; + uint32_t num_columns_in = reshaped_dims[0]; uint32_t num_rows_out = num_rows_in; uint32_t num_padding = ALIGN(num_rows_in, 8) - num_rows_in; - if (input->getDims().size() > 2 || input->getDims()[0] >= 8) { - for (size_t index_divide = 8; index_divide > 0; index_divide--) { - if (num_rows_in % index_divide == 0) { - num_rows_in /= index_divide; - num_columns_in = index_divide; - break; - } - } - num_rows_out = num_rows_in; - num_padding = ALIGN(num_rows_in, 8) - num_rows_in; - } - size_t num_data_bytes_out = InferenceEngine::details::product(begin(outputs->getDims()), end(outputs->getDims())) * outputs->getPrecision().size(); @@ -1116,8 +1106,9 @@ void GNAGraphCompiler::AffinePrimitive(InferenceEngine::CNNLayerPtr layer, bool auto outputs = *layer->outData.begin(); auto inputPrecision = quantized ? Precision(Precision::I16) : inputs->getPrecision(); - uint32_t num_rows_in = FROM_IR_DIM(inputs, 1); - uint32_t num_columns_in = FROM_IR_DIM(inputs, 2); + auto input_data = HasTo2DReshapeData(layer) ? Get2DReshapedData(inputs, 8) : inputs; + uint32_t num_rows_in = FROM_IR_DIM(input_data, 1); + uint32_t num_columns_in = FROM_IR_DIM(input_data, 2); uint32_t num_rows_out = isDiag ? num_rows_in : FROM_IR_DIM(outputs, 1); uint32_t num_padding = ALIGN(num_rows_in, 8) - num_rows_in; uint32_t num_padding_out = isDiag ? 
num_padding : 0; @@ -2253,4 +2244,4 @@ std::vector GNAGraphCompiler::getFromIRDimsOrderNCHW(InferenceEngin break; } return order; -} +} \ No newline at end of file diff --git a/inference-engine/src/gna_plugin/gna_graph_tools.hpp b/inference-engine/src/gna_plugin/gna_graph_tools.hpp index 8761dcd344efe9..8d562ee0475874 100644 --- a/inference-engine/src/gna_plugin/gna_graph_tools.hpp +++ b/inference-engine/src/gna_plugin/gna_graph_tools.hpp @@ -711,5 +711,4 @@ inline void CNNNetworkRemoveLayer(CNNLayerPtr layer, bool checkDims = true) { // removing layer->osp, and layer->isp connection not necessary - layer will delete it by itself } - } // namespace InferenceEngine diff --git a/inference-engine/src/gna_plugin/gna_groups.hpp b/inference-engine/src/gna_plugin/gna_groups.hpp new file mode 100644 index 00000000000000..2635dc1b95cb8f --- /dev/null +++ b/inference-engine/src/gna_plugin/gna_groups.hpp @@ -0,0 +1,59 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include "gna_plugin_log.hpp" +#include "layers/gna_layer_info.hpp" + +namespace GNAPluginNS { +/** + * @brief returns a pointer to 2D reshaped data to satisfy maximum size of zero dimension + * @param input a pointer to data to be reshaped + * @param maxZeroDimSize the maximum size of zero dimension + */ +inline InferenceEngine::DataPtr Get2DReshapedData(InferenceEngine::DataPtr input, size_t maxZeroDimSize) { + auto dims = input->getDims(); + uint32_t numRowsIn = InferenceEngine::details::product(begin(dims), end(dims)); + uint32_t numColumnsIn = 1; + // Rows number should be 8-elements aligned + if (numRowsIn % 8 == 0) { + if (dims.size() >= 2 || dims[0] >= maxZeroDimSize) { + size_t indexDivide = maxZeroDimSize; + while (indexDivide > 1) { + if ((numRowsIn / 8) % indexDivide == 0) break; + --indexDivide; + } + numRowsIn /= indexDivide; + numColumnsIn = indexDivide; + } + } + + InferenceEngine::SizeVector newDims(dims.size(), 1); + newDims[0] = numColumnsIn; + newDims[1] = numRowsIn; + return std::make_shared(input->getName(), + InferenceEngine::TensorDesc(input->getPrecision(), newDims, input->getLayout())); +} + +/** + * @brief returns true if input data should be 2D reshaped for the layer + * @param layer + */ +inline bool HasTo2DReshapeData(InferenceEngine::CNNLayerPtr layer) { + if (GNAPluginNS::LayerInfo(layer).isPower()) + return true; + + if (!GNAPluginNS::LayerInfo(layer).isScaleShift()) + return false; + + // Don't reshape user-defined ScaleShift layers + if (layer->name.rfind("SyntheticScaleShift", 0) == std::string::npos) + return false; + + // Don't reshape diagonal layers with bias connection + return !GNAPluginNS::LayerInfo(getCreatorLayer(layer->insData.front().lock()).lock()).has32BOutput(); +} +} // namespace GNAPluginNS \ No newline at end of file diff --git a/inference-engine/src/gna_plugin/gna_plugin.cpp b/inference-engine/src/gna_plugin/gna_plugin.cpp index c8a704f48d5aae..b8f8be8d7935b3 100644 --- a/inference-engine/src/gna_plugin/gna_plugin.cpp +++ b/inference-engine/src/gna_plugin/gna_plugin.cpp @@ -433,7 +433,7 @@ void GNAPlugin::UpdateInputScaleFromNetwork(InferenceEngine::ICNNNetwork & netwo } } -void GNAPlugin::LoadNetwork(ICNNNetwork & _network) { +void GNAPlugin::LoadNetwork(CNNNetwork & _network) { std::shared_ptr convertedNetwork; if (_network.getFunction()) { std::shared_ptr clonedNetwork = cloneNetwork(_network); @@ -460,7 +460,7 @@ void GNAPlugin::LoadNetwork(ICNNNetwork & _network) { manager.run_passes(graph); convertedNetwork = 
InferenceEngine::details::convertFunctionToICNNNetwork(graph, *clonedNetwork); } - InferenceEngine::ICNNNetwork &network = convertedNetwork ? *convertedNetwork : _network; + InferenceEngine::CNNNetwork network = convertedNetwork ? InferenceEngine::CNNNetwork{convertedNetwork} : _network; NetPass::ConvertPrecision(network, Precision::I64, Precision::I32); NetPass::ConvertPrecision(network, Precision::U64, Precision::I32); @@ -498,7 +498,7 @@ void GNAPlugin::LoadNetwork(ICNNNetwork & _network) { passes->registerPass(); passes->registerPass(); - passes->registerPass(); + passes->registerPass(); passes->registerPass(); passes->registerPass(); if (policy.PermutePolicy != Policy::Permute::DISABLED) { @@ -1458,7 +1458,7 @@ void GNAPlugin::UpdateFieldsFromConfig() { *gnaFlags = config.gnaFlags; } -InferenceEngine::QueryNetworkResult GNAPlugin::QueryNetwork(const InferenceEngine::ICNNNetwork& network, +InferenceEngine::QueryNetworkResult GNAPlugin::QueryNetwork(const InferenceEngine::CNNNetwork& network, const std::map& config) const { InferenceEngine::QueryNetworkResult res; @@ -1467,9 +1467,7 @@ InferenceEngine::QueryNetworkResult GNAPlugin::QueryNetwork(const InferenceEngin } std::unordered_set allLayers; - InferenceEngine::InputsDataMap inputs; - - network.getInputsInfo(inputs); + InferenceEngine::InputsDataMap inputs = network.getInputsInfo(); std::vector sortedLayers = CNNNetSortTopologically(network); if (inputs.empty()) { diff --git a/inference-engine/src/gna_plugin/gna_plugin.hpp b/inference-engine/src/gna_plugin/gna_plugin.hpp index d90950fb8ca072..83f5806fd020c4 100644 --- a/inference-engine/src/gna_plugin/gna_plugin.hpp +++ b/inference-engine/src/gna_plugin/gna_plugin.hpp @@ -13,7 +13,7 @@ #include #include #include -#include "cpp_interfaces/impl/ie_memory_state_internal.hpp" +#include "cpp_interfaces/impl/ie_variable_state_internal.hpp" #include "descriptions/gna_flags.hpp" #include "descriptions/gna_input_desc.hpp" #include "descriptions/gna_output_desc.hpp" @@ -99,23 +99,23 @@ class GNAPlugin : public InferenceEngine::IInferencePlugin { std::string GetName() const noexcept override; void SetName(const std::string & pluginName) noexcept override; - void LoadNetwork(InferenceEngine::ICNNNetwork &network); + void LoadNetwork(InferenceEngine::CNNNetwork &network); bool Infer(const InferenceEngine::BlobMap &input, InferenceEngine::BlobMap &result); void GetPerformanceCounts(std::map &perfMap); void AddExtension(InferenceEngine::IExtensionPtr extension) override; void SetConfig(const std::map &config) override; - InferenceEngine::ExecutableNetwork LoadNetwork(const InferenceEngine::ICNNNetwork &network, + InferenceEngine::ExecutableNetwork LoadNetwork(const InferenceEngine::CNNNetwork &network, const std::map &config_map) override { THROW_GNA_EXCEPTION << "Not implemented"; } - InferenceEngine::ExecutableNetwork LoadNetwork(const InferenceEngine::ICNNNetwork &network, + InferenceEngine::ExecutableNetwork LoadNetwork(const InferenceEngine::CNNNetwork &network, const std::map &config_map, InferenceEngine::RemoteContext::Ptr context) override { THROW_GNA_EXCEPTION << "Not implemented"; } bool Infer(const InferenceEngine::Blob &input, InferenceEngine::Blob &result); void SetCore(InferenceEngine::ICore*) noexcept override {} InferenceEngine::ICore* GetCore() const noexcept override {return nullptr;} void Reset(); - InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::ICNNNetwork &network, + InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork 
&network, const std::map& config) const override; uint32_t QueueInference(const InferenceEngine::BlobMap &input, InferenceEngine::BlobMap &result); bool Wait(uint32_t idx); diff --git a/inference-engine/src/gna_plugin/gna_plugin_internal.hpp b/inference-engine/src/gna_plugin/gna_plugin_internal.hpp index 18b70fd05c4a7f..815934b12c0ef6 100644 --- a/inference-engine/src/gna_plugin/gna_plugin_internal.hpp +++ b/inference-engine/src/gna_plugin/gna_plugin_internal.hpp @@ -30,13 +30,14 @@ class GNAPluginInternal : public InferenceEngine::InferencePluginInternal { public: InferenceEngine::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl( - const InferenceEngine::ICNNNetwork &network, + const InferenceEngine::CNNNetwork &network, const std::map &config) override { Config updated_config(defaultConfig); updated_config.UpdateFromMap(config); auto plg = std::make_shared(updated_config.key_config_map); plgPtr = plg; - return std::make_shared(*cloneNetwork(network), plg); + InferenceEngine::CNNNetwork clonedNetwork(cloneNetwork(network)); + return std::make_shared(clonedNetwork, plg); } void SetConfig(const std::map &config) override { @@ -69,7 +70,7 @@ class GNAPluginInternal : public InferenceEngine::InferencePluginInternal { return GetCurrentPlugin()->GetName(); } - InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::ICNNNetwork& network, + InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network, const std::map& config) const override { auto plg = GetCurrentPlugin(); try { diff --git a/inference-engine/src/gna_plugin/gna_plugin_policy.hpp b/inference-engine/src/gna_plugin/gna_plugin_policy.hpp index 6880b9ec57d879..bf6b5aa9b9dc48 100644 --- a/inference-engine/src/gna_plugin/gna_plugin_policy.hpp +++ b/inference-engine/src/gna_plugin/gna_plugin_policy.hpp @@ -34,10 +34,10 @@ class Policy { AUTO_PERMUTE } PermutePolicy = Permute::DISABLED; - enum class Concat4Dto2DConversion { + enum class FlattenTrivialConcatConversion { DISABLED, ENABLED - } ConcatConversionPolicy = Concat4Dto2DConversion::ENABLED; + } ConcatConversionPolicy = FlattenTrivialConcatConversion::ENABLED; enum class ConcatAlignment { DISABLED, diff --git a/inference-engine/src/gna_plugin/memory/gna_memory_state.hpp b/inference-engine/src/gna_plugin/memory/gna_memory_state.hpp index 2a7c83d6daea9b..2fc0b30c3f6e58 100644 --- a/inference-engine/src/gna_plugin/memory/gna_memory_state.hpp +++ b/inference-engine/src/gna_plugin/memory/gna_memory_state.hpp @@ -6,7 +6,7 @@ #include #include -#include +#include #include "gna_plugin.hpp" namespace GNAPluginNS { diff --git a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp index ff1ac35a3cc062..51707a60b84296 100644 --- a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp +++ b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp @@ -37,7 +37,7 @@ #include "layers/gna_layer_info.hpp" #include "gna_upstream_iterator.hpp" #include "frontend/quantization.h" - +#include "gna_groups.hpp" using namespace InferenceEngine; using namespace InferenceEngine::details; @@ -77,8 +77,9 @@ static void insertDiagonalLayerBetween(InferenceEngine::CNNLayerPtr prevLayer, IE_ASSERT(diagLayer != nullptr); // TODO: diagonal size - auto dimsIndex = nextLayer->outData[0]->getTensorDesc().getDims().size() - 1; - std::vector weightsValues(nextLayer->outData[0]->getTensorDesc().getDims()[dimsIndex], fillValue); + size_t weightsSize = LayerInfo(prevLayer).has32BOutput() ? 
nextLayer->outData[0]->getDims().back() : + Get2DReshapedData(nextLayer->outData[0], 8)->getDims()[1]; + std::vector weightsValues(weightsSize, fillValue); IE_ASSERT(diagLayer != nullptr); diagLayer->_weights = make_shared_blob( TensorDesc( @@ -91,7 +92,6 @@ static void insertDiagonalLayerBetween(InferenceEngine::CNNLayerPtr prevLayer, auto diagonalWithQuant = quantized ? InferenceEngine::injectData(diagLayer) : diagLayer; - getCreatorLayer(dataPtr) = diagonalWithQuant; diagonalWithQuant->outData.push_back(dataPtr); // actual insertion @@ -259,6 +259,10 @@ void HandleMultipleActivationsForTheLayerPass::run() { LayerInfo info(inputTo.second); if (info.isActivation()) { + if (!activations.empty() && odata->getDims()[0] != 1) { + THROW_GNA_EXCEPTION << "Unsupported batch size " << odata->getDims()[0] + << " for diagonal layer insertion"; + } activations.insert(inputTo.second); } } @@ -877,18 +881,27 @@ void InsertCopyLayerPass::run() { } } -void Concat4Dto2DPass::run() { - // Find 4D concat layers that will have to use ConcatAlignFilters and can be substituted by 2D concat +void FlattenTrivialConcatPass::run() { + // change all trivial concatenations (concatenation where output buffer is a buffer made by appending input buffers) + // by reshaping its inputs to 1 x total_input_size and its output to 1 x total_concat_size and changing the axis to 1 // for example if 4D concat have unaligned inputs then ConcatAlignFilters need to be used if sizes before // axis are all ones then concat can be changed to 2D for example, lets say all unputs have same shape equal to: // 1, 1, 5, 3 then for axis 0, 1, 2 the change will be made and inputs will be reshaped to 1, 15, // but for shape 2, 1, 5, 3 only axis 0 is valid and inputs will reshape to 1, 30 auto quantized = InferenceEngine::getInjectedData(pLayers->front()); - - if (getPassManager()->getPolicy().ConcatConversionPolicy == Policy::Concat4Dto2DConversion::DISABLED) return; + if (getPassManager()->getPolicy().ConcatConversionPolicy == Policy::FlattenTrivialConcatConversion::DISABLED) return; if (getPassManager()->getPolicy().ConcatAlignmentPolicy == Policy::ConcatAlignment::DISABLED) return; if (getPassManager()->getPolicy().ConcatAlignmentPolicy == Policy::ConcatAlignment::DISABLED_FOR_FP32 && !quantized) return; + auto getLayerByIndex = [](int idx, ConcatLayer* concatLayer) { + auto input = concatLayer->insData[idx]; + auto lockedInput = input.lock(); + if (!lockedInput) { + THROW_GNA_EXCEPTION << "cannot get insdata : "<< idx << " for layer: " << concatLayer->name; + } + return lockedInput; + }; + for (auto & l : *pLayers) { LayerInfo info(l); auto concatLayer = info.as(); @@ -896,63 +909,58 @@ void Concat4Dto2DPass::run() { if (concatLayer->insData.size() < 1) continue; auto dims_size = concatLayer->insData[0].lock()->getDims().size(); - if (dims_size > 2) { - auto axis = concatLayer->_axis; - bool skip_layer = false; - for (int i = 0; i < axis; i++) { - if (concatLayer->insData[0].lock()->getDims()[i] != 1) skip_layer = true; - } - if (skip_layer) continue; - skip_layer = true; - std::vector total_sizes; - for (auto& input : concatLayer->insData) { - auto input_dims = input.lock()->getDims(); - total_sizes.push_back(std::accumulate(input_dims.begin(), input_dims.end(), size_t(1), std::multiplies())); - if (total_sizes.back() % 64 != 0) skip_layer = false; - } - if (skip_layer) continue; - - for (size_t input_idx = 0; input_idx != concatLayer->insData.size(); input_idx++) { - auto getLayerByIndex = [&concatLayer](int idx) { - auto 
input = concatLayer->insData[idx]; - auto lockedInput = input.lock(); - if (!lockedInput) { - THROW_GNA_EXCEPTION << "cannot get insdata : "<< idx << " for layer: " << concatLayer->name; - } - return lockedInput; - }; + if (dims_size < 2 || concatLayer->_axis == dims_size - 1) continue; - auto concatInput = getLayerByIndex(input_idx); + auto axis = concatLayer->_axis; + bool skip_layer = false; + for (int i = 0; i < axis; i++) { + if (concatLayer->insData[0].lock()->getDims()[i] != 1) skip_layer = true; + } + if (skip_layer) continue; + std::vector total_sizes; + for (auto& input : concatLayer->insData) { + auto input_dims = input.lock()->getDims(); + total_sizes.push_back(std::accumulate(input_dims.begin(), input_dims.end(), size_t(1), std::multiplies())); + } - auto tensor = InferenceEngine::TensorDesc(concatInput->getTensorDesc()); - tensor.reshape(SizeVector({1, total_sizes[input_idx]}), Layout::NC); - auto reshapeName = l->name + "_input_"+ std::to_string(input_idx) +"_reshape"; - auto reshape = CNNNetworkCreateReshape(tensor, reshapeName, quantized); + for (size_t input_idx = 0; input_idx != concatLayer->insData.size(); input_idx++) { + auto concatInput = getLayerByIndex(input_idx, concatLayer); - CNNNetworkInsertLayer(getCreatorLayer(concatInput).lock(), l, reshape); - gnalog() << "\tInserted " << reshapeName << " between " << getCreatorLayer(concatInput).lock()->name << " and " << l->name << std::endl; - } + auto tensor = InferenceEngine::TensorDesc(concatInput->getTensorDesc()); + tensor.reshape(SizeVector({1, total_sizes[input_idx]}), Layout::NC); + auto reshapeName = l->name + "_input_"+ std::to_string(input_idx) +"_reshape"; + auto reshape = CNNNetworkCreateReshape(tensor, reshapeName, quantized); + + CNNNetworkInsertLayer(getCreatorLayer(concatInput).lock(), l, reshape); + gnalog() << "\tInserted " << reshapeName << " between " << getCreatorLayer(concatInput).lock()->name << " and " << l->name << std::endl; + } - for (auto output_idx = 0; output_idx != concatLayer->outData.size(); output_idx++) { - auto output = concatLayer->outData[output_idx]; - auto output_tensor_copy = TensorDesc(output->getTensorDesc()); + for (auto output_idx = 0; output_idx != concatLayer->outData.size(); output_idx++) { + auto output = concatLayer->outData[output_idx]; + auto output_tensor_copy = TensorDesc(output->getTensorDesc()); - auto dims = output_tensor_copy.getDims(); - auto total_size = std::accumulate(dims.begin(), dims.end(), size_t(1), std::multiplies()); + auto dims = output_tensor_copy.getDims(); + auto total_size = std::accumulate(dims.begin(), dims.end(), size_t(1), std::multiplies()); - auto new_tensor = output->getTensorDesc(); - new_tensor.reshape(SizeVector({1, total_size}), Layout::NC); + auto new_tensor = output->getTensorDesc(); + new_tensor.reshape(SizeVector({1, total_size}), Layout::NC); - auto new_output = CNNReplaceDataWithChangedTensorDescription(output, new_tensor); - gnalog() << "\tChanged " << output->getName() << " dims to 2D" << std::endl; + auto new_output = CNNReplaceDataWithChangedTensorDescription(output, new_tensor); + gnalog() << "\tChanged " << output->getName() << " dims to 2D" << std::endl; - auto reshapeName = l->name + "_output_"+ std::to_string(output_idx) +"_reshape"; + auto reshapeName = l->name + "_output_"+ std::to_string(output_idx) +"_reshape"; - auto reshape = CNNNetworkCreateReshape(output_tensor_copy, reshapeName, quantized); + auto reshape = CNNNetworkCreateReshape(output_tensor_copy, reshapeName, quantized); + if (getInputTo(new_output).empty()) 
{ + reshape->insData.push_back(new_output); + getInputTo(new_output)[reshape->name] = reshape; + } else { CNNNetworkInsertLayer(l, nullptr, reshape, output_idx); - gnalog() << "\tInserted " << reshapeName << " after " << l->name << std::endl; } + gnalog() << "\tInserted " << reshapeName << " after " << l->name << std::endl; } + + concatLayer->_axis = 1; } } @@ -1404,28 +1412,30 @@ void SubstituteScaleShiftBroadCastPass::run() { } bool was_reshaped = reshaped_data.count(insData->getName()) != 0; + bool reshape_batch = HasTo2DReshapeData(l); InferenceEngine::SizeVector dataDims; if (was_reshaped) { dataDims = reshaped_data[insData->getName()]; } else { - dataDims = insData->getDims(); + dataDims = HasTo2DReshapeData(l) ? Get2DReshapedData(insData, 8)->getDims() : insData->getDims(); } if (dataDims.size() <= 2) { // NC or C cannot do broadcast continue; } + auto batchSize = dataDims[0]; auto nElements = product(begin(dataDims), end(dataDims)) / batchSize; auto weightsElements = scaleShift->_weights->size(); auto weightsBytes = scaleShift->_weights->byteSize(); - if (nElements == weightsElements) { + if (!reshape_batch && nElements == weightsElements) { continue; } // only 3d scaleshift supported where number of c is arbitrary - auto lastD = dataDims[dataDims.size() - 1]; + auto lastD = reshape_batch ? dataDims[1] : dataDims.back(); if (lastD != weightsElements) { THROW_GNA_EXCEPTION << "Unsupported layer: " << l->name << " should have last dim(" << lastD << ") equal to weights(" << weightsElements << ") length"; @@ -1668,11 +1678,17 @@ void FuseMultipleIdentitiesPass::run() { }; auto prevLayersReached = CNNNetGetPrevLayersSkip(l, isFunctional); - prevLayersReached.erase(std::remove_if(prevLayersReached.begin(), - prevLayersReached.end(), - [] (const std::pair & candidate) { - return LayerInfo(candidate.first).isLink(); - }), prevLayersReached.end()); + if (!prevLayersReached.empty()) { + prevLayersReached.erase(std::remove_if(prevLayersReached.begin(), + prevLayersReached.end(), + [] (const std::pair & candidate) { + return LayerInfo(candidate.first).isLink(); + }), prevLayersReached.end()); + if (prevLayersReached.empty()) { + gnalog() << ", connected to link output only" << std::endl; + continue; + } + } if (prevLayersReached.size() != 1) { std::stringstream layers; diff --git a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.hpp b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.hpp index ee82abeda4f25c..7f2da1ee0be7ff 100644 --- a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.hpp +++ b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.hpp @@ -142,9 +142,9 @@ DECL_PASS(InsertCopyLayer); DECL_PASS(InsertSplitAligningFilter); /** -* @brief Pass that changes 4D concat to 2D concat in cases that would have to use ConcatAlignFilter +* @brief Pass that flattens trivial concatenations inputs and output and changes its axis to 1 */ -DECL_PASS(Concat4Dto2D); +DECL_PASS(FlattenTrivialConcat); /** * @brief concat-aligning filter layer insertion required in cases when concat inputs size are not 64-aligned diff --git a/inference-engine/src/gna_plugin/runtime/gna_float_runtime_op.cpp b/inference-engine/src/gna_plugin/runtime/gna_float_runtime_op.cpp index 1ea9df72728a16..4f6e44978f69c6 100644 --- a/inference-engine/src/gna_plugin/runtime/gna_float_runtime_op.cpp +++ b/inference-engine/src/gna_plugin/runtime/gna_float_runtime_op.cpp @@ -81,8 +81,8 @@ void FP::ApplyDiagonalTransform(intel_dnn_component_t *component) { } } for (uint32_t j = 0; j < n; j++) { - 
float *Bcol = B + j * ldb; - float *Ccol = C + j * ldc; + float *Bcol = B + j * component->num_rows_in; + float *Ccol = C + j * component->num_rows_out; cblas_ssbmv1(CblasRowMajor, CblasLower, m, 0, 1.0, A, 1, Bcol, 1, 1.0, Ccol, 1); } } diff --git a/inference-engine/src/hetero_plugin/CMakeLists.txt b/inference-engine/src/hetero_plugin/CMakeLists.txt index cd8d15a4ece347..41eb7688e6ab4f 100644 --- a/inference-engine/src/hetero_plugin/CMakeLists.txt +++ b/inference-engine/src/hetero_plugin/CMakeLists.txt @@ -16,8 +16,8 @@ ie_faster_build(${TARGET_NAME} UNITY ) -target_link_libraries(${TARGET_NAME} PRIVATE ade pugixml inference_engine - inference_engine_legacy ${NGRAPH_LIBRARIES} inference_engine_transformations) +target_link_libraries(${TARGET_NAME} PRIVATE pugixml inference_engine + ${NGRAPH_LIBRARIES} inference_engine_transformations) ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) diff --git a/inference-engine/src/hetero_plugin/hetero_ade_util.cpp b/inference-engine/src/hetero_plugin/hetero_ade_util.cpp deleted file mode 100644 index 75fbccbfb11c32..00000000000000 --- a/inference-engine/src/hetero_plugin/hetero_ade_util.cpp +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "hetero_ade_util.hpp" - -#include -#include - -#include -#include -#include - -#include -#include -#include - -namespace InferenceEngine { -namespace { -using VisitedLayersMap = std::unordered_map; -using TGraph = ade::TypedGraph; - -void translateVisitLayer(VisitedLayersMap& visited, - TGraph& gr, - const ade::NodeHandle& prevNode, - const CNNLayer::Ptr& layer) { - assert(nullptr != layer);; - assert(!ade::util::contains(visited, layer)); - auto node = gr.createNode(); - gr.metadata(node).set(CNNLayerMetadata{layer}); - if (nullptr != prevNode) { - gr.link(prevNode, node); - } - visited.insert({layer, node}); - for (auto&& data : layer->outData) { - for (auto&& layerIt : getInputTo(data)) { - auto nextLayer = layerIt.second; - auto it = visited.find(nextLayer); - if (visited.end() == it) { - translateVisitLayer(visited, gr, node, nextLayer); - } else { - gr.link(node, it->second); - } - } - } -} -} // namespace - -void translateNetworkToAde(ade::Graph& gr, ICNNNetwork& network) { - TGraph tgr(gr); - VisitedLayersMap visited; - for (auto& data : getRootDataObjects(network)) { - assert(nullptr != data); - for (auto& layerIt : getInputTo(data)) { - auto layer = layerIt.second; - assert(nullptr != layer); - if (!ade::util::contains(visited, layer)) { - translateVisitLayer(visited, tgr, nullptr, layer); - } - } - } -} - -const char* CNNLayerMetadata::name() { - return "CNNLayerMetadata"; -} - -} // namespace InferenceEngine diff --git a/inference-engine/src/hetero_plugin/hetero_ade_util.hpp b/inference-engine/src/hetero_plugin/hetero_ade_util.hpp deleted file mode 100644 index 7d10bc1bca3a8e..00000000000000 --- a/inference-engine/src/hetero_plugin/hetero_ade_util.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace ade { -class Graph; -} // namespace ade - -namespace InferenceEngine { - -struct CNNLayerMetadata { - CNNLayerPtr layer; - - static const char* name(); -}; - -class ICNNNetwork; -void translateNetworkToAde(ade::Graph& gr, ICNNNetwork& network); -} // namespace InferenceEngine - diff --git a/inference-engine/src/hetero_plugin/hetero_executable_network.cpp 
b/inference-engine/src/hetero_plugin/hetero_executable_network.cpp index 19dcd54877f803..6ea6efd5cc4a69 100644 --- a/inference-engine/src/hetero_plugin/hetero_executable_network.cpp +++ b/inference-engine/src/hetero_plugin/hetero_executable_network.cpp @@ -5,8 +5,6 @@ #include "ie_metric_helpers.hpp" #include "hetero_executable_network.hpp" #include "hetero_async_infer_request.hpp" -#include -#include "hetero_graph_splitter.hpp" #include "hetero_itt.hpp" #include "xml_parse_utils.h" #include @@ -44,283 +42,19 @@ using namespace HeteroPlugin; using namespace InferenceEngine::PluginConfigParams; using namespace InferenceEngine::HeteroConfigParams; -namespace { - -void forward(const CNNLayerPtr& layer, std::deque& layers) { - for (const auto& out : layer->outData) { - for (const auto& out_link : getInputTo(out)) { - const auto& nextLayer = out_link.second; - if (nullptr != nextLayer) { - layers.emplace_back(nextLayer); - } - } - } -} - -template -void traverse(T& inputs, - std::function apply, - std::function& layers)> expand = forward) { - std::unordered_set visitedObjects; - std::deque layersToCheck; - - layersToCheck.insert(layersToCheck.end(), inputs.begin(), inputs.end()); - - while (!layersToCheck.empty()) { - auto& layer = layersToCheck.front(); - if (visitedObjects.insert(layer).second) { - apply(layer); - expand(layer, layersToCheck); - } - layersToCheck.pop_front(); - } -} - -void traverse(InferenceEngine::ICNNNetwork& network, - std::function apply, - std::function& layers)> expand = forward) { - std::vector layers; - - InferenceEngine::InputsDataMap inputs; - network.getInputsInfo(inputs); - for (const auto& input : inputs) { - const auto data = input.second->getInputData(); - for (const auto& to : getInputTo(data)) { - const auto nextLayer = to.second; - assert(nullptr != nextLayer); - layers.emplace_back(nextLayer); - } - } - - traverse(layers, apply, expand); -} - -std::vector getAffinities(InferenceEngine::ICNNNetwork &network) { - std::vector ret; - std::unordered_set affinities; - traverse(network, - [&](const InferenceEngine::CNNLayerPtr &layer) { - assert(nullptr != layer); - if (!contains(affinities, layer->affinity)) { - affinities.insert(layer->affinity); - ret.push_back(layer->affinity); - } - }); - return ret; -} - -void dumpGraph(InferenceEngine::ICNNNetwork &network, - const std::vector &subgraphs, - std::ostream &stream) { - static const std::array colors{{"#FFC405", - "#20F608", - "#F1F290", - "#C405FF", - "#BCFF05", - "#05FFC4", - "#FFC405", - "#5A5DF0", - "#FF2E05"}}; - auto split_color = [subgraphs](const CNNLayerPtr layer, - ordered_properties &printed_properties, - ordered_properties &node_properties) { - for (size_t i = 0; i < subgraphs.size(); i++) { - for (auto s : subgraphs[i]) { - if (s->name == layer->name) { - node_properties.emplace_back( - "fillcolor", - colors[std::min(i, colors.size() - 1)]); - printed_properties.insert(printed_properties.begin(), - std::pair("subgraph#", std::to_string(i))); - printed_properties.insert(printed_properties.begin(), - std::pair("device", layer->affinity)); - return; - } - } - } - }; - - saveGraphToDot(network, stream, split_color); -} - -} // namespace - -void HeteroExecutableNetwork::InitCNNImpl(const InferenceEngine::ICNNNetwork& network_) { - auto networkPtr = cloneNet(network_); - auto& network = *networkPtr; - - // going over all network, if all layers are not assigned to devices, apply the default fallback policy - details::CNNNetworkIterator i(&network); - bool allEmpty = true; - while (i != 
details::CNNNetworkIterator()) { - CNNLayer::Ptr layer = *i; - if (!layer->affinity.empty()) { - allEmpty = false; - break; - } - i++; - } - - auto itDumpDotFile = _config.find(HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)); - bool dumpDotFile = itDumpDotFile != _config.end() ? itDumpDotFile->second == YES : false; -#ifndef NDEBUG - dumpDotFile = true; -#endif - - if (allEmpty) { - auto it = _config.find("TARGET_FALLBACK"); - if (it != _config.end()) { - _heteroPlugin->SetAffinity(network, _config); - } else { - THROW_IE_EXCEPTION << "The 'TARGET_FALLBACK' option was not defined for heterogeneous plugin"; - } - } else { - if (dumpDotFile) { - std::unordered_set devicesSet; - details::CNNNetworkIterator i(&network); - while (i != details::CNNNetworkIterator()) { - CNNLayer::Ptr layer = *i; - if (!layer->affinity.empty()) { - devicesSet.insert(layer->affinity); - } - i++; - } - std::vector devices{std::begin(devicesSet), std::end(devicesSet)}; - std::stringstream stream(std::stringstream::out); - stream << "hetero_affinity_" << network.getName() << ".dot"; - std::ofstream file(stream.str().c_str()); - saveGraphToDot(network, file, HeteroLayerColorer{devices}); - } - } - - details::CNNNetworkIterator el(&network); - bool someEmptyAffinity = false; - CNNLayer::Ptr layerEmptyAffinity = nullptr; - while (el != details::CNNNetworkIterator()) { - CNNLayer::Ptr layer = *el; - if (!CaselessEq()(layer->type, "input") && - layer->affinity.empty()) { - someEmptyAffinity = true; - layerEmptyAffinity = layer; - break; - } - el++; - } - - if (allEmpty && someEmptyAffinity) { - THROW_IE_EXCEPTION << "Hetero plugin used default fallback policy, but some layers eg: \n(Name:" << - layerEmptyAffinity->name << ", Type: " << layerEmptyAffinity->type << - ") were not able to be assigned on any pointed device.\n" << - "It happened because these layers are not supported in plugins by default.\n" << - "You need to implement custom layers to support them."; - } else if (someEmptyAffinity) { - THROW_IE_EXCEPTION << "Network passed to LoadNetwork has affinity assigned, but some layers eg: \n(Name:" << - layerEmptyAffinity->name << ", Type: " << layerEmptyAffinity->type << - ") were not assigned to any device.\n" << - "It might happen if you assigned layers manually and missed some layers or\n" << - "if you used some automatic assigning mode which decided that these layers are not\n" << - "supported by any plugin"; - } - - InputsDataMap externalInputsData; - network.getInputsInfo(externalInputsData); - - OutputsDataMap externalOutputsData; - network.getOutputsInfo(externalOutputsData); - - auto subgraphs = splitGraph(network, getAffinities(network)); - sortSubgraphs(subgraphs); - - if (dumpDotFile) { - std::stringstream stream(std::stringstream::out); - stream << "hetero_subgraphs_" << network.getName() << ".dot"; - - std::ofstream file(stream.str().c_str()); - dumpGraph(network, subgraphs, file); - } - - std::vector descs; - std::vector tempLayers; - for (auto &&subgraph : subgraphs) { - auto affinity = (*subgraph.begin())->affinity; - tempLayers.assign(subgraph.begin(), subgraph.end()); - auto tempNetwork = cloneNet(tempLayers); - auto name = network.getName() + "_" + std::to_string(std::distance(subgraphs.data(), &subgraph)); - tempNetwork->setName(name); - // restoring some outputs from original net if they are not marked as output automatically - // this might happen if output was set manually for origin network and - // it doesn't go to next subgraph - for (auto il : tempLayers) { - if (externalOutputsData.find(il->name) != 
externalOutputsData.end()) { - tempNetwork->addOutput(il->name); - } - } - - // update of pre-processing info - InputsDataMap clonedInputs; - tempNetwork->getInputsInfo(clonedInputs); - for (auto &&it : externalInputsData) { - auto inp = clonedInputs.find(it.first); - if (inp != clonedInputs.end() && nullptr != inp->second) { - inp->second->setPrecision(it.second->getPrecision()); - inp->second->getPreProcess() = it.second->getPreProcess(); - } - } - - // go over all inputs/outputs and right now - // set precision for intermediate data (not for external) to FP32 - for (auto &&it : clonedInputs) { - if (externalInputsData.find(it.first) == externalInputsData.end()) { - it.second->setPrecision(Precision::FP32); - } - } - - OutputsDataMap tmpOutputs; - tempNetwork->getOutputsInfo(tmpOutputs); - for (auto &&o : tmpOutputs) { - if (externalOutputsData.find(o.first) == externalOutputsData.end()) { - o.second->setPrecision(Precision::FP32); - } - } - - NetworkDesc desc; - desc._device = affinity; - desc._clonedNetwork = CNNNetwork{tempNetwork}; - - descs.emplace_back(std::move(desc)); - } - - for (auto &&d : descs) { - IExecutableNetwork::Ptr ret; - - auto subnetworkInputs = d._clonedNetwork.getInputsInfo(); - bool isInputSubnetwork = (subnetworkInputs.end() != std::find_first_of( - subnetworkInputs.begin(), subnetworkInputs.end(), - externalInputsData.begin(), externalInputsData.end(), - [] (const InputsDataMap::value_type& lhs, const InputsDataMap::value_type& rhs) { - return lhs.first == rhs.first; - })); - - auto cfg = _config; - cfg[PluginConfigInternalParams::KEY_SUBNETWORK_WITH_NETWORK_INPUTS] = - isInputSubnetwork ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO); - - auto deviceName = d._device; - auto metaDevices = _heteroPlugin->GetDevicePlugins(deviceName, cfg); - assert(metaDevices.size() == 1); - auto loadConfig = metaDevices[deviceName]; - d._network = _heteroPlugin->GetCore()->LoadNetwork(d._clonedNetwork, deviceName, loadConfig); - } - - networks = std::move(descs); -} - template using NodeMap = std::unordered_map; -void HeteroExecutableNetwork::InitNgraph(const InferenceEngine::ICNNNetwork& network_) { - auto function = network_.getFunction(); +HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwork& network, + const Engine::Configs& config, + Engine* plugin): + InferenceEngine::ExecutableNetworkThreadSafeDefault( + nullptr, std::make_shared()), + _heteroPlugin{plugin}, + _name{network.getName()}, + _config{config} { + auto function = network.getFunction(); + IE_ASSERT(function != nullptr); auto clonedFunction = ngraph::clone_function(*function); auto itDumpDotFile = _config.find(HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)); bool dumpDotFile = itDumpDotFile != _config.end() ? 
(itDumpDotFile->second == YES) : false; @@ -346,7 +80,7 @@ void HeteroExecutableNetwork::InitNgraph(const InferenceEngine::ICNNNetwork& net if (queryNetworkResult.supportedLayersMap.empty()) { auto it = _config.find("TARGET_FALLBACK"); if (it != _config.end()) { - queryNetworkResult = _heteroPlugin->QueryNetwork(network_, _config); + queryNetworkResult = _heteroPlugin->QueryNetwork(network, _config); } else { THROW_IE_EXCEPTION << "The 'TARGET_FALLBACK' option was not defined for heterogeneous plugin"; } @@ -380,7 +114,6 @@ void HeteroExecutableNetwork::InitNgraph(const InferenceEngine::ICNNNetwork& net std::unordered_set devices; NodeMap affinities; // Check that all nodes has user or plugin defined affinities - std::shared_ptr convertedNetwork; for (auto&& node : orderedOps) { auto itAffinity = queryNetworkResult.supportedLayersMap.find(node->get_friendly_name()); if (itAffinity != queryNetworkResult.supportedLayersMap.end()) { @@ -631,10 +364,8 @@ void HeteroExecutableNetwork::InitNgraph(const InferenceEngine::ICNNNetwork& net std::move(std::begin(nextSubgraphs), std::end(nextSubgraphs), std::back_inserter(orderedSubgraphs)); } while (!allSubgraphs.empty()); - InputsDataMap externalInputsData; - network_.getInputsInfo(externalInputsData); - OutputsDataMap externalOutputsData; - network_.getOutputsInfo(externalOutputsData); + InputsDataMap externalInputsData = network.getInputsInfo(); + OutputsDataMap externalOutputsData = network.getOutputsInfo(); networks.resize(orderedSubgraphs.size()); std::vector> subFunctions(orderedSubgraphs.size()); std::vector isInputSubnetwork(orderedSubgraphs.size()); @@ -691,21 +422,6 @@ void HeteroExecutableNetwork::InitNgraph(const InferenceEngine::ICNNNetwork& net } } -HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::ICNNNetwork& network, - const Engine::Configs& config, - Engine* plugin): - InferenceEngine::ExecutableNetworkThreadSafeDefault( - nullptr, std::make_shared()), - _heteroPlugin{plugin}, - _name{network.getName()}, - _config{config} { - if (network.getFunction() == nullptr) { - InitCNNImpl(network); - } else { - InitNgraph(network); - } -} - HeteroExecutableNetwork::HeteroExecutableNetwork(std::istream& heteroModel, const std::map& configs, Engine* heteroPlugin) : @@ -820,7 +536,7 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(std::istream& descs.emplace_back(NetworkDesc{ deviceName, - loaded ? CNNNetwork{cloneNet(static_cast(cnnnetwork))} : CNNNetwork{}, + loaded ? 
cnnnetwork : CNNNetwork{}, executableNetwork, }); } @@ -844,38 +560,27 @@ void HeteroExecutableNetwork::ExportImpl(std::ostream& heteroModel) { } auto subnetworksNode = heteroNode.append_child("subnetworks"); - std::map, ::CNNNetwork> convertedNetworks; for (auto&& subnetwork : networks) { - auto subnet = subnetwork._clonedNetwork; - if (subnet.getFunction()) { - subnet = convertedNetworks[subnet.getFunction()] = - InferenceEngine::CNNNetwork( - std::make_shared(subnetwork._clonedNetwork)); - } + auto subnetFunction = subnetwork._clonedNetwork.getFunction(); + IE_ASSERT(subnetFunction != nullptr); auto subnetworkNode = subnetworksNode.append_child("subnetwork"); subnetworkNode.append_attribute("device").set_value(subnetwork._device.c_str()); auto subnetworkInputsNode = subnetworkNode.append_child("inputs"); - auto inputInfo = subnet.getInputsInfo(); - for (auto&& input : inputInfo) { + for (auto&& parameter : subnetFunction->get_parameters()) { auto inputNode = subnetworkInputsNode.append_child("input"); - inputNode.append_attribute("name").set_value(input.first.c_str()); - inputNode.append_attribute("precision").set_value(input.second->getPrecision().name()); + inputNode.append_attribute("name").set_value(parameter->get_friendly_name().c_str()); + inputNode.append_attribute("precision").set_value(parameter->get_output_element_type(0).get_type_name().c_str()); } auto subnetworkOutputsNode = subnetworkNode.append_child("outputs"); - auto outputInfo = subnet.getOutputsInfo(); - for (auto&& output : outputInfo) { + for (auto&& result : subnetFunction->get_results()) { auto outputNode = subnetworkOutputsNode.append_child("output"); - auto creator = getCreatorLayer(output.second).lock(); - outputNode.append_attribute("creatorName").set_value(creator->name.c_str()); - outputNode.append_attribute("name").set_value(output.first.c_str()); - outputNode.append_attribute("precision").set_value(output.second->getPrecision().name()); - auto& outDatas = creator->outData; - auto itData = std::find_if(std::begin(outDatas), std::end(outDatas), [&] (const DataPtr& data) { - return output.first == data->getName(); - }); - IE_ASSERT(outDatas.end() != itData); - std::uint64_t index = std::distance(std::begin(outDatas), itData); - outputNode.append_attribute("index").set_value(std::to_string(index).c_str()); + auto sourceOutput = result->input_value(0); + outputNode.append_attribute("creatorName").set_value(sourceOutput.get_node()->get_friendly_name().c_str()); + outputNode.append_attribute("name").set_value( + (sourceOutput.get_node()->get_friendly_name() + + ((sourceOutput.get_node()->get_output_size() == 0) ? 
"" : std::to_string(sourceOutput.get_index()))).c_str()); + outputNode.append_attribute("precision").set_value(result->get_input_element_type(0).get_type_name().c_str()); + outputNode.append_attribute("index").set_value(std::to_string(sourceOutput.get_index()).c_str()); } } @@ -901,9 +606,6 @@ void HeteroExecutableNetwork::ExportImpl(std::ostream& heteroModel) { #else pugi::xml_document doc; auto subnet = subnetwork._clonedNetwork; - if (subnet.getFunction()) { - subnet = convertedNetworks[subnet.getFunction()]; - } auto dataSize = static_cast(InferenceEngine::Serialization::FillXmlDoc(subnet, doc)); doc.save(heteroModel, nullptr, pugi::format_raw); heteroModel << std::endl; diff --git a/inference-engine/src/hetero_plugin/hetero_executable_network.hpp b/inference-engine/src/hetero_plugin/hetero_executable_network.hpp index d3024ac48424f2..7e5ce5eda2af09 100644 --- a/inference-engine/src/hetero_plugin/hetero_executable_network.hpp +++ b/inference-engine/src/hetero_plugin/hetero_executable_network.hpp @@ -20,7 +20,6 @@ #include "hetero_infer_request.hpp" #include "ie_icore.hpp" -#include #include "hetero_async_infer_request.hpp" namespace HeteroPlugin { @@ -38,7 +37,7 @@ class HeteroExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadS /** * @brief constructor */ - HeteroExecutableNetwork(const InferenceEngine::ICNNNetwork& network, + HeteroExecutableNetwork(const InferenceEngine::CNNNetwork& network, const std::map& config, Engine* plugin); /** @@ -62,9 +61,8 @@ class HeteroExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadS void ExportImpl(std::ostream& modelFile) override; private: - void InitCNNImpl(const InferenceEngine::ICNNNetwork& network); - - void InitNgraph(const InferenceEngine::ICNNNetwork& network); + void InitCNNImpl(const InferenceEngine::CNNNetwork& network); + void InitNgraph(const InferenceEngine::CNNNetwork& network); struct NetworkDesc { std::string _device; diff --git a/inference-engine/src/hetero_plugin/hetero_graph_splitter.cpp b/inference-engine/src/hetero_plugin/hetero_graph_splitter.cpp deleted file mode 100644 index cce3d30fb41078..00000000000000 --- a/inference-engine/src/hetero_plugin/hetero_graph_splitter.cpp +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "hetero_graph_splitter.hpp" -#include "hetero_ade_util.hpp" - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -namespace InferenceEngine { - -namespace { -class ISplitChecker { -public: - struct GraphSelectionResult final { - static const constexpr std::size_t NoGraph - = static_cast(-1); - - std::size_t selectedGraph = NoGraph; - bool continueSelect = false; - }; - - virtual ~ISplitChecker() = default; - virtual GraphSelectionResult selectSubgraph( - const std::vector& subgraphs) = 0; -}; - -class DefaultSplitChecker : public ISplitChecker { -public: - // ISplitChecker interface - GraphSelectionResult selectSubgraph(const std::vector& subgraphs) override; -}; -} // namespace - -std::vector splitGraph(ICNNNetwork& network, - const std::vector& plugins) { - assert(!plugins.empty()); - ade::Graph gr; - ade::TypedGraph tgr(gr); - - std::vector tempSubgraphs; - LayersSet tempSet1; - LayersSet tempSet2; - - translateNetworkToAde(gr, network); - std::size_t currentChecker = 0; - - DefaultSplitChecker checker; - - auto getChecker = [&]() { - assert(currentChecker < plugins.size()); - return &checker; - }; - - auto getAffinity = [&]()->const 
std::string& { - assert(currentChecker < plugins.size()); - return plugins[currentChecker]; - }; - - auto nodes = gr.nodes(); - ade::subgraphs::NodesSet availableNodes(nodes.begin(), nodes.end()); - std::vector finalSubgraphs; - ade::SubgraphSelfReferenceChecker cycleChecker(nodes); - while (!availableNodes.empty()) { - auto subgraphs = ade::selectSubgraphs( - ade::util::filter(ade::util::toRange(availableNodes), - [&](const ade::NodeHandle& node) { - assert(nullptr != node); - auto layer = tgr.metadata(node).get().layer; - assert(nullptr != layer); - return layer->affinity == getAffinity(); - }), - [&]( - const ade::EdgeHandle& edge, - ade::SubgraphMergeDirection dir) { - assert(nullptr != edge); - auto dstNode = ade::getDstMergeNode(edge, dir); - assert(nullptr != dstNode); - if (!ade::util::contains(availableNodes, dstNode)) { - return false; - } - auto srcNode = ade::getSrcMergeNode(edge, dir); - assert(nullptr != srcNode); - auto srcLayer = tgr.metadata(srcNode).get().layer; - auto dstLayer = tgr.metadata(dstNode).get().layer; - assert(nullptr != srcLayer); - assert(nullptr != dstLayer); - return srcLayer->affinity == dstLayer->affinity; - }, - [&]( - const ade::subgraphs::NodesSet& acceptedNodes, - const ade::subgraphs::NodesSet& rejectedNodes) { - if (cycleChecker(acceptedNodes, rejectedNodes)) { - return false; - } - return true; - }); - - if (!subgraphs.empty()) { - if (plugins.size() == currentChecker) { - THROW_IE_EXCEPTION << "Some nodes weren't assigned to plugin"; - } - - tempSubgraphs.clear(); - for (auto&& subgraph : subgraphs) { - assert(!subgraph.empty()); - tempSet1.clear(); - for (auto&& node : subgraph) { - assert(nullptr != node); - auto layer = tgr.metadata(node).get().layer; - assert(nullptr != layer); - tempSet1.insert(layer); - } - tempSubgraphs.emplace_back(std::move(tempSet1)); - } - auto result = getChecker()->selectSubgraph(tempSubgraphs); - const auto selected = result.selectedGraph; - if (ISplitChecker::GraphSelectionResult::NoGraph != - selected) { - assert(selected < subgraphs.size()); - finalSubgraphs.emplace_back(std::move(tempSubgraphs[selected])); - - for (auto&& node : subgraphs[selected]) { - availableNodes.erase(node); - } - - if (result.continueSelect) { - continue; - } - } - } - ++currentChecker; - } - - return finalSubgraphs; -} - -ISplitChecker::GraphSelectionResult DefaultSplitChecker::selectSubgraph( - const std::vector& subgraphs) { - assert(!subgraphs.empty()); - std::size_t index = 0; - auto maxSize = subgraphs[0].size(); - for (auto i : ade::util::iota(std::size_t(1), subgraphs.size())) { - auto size = subgraphs[i].size(); - if (size > maxSize) { - index = 1; - maxSize = size; - } - } - GraphSelectionResult ret; - ret.selectedGraph = index; - ret.continueSelect = true; - return ret; -} - -namespace { -struct SubgraphDesc { - std::size_t topoIndex = static_cast(-1); - std::unordered_set dependsOn; -}; - -void topoVisitSubgraph(std::vector& subgraphs, - SubgraphDesc& subgraph, - std::size_t& topoIndex) { - if (subgraph.topoIndex != static_cast(-1)) { - assert(subgraph.topoIndex < topoIndex); - return; - } - - for (auto&& dep : subgraph.dependsOn) { - topoVisitSubgraph(subgraphs, subgraphs[dep], topoIndex); - } - subgraph.topoIndex = topoIndex; - ++topoIndex; -} -} // namespace - -void sortSubgraphs(std::vector& subgraphs) { - std::vector descs(subgraphs.size()); - - for (auto i : ade::util::iota(subgraphs.size())) { - auto& subgraph = subgraphs[i]; - assert(!subgraph.empty()); - for (auto&& layer : subgraph) { - assert(nullptr != layer); - 
for (auto&& dataIt : layer->insData) { - auto data = dataIt.lock(); - assert(nullptr != data); - auto prevLayer = getCreatorLayer(data).lock(); - if (nullptr != prevLayer) { - for (auto j : ade::util::iota(subgraphs.size())) { - if (i != j) { - if (ade::util::contains(subgraphs[j], prevLayer)) { - descs[i].dependsOn.insert(j); - break; - } - } - } - } - } - } - } - - { - std::size_t topoIndex = 0; - for (auto&& desc : descs) { - topoVisitSubgraph(descs, desc, topoIndex); - } - assert(subgraphs.size() == topoIndex); - } - - std::vector ret(subgraphs.size()); - for (auto i : ade::util::iota(subgraphs.size())) { - assert(i < descs.size()); - auto& desc = descs[i]; - auto topoIndex = desc.topoIndex; - assert(topoIndex != static_cast(-1)); - assert(topoIndex < ret.size()); - assert(!subgraphs[i].empty()); - ret[topoIndex] = std::move(subgraphs[i]); - } - subgraphs = std::move(ret); -} - -} // namespace InferenceEngine diff --git a/inference-engine/src/hetero_plugin/hetero_graph_splitter.hpp b/inference-engine/src/hetero_plugin/hetero_graph_splitter.hpp deleted file mode 100644 index fc4d2bce0c6838..00000000000000 --- a/inference-engine/src/hetero_plugin/hetero_graph_splitter.hpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include -#include -#include -#include -#include - -namespace InferenceEngine { -class ICNNNetwork; - -using LayersSet = std::unordered_set; - -/// Split network on subgraphs based on layer affinity -/// -/// @param network - source network -/// @param checkers - list of supported plugins -/// -/// @return list of subgraphs -std::vector -splitGraph(ICNNNetwork& network, - const std::vector& plugins); - -/// Sort sugraphs topologically, behaviour is undefined if there are circular -/// refences between subgraps -/// -/// @param subgraphs - list of subgraphs -void -sortSubgraphs(std::vector& subgraphs); - -} // namespace InferenceEngine - diff --git a/inference-engine/src/hetero_plugin/hetero_infer_request.cpp b/inference-engine/src/hetero_plugin/hetero_infer_request.cpp index 6690815540b530..b4b606908160d7 100644 --- a/inference-engine/src/hetero_plugin/hetero_infer_request.cpp +++ b/inference-engine/src/hetero_plugin/hetero_infer_request.cpp @@ -5,7 +5,6 @@ #include "hetero_infer_request.hpp" #include "hetero_itt.hpp" #include -#include #include #include #include diff --git a/inference-engine/src/hetero_plugin/hetero_plugin.cpp b/inference-engine/src/hetero_plugin/hetero_plugin.cpp index 5ba7016e91cff9..9c7af172eb31c3 100644 --- a/inference-engine/src/hetero_plugin/hetero_plugin.cpp +++ b/inference-engine/src/hetero_plugin/hetero_plugin.cpp @@ -45,7 +45,7 @@ Engine::Configs mergeConfigs(Engine::Configs config, const Engine::Configs & loc } // namespace -InferenceEngine::ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork& network, +InferenceEngine::ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network, const Configs& config) { if (GetCore() == nullptr) { THROW_IE_EXCEPTION << "Please, work with HETERO device via InferencEngine::Core object"; @@ -57,30 +57,12 @@ InferenceEngine::ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(const } DeviceMetaInformationMap metaDevices = GetDevicePlugins(it->second, tconfig); - if (network.getFunction()) { - auto allSupportsNgraph = - std::all_of(std::begin(metaDevices), std::end(metaDevices), - [&] (const 
DeviceMetaInformationMap::value_type& metaDevice) -> bool { - auto& deviceName = metaDevice.first; - auto clonedNetwork = cloneNetwork(network); - try { GetCore()->QueryNetwork(network, deviceName, metaDevice.second); } - catch (const InferenceEngine::details::InferenceEngineException & ex) { - std::string message = ex.what(); - return message.find(NOT_IMPLEMENTED_str) == std::string::npos; - } - return true; - }); - if (!allSupportsNgraph) { - auto cnnNetworkImpl = std::make_shared(network); - IE_ASSERT(cnnNetworkImpl != nullptr); - return std::make_shared( - *cnnNetworkImpl, mergeConfigs(_config, config), this); - } else { - return std::make_shared(*cloneNetwork(network), mergeConfigs(_config, config), this); - } - } else { - return std::make_shared(network, mergeConfigs(_config, config), this); + auto function = network.getFunction(); + if (function == nullptr) { + THROW_IE_EXCEPTION << "HETERO plugin supports just ngraph network representation"; } + + return std::make_shared(network, mergeConfigs(_config, config), this); } ExecutableNetwork Engine::ImportNetworkImpl(std::istream& heteroModel, const Configs& config) { @@ -142,59 +124,7 @@ void Engine::SetConfig(const Configs &configs) { } } -HeteroLayerColorer::HeteroLayerColorer(const std::vector& devices) { - static const std::vector colors = {"#5A5DF0", "#20F608", "#F1F290", "#11F110"}; - for (auto&& device : devices) { - deviceColorMap[device] = colors[std::distance(&device, devices.data()) % colors.size()]; - } -} - -void HeteroLayerColorer::operator()(const CNNLayerPtr layer, - ordered_properties &printed_properties, - ordered_properties &node_properties) { - auto device = layer->affinity; - printed_properties.insert(printed_properties.begin(), std::make_pair("device", device)); - node_properties.emplace_back("fillcolor", deviceColorMap[device]); -} - -void Engine::SetAffinity(InferenceEngine::ICNNNetwork &network, const Configs &config) { - QueryNetworkResult qr = QueryNetwork(network, config); - - details::CNNNetworkIterator i(&network); - while (i != details::CNNNetworkIterator()) { - CNNLayer::Ptr layer = *i; - auto it = qr.supportedLayersMap.find(layer->name); - if (it != qr.supportedLayersMap.end()) { - layer->affinity = it->second; - } - i++; - } - - auto dumpDot = [](const Configs & config) { - auto it = config.find(HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)); - return it != config.end() ? 
it->second == YES : false; - }; - - if (dumpDot(config) || dumpDot(_config)) { - std::unordered_set devicesSet; - details::CNNNetworkIterator i(&network); - while (i != details::CNNNetworkIterator()) { - CNNLayer::Ptr layer = *i; - if (!layer->affinity.empty()) { - devicesSet.insert(layer->affinity); - } - i++; - } - std::vector devices{std::begin(devicesSet), std::end(devicesSet)}; - std::stringstream stream(std::stringstream::out); - stream << "hetero_affinity_" << network.getName() << ".dot"; - - std::ofstream file(stream.str()); - saveGraphToDot(network, file, HeteroLayerColorer{devices}); - } -} - -QueryNetworkResult Engine::QueryNetwork(const ICNNNetwork &network, const Configs& config) const { +QueryNetworkResult Engine::QueryNetwork(const CNNNetwork &network, const Configs& config) const { QueryNetworkResult qr; if (GetCore() == nullptr) { @@ -210,42 +140,15 @@ QueryNetworkResult Engine::QueryNetwork(const ICNNNetwork &network, const Config std::string fallbackDevicesStr = it->second; DeviceMetaInformationMap metaDevices = GetDevicePlugins(fallbackDevicesStr, tconfig); - std::map queryResults; - auto queryNetwork = [&] (const InferenceEngine::ICNNNetwork & networkObject) { - // go over devices and call query network - for (auto&& metaDevice : metaDevices) { - auto& deviceName = metaDevice.first; - auto clonedNetwork = cloneNetwork(networkObject); - queryResults[deviceName] = GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.second); - } - return queryResults; - }; + auto function = network.getFunction(); + if (function == nullptr) { + THROW_IE_EXCEPTION << "HETERO plugin supports just ngraph network representation"; + } - if (network.getFunction()) { - auto allSupportsNgraph = - std::all_of(std::begin(metaDevices), std::end(metaDevices), - [&] (const DeviceMetaInformationMap::value_type& metaDevice) -> bool { - auto& deviceName = metaDevice.first; - auto clonedNetwork = cloneNetwork(network); - try { GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.second); } - catch (const InferenceEngine::details::InferenceEngineException & ex) { - std::string message = ex.what(); - return message.find(NOT_IMPLEMENTED_str) == std::string::npos; - } - return true; - }); - if (!allSupportsNgraph) { - if (contains(tconfig, CONFIG_KEY_INTERNAL(AGGREGATED_PLUGIN))) { - THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str; - } else { - auto cnnNetworkImpl = std::make_shared(network); - queryNetwork(*cnnNetworkImpl); - } - } else { - queryNetwork(network); - } - } else { - queryNetwork(network); + std::map queryResults; + for (auto&& metaDevice : metaDevices) { + auto& deviceName = metaDevice.first; + queryResults[deviceName] = GetCore()->QueryNetwork(network, deviceName, metaDevice.second); } // WARNING: Here is devices with user set priority diff --git a/inference-engine/src/hetero_plugin/hetero_plugin.hpp b/inference-engine/src/hetero_plugin/hetero_plugin.hpp index 7c5d5633fc041a..c44b0e7e9530fa 100644 --- a/inference-engine/src/hetero_plugin/hetero_plugin.hpp +++ b/inference-engine/src/hetero_plugin/hetero_plugin.hpp @@ -13,7 +13,6 @@ #include #include #include -#include namespace HeteroPlugin { @@ -25,11 +24,11 @@ class Engine : public InferenceEngine::InferencePluginInternal { Engine(); InferenceEngine::ExecutableNetworkInternal::Ptr - LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork &network, const Configs &config) override; + LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const Configs &config) override; void SetConfig(const Configs &config) override; - 
InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::ICNNNetwork &network, + InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork &network, const Configs& config) const override; InferenceEngine::Parameter GetMetric(const std::string& name, const std::map& devices); - - void operator() (const CNNLayerPtr layer, - ordered_properties &printed_properties, - ordered_properties &node_properties); - - std::unordered_map deviceColorMap; -}; - } // namespace HeteroPlugin diff --git a/inference-engine/src/inference_engine/ie_core.cpp b/inference-engine/src/inference_engine/ie_core.cpp index ddce658f9c6ef1..7671b39d7e74e6 100644 --- a/inference-engine/src/inference_engine/ie_core.cpp +++ b/inference-engine/src/inference_engine/ie_core.cpp @@ -292,7 +292,7 @@ class Core::Impl : public ICore { return GetCPPPluginByName(parsed._deviceName).ImportNetwork(networkModel, parsed._config); } - QueryNetworkResult QueryNetwork(const ICNNNetwork& network, const std::string& deviceName, + QueryNetworkResult QueryNetwork(const CNNNetwork& network, const std::string& deviceName, const std::map& config) const override { auto parsed = parseDeviceNameIntoConfig(deviceName, config); return GetCPPPluginByName(parsed._deviceName).QueryNetwork(network, parsed._config); diff --git a/inference-engine/src/inference_engine/ie_plugin_cpp.hpp b/inference-engine/src/inference_engine/ie_plugin_cpp.hpp index ec8f89389d3900..e57239e95aec9a 100644 --- a/inference-engine/src/inference_engine/ie_plugin_cpp.hpp +++ b/inference-engine/src/inference_engine/ie_plugin_cpp.hpp @@ -92,7 +92,7 @@ class InferencePlugin { CALL_STATEMENT(return ExecutableNetwork(actual->ImportNetwork(modelFileName, config), actual)); } - QueryNetworkResult QueryNetwork(const ICNNNetwork& network, + QueryNetworkResult QueryNetwork(const CNNNetwork& network, const std::map& config) const { QueryNetworkResult res; CALL_STATEMENT(res = actual->QueryNetwork(network, config)); @@ -109,7 +109,7 @@ class InferencePlugin { CALL_STATEMENT(return actual->GetMetric(name, options)); } - ExecutableNetwork LoadNetwork(const ICNNNetwork& network, const std::map& config, + ExecutableNetwork LoadNetwork(const CNNNetwork& network, const std::map& config, RemoteContext::Ptr context) { CALL_STATEMENT(return ExecutableNetwork(actual->LoadNetwork(network, config, context), actual)); } diff --git a/inference-engine/src/inference_engine/ie_rtti.cpp b/inference-engine/src/inference_engine/ie_rtti.cpp index 9d5bdb474aee74..a2e45d59fab47e 100644 --- a/inference-engine/src/inference_engine/ie_rtti.cpp +++ b/inference-engine/src/inference_engine/ie_rtti.cpp @@ -87,6 +87,7 @@ Parameter::Any::~Any() {} template struct InferenceEngine::Parameter::RealData; template struct InferenceEngine::Parameter::RealData; template struct InferenceEngine::Parameter::RealData; +template struct InferenceEngine::Parameter::RealData; template struct InferenceEngine::Parameter::RealData; template struct InferenceEngine::Parameter::RealData; template struct InferenceEngine::Parameter::RealData; diff --git a/inference-engine/src/legacy_api/CMakeLists.txt b/inference-engine/src/legacy_api/CMakeLists.txt index dde6ebdaf3818a..0b40fd9e71d373 100644 --- a/inference-engine/src/legacy_api/CMakeLists.txt +++ b/inference-engine/src/legacy_api/CMakeLists.txt @@ -4,11 +4,13 @@ set(TARGET_NAME "inference_engine_legacy") -file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) -file(GLOB_RECURSE PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp 
${CMAKE_CURRENT_SOURCE_DIR}/include/*.h) - set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") +file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp) +file(GLOB_RECURSE PUBLIC_HEADERS ${PUBLIC_HEADERS_DIR}/*.hpp + ${PUBLIC_HEADERS_DIR}/*.h) + # Create named folders for the sources within the .vcproj # Empty name lists them directly under the .vcproj diff --git a/inference-engine/src/legacy_api/include/legacy/ie_util_internal.hpp b/inference-engine/src/legacy_api/include/legacy/ie_util_internal.hpp index 2db9f4c0a92d06..d879ccb0993698 100644 --- a/inference-engine/src/legacy_api/include/legacy/ie_util_internal.hpp +++ b/inference-engine/src/legacy_api/include/legacy/ie_util_internal.hpp @@ -79,7 +79,7 @@ using printer_callback = * @param layer_cb - callback function, that called on every printed layer node */ INFERENCE_ENGINE_API_CPP(void) -saveGraphToDot(InferenceEngine::ICNNNetwork& network, std::ostream& out, printer_callback layer_cb = nullptr); +saveGraphToDot(const InferenceEngine::ICNNNetwork& network, std::ostream& out, printer_callback layer_cb = nullptr); /** @brief Return root data objects, i.e. objects came from input or const layers diff --git a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/gather_tree_ie.hpp b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/gather_tree_ie.hpp index 02d914c9bdae13..89350cb8a1754f 100644 --- a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/gather_tree_ie.hpp +++ b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/gather_tree_ie.hpp @@ -32,6 +32,7 @@ class INFERENCE_ENGINE_API_CLASS(GatherTreeIE) : public Op { const Output& end_token); void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; }; diff --git a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/hard_sigmoid_ie.hpp b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/hard_sigmoid_ie.hpp index ac8489bf420326..0361aae89c76c9 100644 --- a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/hard_sigmoid_ie.hpp +++ b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/hard_sigmoid_ie.hpp @@ -27,6 +27,7 @@ class INFERENCE_ENGINE_API_CLASS(HardSigmoid_IE) : public Op { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; float get_alpha() const { return m_alpha; } void set_alpha(float alpha) { m_alpha = alpha; } diff --git a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/interp.hpp b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/interp.hpp index d7a7dbd1dafadd..df19fdc0f95fe2 100644 --- a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/interp.hpp +++ b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/interp.hpp @@ -37,6 +37,8 @@ class INFERENCE_ENGINE_API_CLASS(Interp) : public Op { void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; InterpolateIEAttrs get_attrs() { return m_attrs; } diff --git a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/lrn_ie.hpp b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/lrn_ie.hpp index a56d3d7a030409..109a3b301d635e 100644 --- 
a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/lrn_ie.hpp +++ b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/lrn_ie.hpp @@ -30,6 +30,7 @@ class INFERENCE_ENGINE_API_CLASS(LRN_IE) : public Op { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; double get_alpha() const { return m_alpha; } void set_alpha(double alpha) { m_alpha = alpha; } diff --git a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/onehot_ie.hpp b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/onehot_ie.hpp index 8f9f0aa565a2dd..9e3af14fab4f2a 100644 --- a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/onehot_ie.hpp +++ b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/onehot_ie.hpp @@ -27,6 +27,7 @@ class INFERENCE_ENGINE_API_CLASS(OneHotIE) : public Op { void validate_and_infer_types() override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + bool visit_attributes(AttributeVisitor& visitor) override; int get_axis() { return m_axis; } int get_depth() { return m_depth; } diff --git a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/pad_ie.hpp b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/pad_ie.hpp index 05bab1cb1dc0d5..dfd73a21f07906 100644 --- a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/pad_ie.hpp +++ b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/pad_ie.hpp @@ -26,6 +26,7 @@ class INFERENCE_ENGINE_API_CLASS(PadIE) : public Op { size_t get_version() const override { return 1; } void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; PadMode get_pad_mode() { return m_pad_mode; } diff --git a/inference-engine/src/legacy_api/src/cnn_network_impl.cpp b/inference-engine/src/legacy_api/src/cnn_network_impl.cpp index 8c3f8211520c67..e308a68af768b1 100644 --- a/inference-engine/src/legacy_api/src/cnn_network_impl.cpp +++ b/inference-engine/src/legacy_api/src/cnn_network_impl.cpp @@ -30,7 +30,10 @@ #include "legacy/graph_tools.hpp" #include "legacy/details/ie_cnn_network_tools.h" #include -#include "network_serializer_v7.hpp" + +#ifdef ENABLE_V7_SERIALIZE +# include "network_serializer_v7.hpp" +#endif using namespace std; using namespace InferenceEngine; diff --git a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp index 30934ac08d8477..fa80980c213652 100644 --- a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp +++ b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp @@ -619,7 +619,6 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr auto res = std::make_shared(attrs); res->params = params; return res; - }); addSpecificCreator({"NonMaxSuppressionIE3"}, [](const std::shared_ptr<::ngraph::Node>& node, @@ -849,6 +848,157 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr return res; }); + + addSpecificCreator({"Clamp"}, [](const std::shared_ptr<::ngraph::Node>& node, + const std::map& params) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "Clamp", details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + res->params = params; + return res; + }); + + 
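The Clamp creator above, and the LRN_IE, Elu, MatMul, GatherTreeIE, GRN, OneHotIE, HardSigmoid_IE, Interp, PadIE, FakeQuantize and ConvolutionIE creators that follow, all share one registration pattern: map the ngraph node to a legacy CNNLayer of the matching type and copy the attributes gathered through visit_attributes() into its params map, post-processing individual fields where needed. A condensed sketch of that pattern; the "MyOpIE"/"MyOp" names are placeholders rather than operations touched by this patch:

// Illustrative creator registration following the pattern used in this hunk.
addSpecificCreator({"MyOpIE"}, [](const std::shared_ptr<::ngraph::Node>& node,
                                  const std::map<std::string, std::string>& params) -> CNNLayerPtr {
    LayerParams attrs = {node->get_friendly_name(), "MyOp",
                         details::convertPrecision(node->get_output_element_type(0))};
    auto res = std::make_shared<CNNLayer>(attrs);
    res->params = params;  // attributes serialized by the op's visit_attributes()
    return res;
});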
addSpecificCreator({"LRN_IE"}, [](const std::shared_ptr<::ngraph::Node>& node, + const std::map& params) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "Norm", details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + res->params = params; + return res; + }); + + addSpecificCreator({"Elu"}, [](const std::shared_ptr<::ngraph::Node>& node, + const std::map& params) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "elu", details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + res->params = params; + return res; + }); + + addSpecificCreator({"MatMul"}, [](const std::shared_ptr<::ngraph::Node>& node, + const std::map& params) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "Gemm", details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + res->params = params; + return res; + }); + + addSpecificCreator({"GatherTreeIE"}, [](const std::shared_ptr<::ngraph::Node>& node, + const std::map& params) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "GatherTree", details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + return res; + }); + + addSpecificCreator({"GRN"}, [](const std::shared_ptr<::ngraph::Node>& node, + const std::map& params) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "GRN", details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + res->params = params; + return res; + }); + + addSpecificCreator({"OneHotIE"}, [](const std::shared_ptr<::ngraph::Node>& node, + const std::map& params) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "OneHot", details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + res->params = params; + return res; + }); + + addSpecificCreator({"HardSigmoid_IE"}, [](const std::shared_ptr<::ngraph::Node>& node, + const std::map& params) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "HardSigmoid", details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + + auto castedLayer = std::dynamic_pointer_cast(node); + if (!castedLayer) + THROW_IE_EXCEPTION << "Cannot get " << attrs.type << " layer " << attrs.name; + + res->params["alpha"] = Builder::asString(castedLayer->get_alpha()); + res->params["beta"] = Builder::asString(castedLayer->get_beta()); + return res; + }); + + addSpecificCreator({"Interp"}, [](const std::shared_ptr<::ngraph::Node>& node, + const std::map& params) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "Interp", details::convertPrecision(node->get_output_element_type(0))}; + auto castedLayer = std::dynamic_pointer_cast(node); + if (!castedLayer) THROW_IE_EXCEPTION << "Cannot get " << attrs.type << " layer " << attrs.name; + + auto interp_attrs = castedLayer->get_attrs(); + + if (interp_attrs.antialias) { + THROW_IE_EXCEPTION << "Interp do not support antialias"; + } + if (interp_attrs.mode != "linear") { + THROW_IE_EXCEPTION << "Interp do not support mode '" << interp_attrs.mode << "'"; + } + + bool align_corners; + auto res = std::make_shared(attrs); + res->params = params; + + std::istringstream(params.at("align_corners")) >> align_corners; + res->params["align_corners"] = align_corners ? 
"1" : "0"; + return res; + }); + + addSpecificCreator({"PadIE"}, [](const std::shared_ptr<::ngraph::Node>& node, + const std::map& params) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "Pad", details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + + res->params["pad_mode"] = params.at("pad_mode"); + res->params["pads_begin"] = params.at("pads_begin"); + res->params["pads_end"] = params.at("pads_end"); + + if (params.at("pad_mode") == "constant") { + res->params["pad_value"] = params.at("pad_value"); + } + + return res; + }); + + addSpecificCreator({"FakeQuantize"}, [](const std::shared_ptr<::ngraph::Node>& node, + const std::map& params) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "FakeQuantize", details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + res->params = params; + return res; + }); + + addSpecificCreator({"ConvolutionIE"}, [](const std::shared_ptr<::ngraph::Node>& node, + const std::map& params) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "Convolution", details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + res->params = params; + + auto && rt_info = node->get_rt_info(); + bool keep_constants(false); + if (auto attr = std::dynamic_pointer_cast>(rt_info["keep_constants"])) { + keep_constants = attr->get(); + } + + // Restore output and kernel size + auto shape = node->get_input_shape(1); + shape.erase(shape.begin(), shape.begin() + 2); + + res->params["kernel"] = Builder::asString(static_cast&>(shape)); + res->params["output"] = Builder::asString(node->get_shape()[1]); + + // forward auto_pad only when its value is different than explicit + if (params.at("auto_pad") == "explicit") { + res->params.erase("auto_pad"); + } + + const auto weightsNode = node->input_value(1).get_node_shared_ptr(); + if (!keep_constants && InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights)) { + if (node->inputs().size() == 3) { + const auto biasNode = node->input_value(2).get_node_shared_ptr(); + InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases); + } + } + return res; + }); } CNNLayerPtr InferenceEngine::details::CNNLayerCreator::create() { @@ -877,8 +1027,6 @@ void convertFunctionToICNNNetwork(const std::shared_ptr> convertors = { std::make_shared>(), - std::make_shared>(), - std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), @@ -886,25 +1034,15 @@ void convertFunctionToICNNNetwork(const std::shared_ptr>(), std::make_shared>(), std::make_shared>(), - std::make_shared>(), - std::make_shared>(), std::make_shared>(), std::make_shared>(), - std::make_shared>(), - std::make_shared>(), - std::make_shared>(), - std::make_shared>(), std::make_shared>(), std::make_shared>(), - std::make_shared>(), std::make_shared>(), - std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), - std::make_shared>(), std::make_shared>(), - std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), @@ -929,7 +1067,6 @@ void convertFunctionToICNNNetwork(const std::shared_ptr>(), std::make_shared>(), std::make_shared>(), - std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), diff --git a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp index 
fb97e194024514..e6a3ca2566b4e5 100644 --- a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp +++ b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp @@ -9,20 +9,15 @@ #include #include "legacy/ngraph_ops/crop_ie.hpp" -#include "ngraph_ops/convolution_ie.hpp" #include "legacy/ngraph_ops/eltwise.hpp" #include "legacy/ngraph_ops/fully_connected.hpp" #include "legacy/ngraph_ops/gather_ie.hpp" -#include "legacy/ngraph_ops/gather_tree_ie.hpp" #include "legacy/ngraph_ops/gru_cell_ie.hpp" #include "legacy/ngraph_ops/interp.hpp" -#include "legacy/ngraph_ops/lrn_ie.hpp" #include "legacy/ngraph_ops/lstm_cell_ie.hpp" #include #include "legacy/ngraph_ops/normalize_ie.hpp" #include "legacy/ngraph_ops/nms_ie.hpp" -#include "legacy/ngraph_ops/onehot_ie.hpp" -#include "legacy/ngraph_ops/pad_ie.hpp" #include "legacy/ngraph_ops/power.hpp" #include "legacy/ngraph_ops/prior_box_clustered_ie.hpp" #include "legacy/ngraph_ops/prior_box_ie.hpp" @@ -32,7 +27,7 @@ #include "legacy/ngraph_ops/scaleshift.hpp" #include "legacy/ngraph_ops/tile_ie.hpp" #include "legacy/ngraph_ops/rnn_cell_ie.hpp" -#include "legacy/ngraph_ops/hard_sigmoid_ie.hpp" + #include "generic_ie.hpp" #include "exec_graph_info.hpp" @@ -353,9 +348,15 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ case Precision::FP16: precision_str = "FP16"; break; + case Precision::BF16: + precision_str = "BF16"; + break; case Precision::FP32: precision_str = "FP32"; break; + case Precision::FP64: + precision_str = "FP64"; + break; case Precision::I8: precision_str = "I8"; break; @@ -491,22 +492,6 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr< return res; } -template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { - LayerParams params = {layer->get_friendly_name(), "Norm", - details::convertPrecision(layer->get_output_element_type(0))}; - auto res = std::make_shared(params); - auto castedLayer = ngraph::as_type_ptr(layer); - if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name; - - res->params["alpha"] = asString(castedLayer->get_alpha()); - res->params["beta"] = asString(castedLayer->get_beta()); - res->params["k"] = asString(castedLayer->get_bias()); - res->params["local-size"] = asString(castedLayer->get_nsize()); - res->params["region"] = castedLayer->get_region(); - return res; -} - template <> CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { LayerParams params = {layer->get_friendly_name(), "Crop", @@ -539,19 +524,6 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::shared_p return res; } -template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { - LayerParams params = {layer->get_friendly_name(), "Clamp", - details::convertPrecision(layer->get_output_element_type(0))}; - auto res = std::make_shared(params); - auto castedLayer = ngraph::as_type_ptr(layer); - if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name; - - res->params["min"] = asString(castedLayer->get_min()); - res->params["max"] = asString(castedLayer->get_max()); - return res; -} - template <> CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { LayerParams params = {layer->get_friendly_name(), "SoftMax", @@ -640,97 +612,6 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::s return res; } -template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { - 
LayerParams params = {layer->get_friendly_name(), "FakeQuantize", - details::convertPrecision(layer->get_output_element_type(0))}; - auto res = std::make_shared(params); - auto castedLayer = ngraph::as_type_ptr(layer); - if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name; - res->params["levels"] = asString(castedLayer->get_levels()); - return res; -} - -template <> -CNNLayer::Ptr NodeConverter::createLayer( - const std::shared_ptr& layer) const { - LayerParams params = {layer->get_friendly_name(), "Convolution", - details::convertPrecision(layer->get_output_element_type(0))}; - auto res = std::make_shared(params); - auto castedLayer = ngraph::as_type_ptr(layer); - if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name; - - std::string value; - for (const auto& val : castedLayer->get_pads_begin()) { - if (!value.empty()) value += ","; - value += asString(val); - } - res->params["pads_begin"] = value; - - value.clear(); - for (const auto& val : castedLayer->get_pads_end()) { - if (!value.empty()) value += ","; - value += asString(val); - } - res->params["pads_end"] = value; - - switch (castedLayer->get_auto_pad()) { - case ngraph::op::PadType::SAME_UPPER: - res->params["auto_pad"] = "same_upper"; - break; - case ngraph::op::PadType::SAME_LOWER: - res->params["auto_pad"] = "same_lower"; - break; - case ngraph::op::PadType::VALID: - res->params["auto_pad"] = "valid"; - break; - default: - break; - } - - value.clear(); - for (const auto& val : castedLayer->get_strides()) { - if (!value.empty()) value += ","; - value += asString(val); - } - res->params["strides"] = value; - - value.clear(); - for (const auto& val : castedLayer->get_dilations()) { - if (!value.empty()) value += ","; - value += asString(val); - } - res->params["dilations"] = value; - - // Restore kernel size and output - const auto& shape = castedLayer->get_input_shape(1); - res->params["output"] = asString(castedLayer->get_shape()[1]); - res->params["group"] = asString(castedLayer->get_group()); - - value.clear(); - for (size_t i = 2; i < shape.size(); i++) { - if (!value.empty()) value += ","; - value += asString(shape[i]); - } - res->params["kernel"] = value; - - auto & rt_info = layer->get_rt_info(); - bool keep_constants(false); - if (auto attr = std::dynamic_pointer_cast>(rt_info["keep_constants"])) { - keep_constants = attr->get(); - } - - const auto weightsNode = castedLayer->input_value(1).get_node_shared_ptr(); - if (!keep_constants && InferenceEngine::details::addBlob(weightsNode, res, InferenceEngine::details::weights)) { - if (castedLayer->inputs().size() == 3) { - const auto biasNode = castedLayer->input_value(2).get_node_shared_ptr(); - InferenceEngine::details::addBlob(biasNode, res, InferenceEngine::details::biases); - } - } - - return res; -} - template <> CNNLayer::Ptr NodeConverter::createLayer( const std::shared_ptr& layer) const { @@ -1091,14 +972,6 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::shared return res; } -template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { - LayerParams params = {layer->get_friendly_name(), "GatherTree", - details::convertPrecision(layer->get_output_element_type(0))}; - auto res = std::make_shared(params); - return res; -} - template <> CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { LayerParams params = {layer->get_friendly_name(), "ReverseSequence", 
details::convertPrecision(layer->get_output_element_type(0))}; @@ -1147,46 +1020,6 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::sha return res; } -template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { - LayerParams params = {layer->get_friendly_name(), "Pad", - details::convertPrecision(layer->get_output_element_type(0))}; - auto res = std::make_shared(params); - - auto castedLayer = ngraph::as_type_ptr(layer); - if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name; - - switch (castedLayer->get_pad_mode()) { - case ngraph::op::PadMode::EDGE: - res->params["pad_mode"] = "edge"; - break; - case ngraph::op::PadMode::REFLECT: - res->params["pad_mode"] = "reflect"; - break; - case ngraph::op::PadMode::CONSTANT: - res->params["pad_mode"] = "constant"; - res->params["pad_value"] = asString(castedLayer->get_pad_value()); - break; - case ngraph::op::PadMode::SYMMETRIC: - res->params["pad_mode"] = "symmetric"; - } - std::string pad; - for (const auto& p : castedLayer->get_pads_begin()) { - if (!pad.empty()) pad += ","; - pad += asString(p); - } - res->params["pads_begin"] = pad; - - pad.clear(); - for (const auto& p : castedLayer->get_pads_end()) { - if (!pad.empty()) pad += ","; - pad += asString(p); - } - res->params["pads_end"] = pad; - - return res; -} - template <> CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { LayerParams params = {layer->get_friendly_name(), "ScaleShift", @@ -1201,19 +1034,6 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::sh return res; } -template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { - LayerParams params = {layer->get_friendly_name(), "elu", - details::convertPrecision(layer->get_output_element_type(0))}; - auto res = std::make_shared(params); - auto castedLayer = ngraph::as_type_ptr(layer); - if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name; - - res->params["alpha"] = asString(castedLayer->get_alpha()); - - return res; -} - template <> CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { LayerParams params = {layer->get_friendly_name(), "Eltwise", @@ -1478,40 +1298,6 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::shar return res; } -template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { - LayerParams params = {layer->get_friendly_name(), "Resample", - details::convertPrecision(layer->get_output_element_type(0))}; - auto castedLayer = ngraph::as_type_ptr(layer); - if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name; - - auto attrs = castedLayer->get_attrs(); - - if (attrs.antialias) { - THROW_IE_EXCEPTION << "Interp do not support antialias"; - } - if (attrs.mode != "linear") { - THROW_IE_EXCEPTION << "Interp do not support mode '" << attrs.mode << "'"; - } - - params = {layer->get_friendly_name(), "Interp", - details::convertPrecision(layer->get_output_element_type(0))}; - auto res = std::make_shared(params); - - res->params["height"] = asString(attrs.height); - res->params["width"] = asString(attrs.width); - res->params["pad_beg"] = asString(attrs.pad_beg); - res->params["pad_end"] = asString(attrs.pad_end); - res->params["align_corners"] = attrs.align_corners ? 
"1" : "0"; - - return res; -} - -template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { - THROW_IE_EXCEPTION << "Interpolate operation should be converted to Interp"; -} - template <> CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { LayerParams params = {layer->get_friendly_name(), "Interpolate", @@ -1658,21 +1444,6 @@ CNNLayer::Ptr NodeConverter::createLayer(const std:: return res; } -template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { - LayerParams params = {layer->get_friendly_name(), "Gemm", - details::convertPrecision(layer->get_output_element_type(0))}; - - auto castedLayer = ngraph::as_type_ptr(layer); - if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name; - - auto res = std::make_shared(params); - res->params["transpose_a"] = castedLayer->get_transpose_a() ? "True" : "False"; - res->params["transpose_b"] = castedLayer->get_transpose_b() ? "True" : "False"; - - return res; -} - template <> CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { auto castedLayer = ngraph::as_type_ptr(layer); @@ -1888,45 +1659,5 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr return res; } -template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { - LayerParams params = {layer->get_friendly_name(), "OneHot", Precision::FP32}; - - auto castedLayer = std::dynamic_pointer_cast(layer); - if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name; - - auto res = std::make_shared(params); - res->params["axis"] = std::to_string(castedLayer->get_axis()); - res->params["depth"] = std::to_string(castedLayer->get_depth()); - res->params["on_value"] = std::to_string(castedLayer->get_on_value()); - res->params["off_value"] = std::to_string(castedLayer->get_off_value()); - return res; -} - -template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { - LayerParams params = { layer->get_friendly_name(), "HardSigmoid", details::convertPrecision(layer->get_output_element_type(0)) }; - auto res = std::make_shared(params); - auto castedLayer = std::dynamic_pointer_cast(layer); - if (castedLayer == nullptr) - THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name; - - res->params["alpha"] = asString(castedLayer->get_alpha()); - res->params["beta"] = asString(castedLayer->get_beta()); - return res; -} - -template <> -CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const { - LayerParams params = {layer->get_friendly_name(), "GRN", - details::convertPrecision(layer->get_output_element_type(0))}; - auto castedLayer = std::dynamic_pointer_cast(layer); - if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name; - - auto res = std::make_shared(params); - res->params["bias"] = asString(castedLayer->get_bias()); - return res; -} - } // namespace Builder } // namespace InferenceEngine diff --git a/inference-engine/src/legacy_api/src/ie_layer_validators.cpp b/inference-engine/src/legacy_api/src/ie_layer_validators.cpp index 5d05deeb6d0a02..240fa0ca68e09e 100644 --- a/inference-engine/src/legacy_api/src/ie_layer_validators.cpp +++ b/inference-engine/src/legacy_api/src/ie_layer_validators.cpp @@ -12,7 +12,6 @@ #include #include "debug.h" -#include "xml_parse_utils.h" #include #include "ie_layer_validators.hpp" diff --git 
a/inference-engine/src/legacy_api/src/ie_util_internal.cpp b/inference-engine/src/legacy_api/src/ie_util_internal.cpp index 3555ffa0492ca7..4e56ef9e8d6ff6 100644 --- a/inference-engine/src/legacy_api/src/ie_util_internal.cpp +++ b/inference-engine/src/legacy_api/src/ie_util_internal.cpp @@ -544,7 +544,7 @@ struct NodePrinter { } }; -void saveGraphToDot(InferenceEngine::ICNNNetwork& network, std::ostream& out, printer_callback layer_cb) { +void saveGraphToDot(const InferenceEngine::ICNNNetwork& network, std::ostream& out, printer_callback layer_cb) { NodePrinter printer(out, std::move(layer_cb)); out << "digraph Network {\n"; diff --git a/inference-engine/src/legacy_api/src/net_pass.cpp b/inference-engine/src/legacy_api/src/net_pass.cpp index e2c34de2fcb0de..ab189e84eaccc3 100644 --- a/inference-engine/src/legacy_api/src/net_pass.cpp +++ b/inference-engine/src/legacy_api/src/net_pass.cpp @@ -1527,6 +1527,9 @@ void ConvertPrecision(ICNNNetwork& net, Precision from, Precision to) { case getPrecisionMask(Precision::FP16, Precision::FP32): convertPrecisionForAll(net); break; + case getPrecisionMask(Precision::FP64, Precision::FP32): + convertPrecisionForAll(net); + break; case getPrecisionMask(Precision::U8, Precision::I32): convertPrecisionForAll(net); break; diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/gather_tree_ie.cpp b/inference-engine/src/legacy_api/src/ngraph_ops/gather_tree_ie.cpp index 0e3c04c13dee03..dcb4dab38e12c2 100644 --- a/inference-engine/src/legacy_api/src/ngraph_ops/gather_tree_ie.cpp +++ b/inference-engine/src/legacy_api/src/ngraph_ops/gather_tree_ie.cpp @@ -63,3 +63,7 @@ void op::GatherTreeIE::validate_and_infer_types() { const auto& step_ids_et = get_input_element_type(0); set_output_type(0, step_ids_et, step_ids_rank); } + +bool ngraph::op::GatherTreeIE::visit_attributes(AttributeVisitor& visitor) { + return true; +} diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/hard_sigmoid_ie.cpp b/inference-engine/src/legacy_api/src/ngraph_ops/hard_sigmoid_ie.cpp index d85796c590379c..4ccc0e8e60a82b 100644 --- a/inference-engine/src/legacy_api/src/ngraph_ops/hard_sigmoid_ie.cpp +++ b/inference-engine/src/legacy_api/src/ngraph_ops/hard_sigmoid_ie.cpp @@ -35,3 +35,7 @@ shared_ptr op::HardSigmoid_IE::clone_with_new_inputs(const OutputVector& n check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_alpha, m_beta); } + +bool op::HardSigmoid_IE::visit_attributes(AttributeVisitor& visitor) { + return true; +} diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/interp.cpp b/inference-engine/src/legacy_api/src/ngraph_ops/interp.cpp old mode 100644 new mode 100755 index 19ffed8697cf91..f07d73fc04c49c --- a/inference-engine/src/legacy_api/src/ngraph_ops/interp.cpp +++ b/inference-engine/src/legacy_api/src/ngraph_ops/interp.cpp @@ -67,6 +67,16 @@ shared_ptr op::Interp::clone_with_new_inputs(const OutputVector& new_args) return make_shared(new_args.at(0), m_attrs); } +bool op::Interp::visit_attributes(AttributeVisitor& visitor) +{ + visitor.on_attribute("align_corners", m_attrs.align_corners); + visitor.on_attribute("width", m_attrs.width); + visitor.on_attribute("height", m_attrs.height); + visitor.on_attribute("pad_beg", m_attrs.pad_beg); + visitor.on_attribute("pad_end", m_attrs.pad_end); + return true; +} + constexpr NodeTypeInfo op::ResampleV2::type_info; op::ResampleV2::ResampleV2(const Output& image, const Output& output_shape, diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/lrn_ie.cpp 
b/inference-engine/src/legacy_api/src/ngraph_ops/lrn_ie.cpp index bd96c941bf33b2..9cdf5a53f36436 100644 --- a/inference-engine/src/legacy_api/src/ngraph_ops/lrn_ie.cpp +++ b/inference-engine/src/legacy_api/src/ngraph_ops/lrn_ie.cpp @@ -32,3 +32,12 @@ shared_ptr op::LRN_IE::clone_with_new_inputs(const OutputVector& new_args) check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_alpha, m_beta, m_bias, m_size, m_region); } + +bool op::LRN_IE::visit_attributes(AttributeVisitor& visitor) { + visitor.on_attribute("alpha", m_alpha); + visitor.on_attribute("beta", m_beta); + visitor.on_attribute("k", m_bias); + visitor.on_attribute("local-size", m_size); + visitor.on_attribute("region", m_region); + return true; +} diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/onehot_ie.cpp b/inference-engine/src/legacy_api/src/ngraph_ops/onehot_ie.cpp index 012b0762c4e9bd..2c964ec21d8098 100644 --- a/inference-engine/src/legacy_api/src/ngraph_ops/onehot_ie.cpp +++ b/inference-engine/src/legacy_api/src/ngraph_ops/onehot_ie.cpp @@ -35,3 +35,11 @@ shared_ptr op::OneHotIE::clone_with_new_inputs(const OutputVector& new_arg check_new_args_count(this, new_args); return make_shared(new_args.at(0), m_axis, m_depth, m_on_value, m_off_value, m_type); } + +bool op::OneHotIE::visit_attributes(AttributeVisitor& visitor) { + visitor.on_attribute("axis", m_axis); + visitor.on_attribute("depth", m_depth); + visitor.on_attribute("off_value", m_off_value); + visitor.on_attribute("on_value", m_on_value); + return true; +} diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/pad_ie.cpp b/inference-engine/src/legacy_api/src/ngraph_ops/pad_ie.cpp index f2cde75913dcef..4456466f54c07a 100644 --- a/inference-engine/src/legacy_api/src/ngraph_ops/pad_ie.cpp +++ b/inference-engine/src/legacy_api/src/ngraph_ops/pad_ie.cpp @@ -44,3 +44,11 @@ void op::PadIE::validate_and_infer_types() { shared_ptr op::PadIE::clone_with_new_inputs(const OutputVector& new_args) const { return nullptr; } + +bool op::PadIE::visit_attributes(AttributeVisitor& visitor) { + visitor.on_attribute("pads_begin", m_pads_begin); + visitor.on_attribute("pads_end", m_pads_end); + visitor.on_attribute("pad_mode", m_pad_mode); + visitor.on_attribute("pad_value", m_pad_value); + return true; +} diff --git a/inference-engine/src/mkldnn_plugin/bf16transformer.cpp b/inference-engine/src/mkldnn_plugin/bf16transformer.cpp index 0d8ef1d76ef953..0ddaf3fdbd0f9e 100644 --- a/inference-engine/src/mkldnn_plugin/bf16transformer.cpp +++ b/inference-engine/src/mkldnn_plugin/bf16transformer.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include "ngraph/type/bfloat16.hpp" using namespace MKLDNNPlugin; @@ -23,7 +24,7 @@ void precisionColoringBF16(const CNNLayerPtr layer, if (layer && !layer->insData.empty() && layer->input()) { printed_properties.insert(printed_properties.begin(), std::pair("Precision", - layer->input()->getPrecision() == Precision::FP32 ? "FP32" : "BF16")); + layer->input()->getPrecision() == Precision::FP32 ? 
"FP32" : "BF16")); if (layer->input()->getPrecision() == Precision::FP32) { node_properties.emplace_back("fillcolor", "#5A5DF0"); @@ -55,20 +56,31 @@ void BF16Transformer::convertToBFloat16(InferenceEngine::CNNNetwork &network) { InputsDataMap inputs = network.getInputsInfo(); OutputsDataMap outputs = network.getOutputsInfo(); for (auto iter : sortedLayers) { + if (CaselessEq()(iter->type, "convolution")) { + auto dims = iter->insData[0].lock()->getDims(); + if ((dims.size() == 4 || dims.size() == 5) && (dims[1] == 1 || dims[1] == 3)) + continue; + } + // check, if memory output node needs to be transformed if (iter->type == "Memory" && iter->outData.size() == 0 && iter->insData[0].lock()->getPrecision() == Precision::FP32) { - auto curPrec = iter->insData[0].lock()->getPrecision(); iter->insData[0].lock()->setPrecision(Precision::BF16); } + for (size_t o = 0; o < iter->outData.size(); o++) { if (inputs.find(iter->outData[o]->getName()) == inputs.end() && outputs.find(iter->outData[o]->getName()) == outputs.end() + && !CaselessEq()(iter->type, "const") && iter->outData[o]->getPrecision() == Precision::FP32) { iter->outData[o]->setPrecision(Precision::BF16); } } } + + // insert convert after input if necessary + insertConvertAfterInput(network); + // convert all edges back to FP32 on demand optimizeToFloat(network); } @@ -255,3 +267,120 @@ InferenceEngine::MemoryBlob::Ptr BF16Transformer::convertBF16ToFloat(InferenceEn } return weightsFP32; } +void BF16Transformer::addLayerToCNNNetworkAfterData( + DataPtr parentOutData, + CNNLayer::Ptr layer, + const std::string& nextLayerName, + ICNNNetwork& net, + const int childInsDataIndex) { + CNNNetworkImpl* netImpl = dynamic_cast(&net); + if (netImpl == nullptr) { + THROW_IE_EXCEPTION << "unexpected network type"; + } + + CNNLayerPtr nextLayer; + if (!nextLayerName.empty()) { + netImpl->getLayerByName(nextLayerName.c_str(), nextLayer, nullptr); + } + + if (layer && (nextLayerName.empty() || (parentOutData == nullptr) || (childInsDataIndex != -1) || + (getInputTo(parentOutData).find(nextLayerName) != getInputTo(parentOutData).end()))) { + auto getTensorDesc = [](CNNLayerPtr& nextLayer) { + const DataPtr insData = nextLayer->insData[0].lock(); + return insData->getTensorDesc(); + }; + + const TensorDesc& parentTensorDesc = parentOutData != nullptr ? 
parentOutData->getTensorDesc() : getTensorDesc(nextLayer); + DataPtr newEdgeAfterLayer(new Data(layer->name, parentTensorDesc)); + newEdgeAfterLayer->setName(layer->name); + getCreatorLayer(newEdgeAfterLayer) = layer; + getInputTo(newEdgeAfterLayer).clear(); + + + if (netImpl == nullptr) { + THROW_IE_EXCEPTION << "unexpected network type"; + } + netImpl->addData(layer->name.c_str(), newEdgeAfterLayer); + IE_SUPPRESS_DEPRECATED_START + netImpl->addLayer(layer); + IE_SUPPRESS_DEPRECATED_END + + if (parentOutData != nullptr) { + getInputTo(parentOutData)[layer->name] = layer; + layer->insData.push_back(parentOutData); + } + layer->outData.push_back(newEdgeAfterLayer); + + if (!nextLayerName.empty()) { + // CNNLayerPtr nextLayer = getInputTo(parentOutData)[nextLayerName]; + getInputTo(newEdgeAfterLayer)[nextLayerName] = nextLayer; + + if (parentOutData != nullptr) { + getInputTo(parentOutData).erase(nextLayerName); + + if (childInsDataIndex == -1) { + for (size_t i = 0; i < nextLayer->insData.size(); i++) { + if (nextLayer->insData[i].lock() == parentOutData) { + nextLayer->insData[i] = newEdgeAfterLayer; + } + } + } else { + nextLayer->insData[childInsDataIndex] = newEdgeAfterLayer; + } + } else { + nextLayer->insData.push_back(newEdgeAfterLayer); + } + } else { + CNNLayerPtr parent = getCreatorLayer(parentOutData).lock(); + if (parent == nullptr) { + THROW_IE_EXCEPTION << "parent data is absent"; + } + netImpl->removeOutput(parent->name); + netImpl->addData(layer->name.c_str(), newEdgeAfterLayer); + netImpl->addOutput(layer->name); + } + } else { + THROW_IE_EXCEPTION << "Invalid argument"; + } +} + +void BF16Transformer::insertConvertAfterInput(InferenceEngine::CNNNetwork &network) { + auto inputLayers = InferenceEngine::CNNNetGetAllInputLayers(network); + for (auto inputIter : inputLayers) { + for (size_t o = 0; o < inputIter->outData.size(); o++) { + for (auto bfInitIter : getInputTo(inputIter->outData[o])) { + if (inputIter->outData[o]->getPrecision() == Precision::BF16) { + // we don't need to enforce bf16-mode for the next layer + break; + } + auto bfInitLayer = bfInitIter.second; + if (_initbf16.find(bfInitLayer->type) != _initbf16.end()) { + if (CaselessEq()(bfInitLayer->type, "convolution")) { + // TODO: have to be removed after adding suitable implementation for convolution + break; + } + // insert convert + std::string layerName = inputIter->outData[o]->getName(); + LayerParams cnnLayerParams{layerName, "Convert", Precision::FP32}; + auto lay = std::make_shared(cnnLayerParams); + std::map par = {{"name", layerName}, + {"type", "Convert"}, + {"precision", "FP32"}}; + lay->params = par; + CNNLayerPtr convertLayer(lay); + BF16Transformer::addLayerToCNNNetworkAfterData(inputIter->outData[o], convertLayer, bfInitLayer->name, + network); + // compute input port id for bfInitLayer + for (size_t i = 0; i < bfInitLayer->insData.size(); i++) { + if (bfInitLayer->insData[i].lock()->getName() == inputIter->outData[o]->getName()) { + // set conv input as bf + bfInitLayer->insData[i].lock()->setPrecision(Precision::BF16); + break; + } + } + break; + } + } + } + } +} \ No newline at end of file diff --git a/inference-engine/src/mkldnn_plugin/bf16transformer.h b/inference-engine/src/mkldnn_plugin/bf16transformer.h index 6ff30cdcae3482..3f302348e4778f 100644 --- a/inference-engine/src/mkldnn_plugin/bf16transformer.h +++ b/inference-engine/src/mkldnn_plugin/bf16transformer.h @@ -8,15 +8,22 @@ #include #include #include +#include namespace MKLDNNPlugin { class BF16Transformer { const 
InferenceEngine::details::caseless_set _initbf16 = - { "convolution", "fullyconnected", "innerproduct", "gemm" }; + { "convolution", "fullyconnected", "innerproduct", "gemm", "RegionYolo" }; const InferenceEngine::details::caseless_set _complementbf16 = - { "relu", "tanh", "elu", "square", "abs", "sqrt", "linear", "bounded_relu", "soft_relu", "logistic", - "exp", "gelu", "clamp", "swish", "prelu", "pooling", "norm", "gather", "memory" }; + { "relu", "tanh", "elu", "square", "abs", "sqrt", "linear", "bounded_relu", "soft_relu", "normalize", + "sigmoid", "ReLU6", "not", "activation", "HSwish", "mish", "logistic", "mod", "resample", + "exp", "gelu", "clamp", "swish", "prelu", "pooling", "norm", "gather", "memory", "mvn", "crop", "activation", + "broadcast", "convert", "BatchToSpace", "DepthToSpace", "ExtractImagePatches", "concat", "power", "lrn", + "permute", "ScatterUpdate", "ScatterElementsUpdate", "ScatterNDUpdate", "depthwise", + "select", "ShuffleChannels", "SpaceToBatch", "SpaceToDepth", "squeeze", "StridedSlice", "unsqueeze", "eltwise", + "ReduceAnd", "ReduceOr", "ReduceMax", "ReduceMin" }; + const InferenceEngine::details::caseless_set _multiinput = { "concat", "eltwise" }; // prevent fallback to fp32 without considering both input and output nodes @@ -33,6 +40,13 @@ class BF16Transformer { */ bool tryToMarkFP32(InferenceEngine::DataPtr data, const std::set &immutable); + /** + * Because of singularity of input node, layer, following input doesn't support bf16 itself. + * We fix it by insertion of convert layer, which has to be replaced to reorder in graph optimizer. + * + */ + void insertConvertAfterInput(InferenceEngine::CNNNetwork &network); + public: /** * Restores Float point data types on edges which goes to non supported layers @@ -61,6 +75,16 @@ class BF16Transformer { */ void convertToBFloat16(InferenceEngine::CNNNetwork &network); + /** + * inserts given layer after current tensor + */ + static void addLayerToCNNNetworkAfterData( + InferenceEngine::DataPtr parentOutData, + InferenceEngine::CNNLayerPtr layer, + const std::string& nextLayerName, + InferenceEngine::ICNNNetwork& net, + const int childInsDataIndex = -1); + InferenceEngine::MemoryBlob::Ptr convertBF16ToFloat(InferenceEngine::MemoryBlob::Ptr); }; diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp index ebda5795690100..d5c4e4db1db20c 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp @@ -145,6 +145,9 @@ void MKLDNNGraphOptimizer::ApplyImplSpecificGraphOptimizations(MKLDNNGraph &grap graph.RemoveDroppedNodes(); #if defined (COMPILED_CPU_MKLDNN_REORDER_NODE) + ChangeConvertToReorder(graph); + graph.RemoveDroppedNodes(); + DropDoubleReorders(graph); graph.RemoveDroppedNodes(); @@ -1918,6 +1921,55 @@ void MKLDNNGraphOptimizer::DropConvertReorder(MKLDNNGraph& graph) { } } } + +void MKLDNNGraphOptimizer::ChangeConvertToReorder(MKLDNNGraph& graph) { + std::vector continuousPrecisions{ + Precision::BF16, + Precision::FP32 + }; + for (int ind = 0; ind < graph.GetNodes().size(); ind++) { + auto convertCandidate = graph.GetNodes().at(ind); + std::string nodeType = convertCandidate->getTypeStr(); + if (!InferenceEngine::details::CaselessEq()(nodeType, "convert")) { + continue; + } + auto inputPrecision = convertCandidate->getCnnLayer()->insData[0].lock()->getPrecision(); + auto outputPrecision = 
convertCandidate->getCnnLayer()->outData[0]->getPrecision(); + if (std::find(continuousPrecisions.begin(), continuousPrecisions.end(), inputPrecision) == continuousPrecisions.end() || + std::find(continuousPrecisions.begin(), continuousPrecisions.end(), outputPrecision) == continuousPrecisions.end()) { + continue; + } + std::unordered_set uniqueLayerNames; + for (auto node : graph.GetNodes()) { + uniqueLayerNames.insert(node->getCnnLayer()->name); + } + auto parentEdge = convertCandidate->getParentEdges()[0].lock(); + auto parentNode = parentEdge->getParent(); + auto &childEdge = convertCandidate->getChildEdgeAt(0); + auto childNode = childEdge->getChild(); + std::string basicLayerName = childEdge->getParent()->getName() + "_" + + MKLDNNExtensionUtils::getReorderArgs(convertCandidate->getCnnLayer()->insData[0].lock()->getTensorDesc(), + convertCandidate->getCnnLayer()->outData[0]->getTensorDesc()) + + "_" + childEdge->getChild()->getName(); + std::string layerName = basicLayerName; + int idx = 0; + while (uniqueLayerNames.find(layerName) != uniqueLayerNames.end()) { + idx++; + layerName = basicLayerName + "_" + std::to_string(idx); + } + // create temporary edge + auto oldParentOutputPort = parentEdge->getInputNum(); + auto oldChildInputPort = childEdge->getOutputNum(); + MKLDNNEdgePtr tempEdge(new MKLDNNEdge(parentNode, childNode, oldParentOutputPort, oldChildInputPort)); + + graph.InsertReorder(tempEdge, layerName, convertCandidate->getCnnLayer()->insData[0].lock()->getTensorDesc(), + convertCandidate->getCnnLayer()->outData[0]->getTensorDesc(), false); + parentNode->removeEdge(parentEdge); + parentEdge->drop(); + childEdge->drop(); + graph.DropNode(convertCandidate); + } +} #endif void MKLDNNGraphOptimizer::RemoveIOScaleShifts(MKLDNNGraph &graph) { diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.h b/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.h index 481ca61d0562fe..025b79c9b7e864 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.h +++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.h @@ -46,6 +46,7 @@ class MKLDNNGraphOptimizer { #if defined (COMPILED_CPU_MKLDNN_REORDER_NODE) void DropDoubleReorders(MKLDNNGraph& graph); void DropConvertReorder(MKLDNNGraph& graph); + void ChangeConvertToReorder(MKLDNNGraph &graph); #endif void FuseConvolutionAndZeroPoints(MKLDNNGraph &graph); void FuseBroadcastAndEltwise(MKLDNNGraph &graph); diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp index ae7db8843395d4..54856e5a4cff8d 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp @@ -105,6 +105,7 @@ void MKLDNNPlugin::MKLDNNInferRequest::PushInputData() { // these precisions are supported by mkldnn, so we push the blob directly case InferenceEngine::Precision::I8: case InferenceEngine::Precision::I32: + case InferenceEngine::Precision::BF16: case InferenceEngine::Precision::FP32: { break; } diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_memory_state.h b/inference-engine/src/mkldnn_plugin/mkldnn_memory_state.h index 751635b7709ff6..999ff269783b9c 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_memory_state.h +++ b/inference-engine/src/mkldnn_plugin/mkldnn_memory_state.h @@ -4,7 +4,7 @@ #pragma once -#include "cpp_interfaces/impl/ie_memory_state_internal.hpp" +#include "cpp_interfaces/impl/ie_variable_state_internal.hpp" #include "mkldnn_memory.h" 
#include diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp index fc1fe7972de010..eed640b0148d97 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp @@ -264,12 +264,11 @@ static void Transformation(ICNNNetwork::Ptr& clonedNetwork, const Config& conf) } InferenceEngine::ExecutableNetworkInternal::Ptr -Engine::LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork &network, const std::map &config) { +Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std::map &config) { OV_ITT_SCOPED_TASK(itt::domains::MKLDNNPlugin, "Engine::LoadExeNetworkImpl"); // verification of supported input - InferenceEngine::InputsDataMap _networkInputs; - network.getInputsInfo(_networkInputs); + InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo(); for (const auto &ii : _networkInputs) { auto input_precision = ii.second->getPrecision(); if (input_precision != InferenceEngine::Precision::FP32 && @@ -278,6 +277,7 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork &network, const st input_precision != InferenceEngine::Precision::I16 && input_precision != InferenceEngine::Precision::I8 && input_precision != InferenceEngine::Precision::U8 && + input_precision != InferenceEngine::Precision::BF16 && input_precision != InferenceEngine::Precision::BOOL && input_precision != InferenceEngine::Precision::I64 && input_precision != InferenceEngine::Precision::U64) { @@ -417,7 +417,7 @@ void Engine::AddExtension(InferenceEngine::IExtensionPtr extension) { extensionManager->AddExtension(extension); } -QueryNetworkResult Engine::QueryNetwork(const ICNNNetwork& network, const std::map& config) const { +QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::map& config) const { QueryNetworkResult res; MKLDNNWeightsSharing::Ptr fake_w_cache; auto function = network.getFunction(); @@ -483,7 +483,7 @@ QueryNetworkResult Engine::QueryNetwork(const ICNNNetwork& network, const std::m } } } else { - details::CNNNetworkIterator i(&network); + details::CNNNetworkIterator i(network); while (i != details::CNNNetworkIterator()) { try { mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0)); diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.h b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.h index 07f1c0e840cadb..028d5238be2385 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.h +++ b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.h @@ -22,7 +22,7 @@ class Engine : public InferenceEngine::InferencePluginInternal { ~Engine() override; InferenceEngine::ExecutableNetworkInternal::Ptr - LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork &network, + LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std::map &config) override; void AddExtension(InferenceEngine::IExtensionPtr extension) override; @@ -33,7 +33,7 @@ class Engine : public InferenceEngine::InferencePluginInternal { InferenceEngine::Parameter GetMetric(const std::string& name, const std::map& options) const override; - InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::ICNNNetwork& network, + InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network, const std::map& config) const override; private: diff --git a/inference-engine/src/mkldnn_plugin/nodes/argmax.cpp b/inference-engine/src/mkldnn_plugin/nodes/argmax.cpp index 449168f504cb10..63fa62a58074e8 100644 
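The LoadExeNetworkImpl change above extends the set of input precisions the CPU plugin accepts to include BF16. A simplified standalone analogue of that whitelist check is sketched below; Prec and checkInputPrecision are illustrative stand-ins, not Inference Engine types, and the accepted set is deliberately abridged.

#include <iostream>
#include <stdexcept>
#include <string>

enum class Prec { FP32, BF16, FP16, I32, I8, U8 };

const char* name(Prec p) {
    switch (p) {
        case Prec::FP32: return "FP32"; case Prec::BF16: return "BF16";
        case Prec::FP16: return "FP16"; case Prec::I32:  return "I32";
        case Prec::I8:   return "I8";   case Prec::U8:   return "U8";
    }
    return "UNKNOWN";
}

// Throws if the network input uses a precision outside the accepted set.
void checkInputPrecision(Prec p) {
    constexpr Prec supported[] = {Prec::FP32, Prec::BF16, Prec::I32, Prec::I8, Prec::U8};
    for (Prec s : supported)
        if (s == p) return;  // accepted, nothing to do
    throw std::runtime_error(std::string("Input format ") + name(p) + " is not supported");
}

int main() {
    checkInputPrecision(Prec::BF16);      // accepted once BF16 is on the whitelist
    try {
        checkInputPrecision(Prec::FP16);  // rejected here only because this demo set is abridged
    } catch (const std::exception& e) {
        std::cout << e.what() << "\n";
    }
}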
--- a/inference-engine/src/mkldnn_plugin/nodes/argmax.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/argmax.cpp @@ -27,7 +27,7 @@ class ArgMaxImpl: public ExtLayerBase { conf.axis_index_ = conf.has_axis_ ? std::stoi(layer->params.at("axis")) :0; - addConfig(layer, {DataConfigurator(ConfLayout::PLN)}, {DataConfigurator(ConfLayout::PLN)}); + addConfig(layer, {DataConfigurator(ConfLayout::PLN, Precision::FP32)}, {DataConfigurator(ConfLayout::PLN, Precision::FP32)}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/base.hpp b/inference-engine/src/mkldnn_plugin/nodes/base.hpp index f31812e4cbd720..b9b650b3eca616 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/base.hpp +++ b/inference-engine/src/mkldnn_plugin/nodes/base.hpp @@ -60,8 +60,8 @@ class ExtLayerBase: public ILayerExecImpl { explicit DataConfigurator(ConfLayout l): layout(l) {} - DataConfigurator(ConfLayout l, bool constant, int inplace = -1): - layout(l), constant(constant), inplace(inplace) {} + DataConfigurator(ConfLayout l, bool constant, int inplace = -1, Precision::ePrecision prc = Precision::UNSPECIFIED): + layout(l), constant(constant), inplace(inplace), prc(prc) {} DataConfigurator(ConfLayout l, Precision::ePrecision prc): layout(l), prc(prc) {} @@ -128,14 +128,7 @@ class ExtLayerBase: public ILayerExecImpl { conf.layout = ConfLayout::PLN; } - // All extension layers support only FP32 precision! - // fixing of BF16 precisions where they are - layers naturally support only FP32 - // if we see BF16, that means another floating point format which will be converted by reorder - // added by current mkl-dnn cpu plugin when it figure out diff in data types on input and output of edges InferenceEngine::Precision precision = (conf.prc == Precision::UNSPECIFIED) ? data_desc.getPrecision() : Precision(conf.prc); - if (precision == Precision::BF16) { - precision = Precision::FP32; - } if (conf.layout == ConfLayout::ANY) { dataConfig.desc = TensorDesc(precision, data_dims, InferenceEngine::Layout::ANY); } else { diff --git a/inference-engine/src/mkldnn_plugin/nodes/broadcast.cpp b/inference-engine/src/mkldnn_plugin/nodes/broadcast.cpp index 5734cf23808c58..f975202b078c93 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/broadcast.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/broadcast.cpp @@ -31,7 +31,7 @@ class BroadcastImpl: public ExtLayerBase { LayerConfig config; DataConfig dataConfig, shapeConfig; - Precision dataPrecision = layer->outData[0]->getTensorDesc().getPrecision(); + Precision dataPrecision = layer->insData[BROADCAST_INPUT].lock()->getTensorDesc().getPrecision(); const SizeVector& data_dims = layer->insData[BROADCAST_INPUT].lock()->getTensorDesc().getDims(); dataConfig.desc = TensorDesc(dataPrecision, data_dims, layer->insData[BROADCAST_INPUT].lock()->getTensorDesc().getLayout()); diff --git a/inference-engine/src/mkldnn_plugin/nodes/bucketize.cpp b/inference-engine/src/mkldnn_plugin/nodes/bucketize.cpp index 5886c16ab6604b..e27a1b83c279de 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/bucketize.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/bucketize.cpp @@ -41,19 +41,16 @@ class BucketizeImpl : public ExtLayerBase { input_precision = input->getTensorDesc().getPrecision(); if (input_precision != Precision::FP32 && input_precision != Precision::I32 && input_precision != Precision::I64) { - THROW_IE_EXCEPTION << layer->name - << " Incorrect input precision of the input. 
Only FP32, I32 and I64 are supported!"; + input_precision = Precision::FP32; } boundaries_precision = boundaries->getTensorDesc().getPrecision(); if (boundaries_precision != Precision::FP32 && boundaries_precision != Precision::I32 && boundaries_precision != Precision::I64) { - THROW_IE_EXCEPTION << layer->name - << " Incorrect input precision of the boundaries tensor. Only FP32, I32 and I64 are supported!"; + boundaries_precision = Precision::FP32; } output_precision = layer->outData[OUTPUT_TENSOR_PORT]->getTensorDesc().getPrecision(); if (output_precision != Precision::I32 && output_precision != Precision::I64) { - THROW_IE_EXCEPTION << layer->name - << " Incorrect precision of the output tensor. Only I32 and I64 are supported!"; + output_precision = Precision::I32; } // check dimensions of input tensors @@ -73,8 +70,8 @@ class BucketizeImpl : public ExtLayerBase { num_values = std::accumulate(input_tensor_dims.begin(), input_tensor_dims.end(), 1, std::multiplies()); addConfig(layer, - { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN) }); + { DataConfigurator(ConfLayout::PLN, input_precision), DataConfigurator(ConfLayout::PLN, boundaries_precision) }, + { DataConfigurator(ConfLayout::PLN, output_precision) }); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.cpp b/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.cpp index aa2cefaa618dac..17a79325f2f649 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.cpp @@ -4,13 +4,14 @@ #include "cpu_convert.h" #include "cpu_memcpy.h" +#include "utils/bfloat16.hpp" #include #include using namespace InferenceEngine; template -void convert(void *srcPtr, void *dstPtr, const size_t size) { +void convert(const void *srcPtr, void *dstPtr, const size_t size) { if (std::is_same::value) { cpu_memcpy(dstPtr, srcPtr, size*sizeof(dstType)); } else { @@ -24,7 +25,7 @@ void convert(void *srcPtr, void *dstPtr, const size_t size) { } template -void convertFrom(void *srcPtr, void *dstPtr, Precision dstPrc, const size_t size) { +void convertFrom(const void *srcPtr, void *dstPtr, Precision dstPrc, const size_t size) { switch (dstPrc) { case Precision::U8: convert::value_type>(srcPtr, dstPtr, size); @@ -50,6 +51,9 @@ void convertFrom(void *srcPtr, void *dstPtr, Precision dstPrc, const size_t size case Precision::FP32: convert::value_type>(srcPtr, dstPtr, size); break; + case Precision::BF16: + convert(srcPtr, dstPtr, size); + break; case Precision::BOOL: convert::value_type>(srcPtr, dstPtr, size); break; @@ -58,7 +62,7 @@ void convertFrom(void *srcPtr, void *dstPtr, Precision dstPrc, const size_t size } } -void cpu_convert(void *srcPtr, void *dstPtr, Precision srcPrc, Precision dstPrc, const size_t size) { +void cpu_convert(const void *srcPtr, void *dstPtr, Precision srcPrc, Precision dstPrc, const size_t size) { if (srcPtr == nullptr || dstPtr == nullptr) THROW_IE_EXCEPTION << "cpu_convert has null data pointer"; @@ -92,6 +96,9 @@ void cpu_convert(void *srcPtr, void *dstPtr, Precision srcPrc, Precision dstPrc, case Precision::FP32: convertFrom::value_type>(srcPtr, dstPtr, dstPrc, size); break; + case Precision::BF16: + convertFrom(srcPtr, dstPtr, dstPrc, size); + break; case Precision::BOOL: convertFrom::value_type>(srcPtr, dstPtr, dstPrc, size); break; diff --git 
a/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.h b/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.h index 8c2baa37929f11..5ace2e7cd6a2ef 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.h +++ b/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.h @@ -20,4 +20,4 @@ * @return none. */ -void cpu_convert(void *srcPtr, void *dstPtr, InferenceEngine::Precision srcPrc, InferenceEngine::Precision dstPrc, const size_t size); +void cpu_convert(const void *srcPtr, void *dstPtr, InferenceEngine::Precision srcPrc, InferenceEngine::Precision dstPrc, const size_t size); diff --git a/inference-engine/src/mkldnn_plugin/nodes/common/softmax.cpp b/inference-engine/src/mkldnn_plugin/nodes/common/softmax.cpp index f9b4f57a5b2e2d..bd625795203490 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/common/softmax.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/common/softmax.cpp @@ -3,25 +3,35 @@ // #include -#include #include +#include #include "jit_generator.hpp" #include "jit_uni_eltwise.hpp" +#include "utils/bfloat16.hpp" #include "softmax.h" using namespace InferenceEngine; +using namespace MKLDNNPlugin; +using namespace mkldnn; using namespace mkldnn::impl::cpu; using namespace mkldnn::impl::utils; #define GET_OFF(field) offsetof(jit_args_softmax, field) struct jit_args_softmax { - const float* src; - const float* dst; - size_t stride; + const void* src; + void* dst; + size_t src_stride; + size_t dst_stride; size_t work_amount; }; +struct jit_softmax_config_params { + Precision src_dt; + Precision dst_dt; +}; + + struct jit_uni_softmax_kernel { void (*ker_)(const jit_args_softmax *); @@ -35,14 +45,15 @@ template struct jit_uni_softmax_kernel_f32 : public jit_uni_softmax_kernel, public jit_generator { DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_uni_softmax_kernel_f32) - jit_uni_softmax_kernel_f32() : jit_uni_softmax_kernel(), jit_generator() { + jit_uni_softmax_kernel_f32(jit_softmax_config_params jcp) : jit_uni_softmax_kernel(), jit_generator() { exp_injector.reset(new jit_uni_eltwise_injector_f32(this, alg_kind::eltwise_exp, 0.f, 0.f)); this->preamble(); mov(reg_src, ptr[reg_params + GET_OFF(src)]); mov(reg_dst, ptr[reg_params + GET_OFF(dst)]); - mov(reg_stride, ptr[reg_params + GET_OFF(stride)]); + mov(reg_src_stride, ptr[reg_params + GET_OFF(src_stride)]); + mov(reg_dst_stride, ptr[reg_params + GET_OFF(dst_stride)]); mov(reg_work_amount, ptr[reg_params + GET_OFF(work_amount)]); Xbyak::Label max_loop_label; @@ -54,12 +65,12 @@ struct jit_uni_softmax_kernel_f32 : public jit_uni_softmax_kernel, public jit_ge mov(aux_reg_work_amount, reg_work_amount); mov(aux_reg_src, reg_src); - uni_vmovups(vmm_max, ptr[aux_reg_src]); + load_vector(vmm_max, ptr[aux_reg_src], jcp.src_dt); L(max_loop_label); { cmp(aux_reg_work_amount, 0); jle(max_loop_end_label, T_NEAR); - uni_vmovups(vmm_val, ptr[aux_reg_src]); + load_vector(vmm_val, ptr[aux_reg_src], jcp.src_dt); if (isa == sse42) { uni_vmovups(vmm_mask, vmm_val); @@ -77,7 +88,7 @@ struct jit_uni_softmax_kernel_f32 : public jit_uni_softmax_kernel, public jit_ge uni_vblendvps(vmm_max, vmm_max, vmm_val, vmm_mask); } - add(aux_reg_src, reg_stride); + add(aux_reg_src, reg_src_stride); sub(aux_reg_work_amount, 1); jmp(max_loop_label, T_NEAR); @@ -93,16 +104,16 @@ struct jit_uni_softmax_kernel_f32 : public jit_uni_softmax_kernel, public jit_ge cmp(aux_reg_work_amount, 0); jle(exp_loop_end_label, T_NEAR); - uni_vmovups(vmm_val, ptr[aux_reg_src]); + load_vector(vmm_val, ptr[aux_reg_src], jcp.src_dt); uni_vsubps(vmm_val, 
vmm_val, vmm_max); exp_injector->compute_vector_range(vmm_val.getIdx(), vmm_val.getIdx() + 1); uni_vaddps(vmm_exp_sum, vmm_exp_sum, vmm_val); - uni_vmovups(ptr[aux_reg_dst], vmm_val); + store_vector(ptr[aux_reg_dst], vmm_val, jcp.dst_dt); - add(aux_reg_src, reg_stride); - add(aux_reg_dst, reg_stride); + add(aux_reg_src, reg_src_stride); + add(aux_reg_dst, reg_dst_stride); sub(aux_reg_work_amount, 1); jmp(exp_loop_label, T_NEAR); @@ -116,13 +127,13 @@ struct jit_uni_softmax_kernel_f32 : public jit_uni_softmax_kernel, public jit_ge cmp(aux_reg_work_amount, 0); jle(div_loop_end_label, T_NEAR); - uni_vmovups(vmm_val, ptr[aux_reg_dst]); + load_vector(vmm_val, ptr[aux_reg_dst], jcp.dst_dt); uni_vdivps(vmm_val, vmm_val, vmm_exp_sum); - uni_vmovups(ptr[aux_reg_dst], vmm_val); + store_vector(ptr[aux_reg_dst], vmm_val, jcp.dst_dt); - add(aux_reg_dst, reg_stride); + add(aux_reg_dst, reg_dst_stride); sub(aux_reg_work_amount, 1); jmp(div_loop_label, T_NEAR); @@ -147,7 +158,8 @@ struct jit_uni_softmax_kernel_f32 : public jit_uni_softmax_kernel, public jit_ge Xbyak::Reg64 aux_reg_dst = r15; Xbyak::Reg64 reg_work_amount = r11; Xbyak::Reg64 aux_reg_work_amount = r12; - Xbyak::Reg64 reg_stride = r14; + Xbyak::Reg64 reg_src_stride = r14; + Xbyak::Reg64 reg_dst_stride = r10; Xbyak::Reg64 reg_params = abi_param1; Vmm vmm_mask = Vmm(0); @@ -158,23 +170,64 @@ struct jit_uni_softmax_kernel_f32 : public jit_uni_softmax_kernel, public jit_ge const Xbyak::Opmask k_mask = Xbyak::Opmask(1); std::shared_ptr> exp_injector; + + inline void load_vector(Vmm vmm_src, const Xbyak::Address &op, Precision src_dt) { + switch (src_dt) { + case Precision::FP32: + uni_vmovups(vmm_src, op); + break; + case Precision::BF16: + vpmovzxwd(vmm_src, op); + uni_vpslld(vmm_src, vmm_src, 16); + break; + default: + assert(!"unknown src_dt"); + } + } + inline void store_vector(const Xbyak::Address &op, Vmm vmm_dst, Precision dst_dt) { + Xbyak::Ymm ymm_dst = Xbyak::Ymm(vmm_dst.getIdx()); + + switch (dst_dt) { + case Precision::FP32: + uni_vmovups(op, vmm_dst); + break; + case Precision::BF16: + vcvtneps2bf16(ymm_dst, vmm_dst); + uni_vmovups(op, ymm_dst); + break; + default: + assert(!"unknown dst_dt"); + } + } }; -SoftmaxGeneric::SoftmaxGeneric() { +SoftmaxGeneric::SoftmaxGeneric(Precision inpPrc, Precision outPrc) + : input_prec(inpPrc), output_prec(outPrc) { + if (Precision::BF16 == output_prec) { + if (!mayiuse(avx512_core_bf16)) { + THROW_IE_EXCEPTION << "SoftmaxGeneric doesn't support BF16 precision on this target."; + } + } + block_size = 1; + auto jcp = jit_softmax_config_params(); + jcp.src_dt = inpPrc; + jcp.dst_dt = outPrc; + if (mayiuse(avx512_common)) { - softmax_kernel.reset(new jit_uni_softmax_kernel_f32()); + softmax_kernel.reset(new jit_uni_softmax_kernel_f32(jcp)); block_size = 16; } else if (mayiuse(avx2)) { - softmax_kernel.reset(new jit_uni_softmax_kernel_f32()); + softmax_kernel.reset(new jit_uni_softmax_kernel_f32(jcp)); block_size = 8; } else if (mayiuse(sse42)) { - softmax_kernel.reset(new jit_uni_softmax_kernel_f32()); + softmax_kernel.reset(new jit_uni_softmax_kernel_f32(jcp)); block_size = 4; } } -void SoftmaxGeneric::execute(const float *src_data, float *dst_data, int B, int C, int H, int W) { +template +void SoftmaxGeneric::calculate(const in_data_t *src_data, out_data_t *dst_data, int B, int C, int H, int W) { for (int b = 0; b < B; b++) { int tail_start = 0; if (softmax_kernel) { @@ -185,7 +238,8 @@ void SoftmaxGeneric::execute(const float *src_data, float *dst_data, int B, int arg.src = src_data + b * C * H * W 
+ ib * block_size; arg.dst = dst_data + b * C * H * W + ib * block_size; - arg.stride = static_cast((size_t)(H) * W * sizeof(float)); + arg.src_stride = static_cast((size_t)(H) * W * sizeof(in_data_t)); + arg.dst_stride = static_cast((size_t)(H) * W * sizeof(out_data_t)); arg.work_amount = static_cast(C); (*softmax_kernel)(&arg); @@ -214,3 +268,31 @@ void SoftmaxGeneric::execute(const float *src_data, float *dst_data, int B, int }); } } + +void SoftmaxGeneric::execute(const uint8_t *src_data, uint8_t *dst_data, int B, int C, int H, int W) { + if (Precision::FP32 == input_prec) { + auto float_src_data = reinterpret_cast(src_data); + if (Precision::FP32 == output_prec) { + auto float_dst_data = reinterpret_cast(dst_data); + calculate(float_src_data, float_dst_data, B, C, H, W); + } else if (Precision::BF16 == output_prec) { + auto bf16_dst_data = reinterpret_cast(dst_data); + calculate(float_src_data, bf16_dst_data, B, C, H, W); + } else { + THROW_IE_EXCEPTION << "Unsupported output precision: " << output_prec.name(); + } + } else if (Precision::BF16 == input_prec) { + auto bf16_src_data = reinterpret_cast(src_data); + if (Precision::FP32 == output_prec) { + auto float_dst_data = reinterpret_cast(dst_data); + calculate(bf16_src_data, float_dst_data, B, C, H, W); + } else if (Precision::BF16 == output_prec) { + auto bf16_dst_data = reinterpret_cast(dst_data); + calculate(bf16_dst_data, bf16_dst_data, B, C, H, W); + } else { + THROW_IE_EXCEPTION << "Unsupported output precision: " << output_prec.name(); + } + } else { + THROW_IE_EXCEPTION << "Unsupported input precision: " << input_prec.name(); + } +} diff --git a/inference-engine/src/mkldnn_plugin/nodes/common/softmax.h b/inference-engine/src/mkldnn_plugin/nodes/common/softmax.h index 2849439c370a70..53046ed406ecfc 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/common/softmax.h +++ b/inference-engine/src/mkldnn_plugin/nodes/common/softmax.h @@ -6,6 +6,7 @@ #include #include +#include #include "defs.h" #include "ie_parallel.hpp" @@ -37,12 +38,16 @@ void softmax_many_batches(const float *src_data, float *dst_data, int B, int C, class SoftmaxGeneric { public: - SoftmaxGeneric(); + SoftmaxGeneric(InferenceEngine::Precision inpPrc, InferenceEngine::Precision outPrc); - void execute(const float *src_data, float *dst_data, int B, int C, int H, int W); + void execute(const uint8_t *src_data, uint8_t *dst_data, int B, int C, int H, int W); +private: + template + void calculate(const in_data_t* src_data, out_data_t* dst_data, int B, int C, int H, int W); private: int block_size; + InferenceEngine::Precision input_prec, output_prec; std::shared_ptr softmax_kernel; }; diff --git a/inference-engine/src/mkldnn_plugin/nodes/convert.cpp b/inference-engine/src/mkldnn_plugin/nodes/convert.cpp index eed226db4b211e..9e2cf81c6d4b8e 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/convert.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/convert.cpp @@ -4,10 +4,8 @@ #include "base.hpp" -#include #include #include -#include "ie_parallel.hpp" #include "ie_precision.hpp" #include "common/cpu_convert.h" diff --git a/inference-engine/src/mkldnn_plugin/nodes/ctc_greedy.cpp b/inference-engine/src/mkldnn_plugin/nodes/ctc_greedy.cpp index 717af9f9e3ff17..87e688684a1d3f 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/ctc_greedy.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/ctc_greedy.cpp @@ -20,8 +20,8 @@ class CTCGreedyDecoderImpl: public ExtLayerBase { THROW_IE_EXCEPTION << "Incorrect number of input/output edges!"; std::vector inps; - 
inps.resize(layer->insData.size(), DataConfigurator(ConfLayout::PLN)); - addConfig(layer, inps, {DataConfigurator(ConfLayout::PLN)}); + inps.resize(layer->insData.size(), DataConfigurator(ConfLayout::PLN, Precision::FP32)); + addConfig(layer, inps, {DataConfigurator(ConfLayout::PLN, Precision::FP32)}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/ctc_loss.cpp b/inference-engine/src/mkldnn_plugin/nodes/ctc_loss.cpp index a1954db4378dcb..6ac058e22c977f 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/ctc_loss.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/ctc_loss.cpp @@ -27,13 +27,10 @@ class CTCLossImpl : public ExtLayerBase { auto logitsData = layer->insData[0].lock(); if (logitsData == nullptr) THROW_IE_EXCEPTION << _logPrefix << " has nullable logits data"; - auto logitsPrecision = logitsData->getTensorDesc().getPrecision(); - if (logitsPrecision == Precision::BF16) - logitsPrecision = Precision::FP32; LayerConfig config; config.inConfs.resize(layer->insData.size()); - config.inConfs[0].desc = TensorDesc(logitsPrecision, + config.inConfs[0].desc = TensorDesc(Precision::FP32, logitsData->getTensorDesc().getDims(), TensorDesc::getLayoutByDims(logitsData->getTensorDesc().getDims())); auto intPrecision = Precision::I32; @@ -48,7 +45,7 @@ class CTCLossImpl : public ExtLayerBase { DataConfig outConfig; auto& outDims = layer->outData[0]->getTensorDesc().getDims(); - outConfig.desc = TensorDesc(logitsPrecision, + outConfig.desc = TensorDesc(Precision::FP32, outDims, TensorDesc::getLayoutByDims(outDims)); config.outConfs.push_back(outConfig); diff --git a/inference-engine/src/mkldnn_plugin/nodes/detectionoutput.cpp b/inference-engine/src/mkldnn_plugin/nodes/detectionoutput.cpp index 140a56e5416eef..e96cf5ee32eaa4 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/detectionoutput.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/detectionoutput.cpp @@ -112,8 +112,8 @@ class DetectionOutputImpl: public ExtLayerBase { _num_priors_actual = InferenceEngine::make_shared_blob({Precision::I32, num_priors_actual_size, C}); _num_priors_actual->allocate(); - std::vector in_data_conf(layer->insData.size(), DataConfigurator(ConfLayout::PLN)); - addConfig(layer, in_data_conf, {DataConfigurator(ConfLayout::PLN)}); + std::vector in_data_conf(layer->insData.size(), DataConfigurator(ConfLayout::PLN, Precision::FP32)); + addConfig(layer, in_data_conf, {DataConfigurator(ConfLayout::PLN, Precision::FP32)}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/embedding_bag_sum.cpp b/inference-engine/src/mkldnn_plugin/nodes/embedding_bag_sum.cpp index 58f907f5835d76..dace4c5195c72f 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/embedding_bag_sum.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/embedding_bag_sum.cpp @@ -4,7 +4,6 @@ #include "embedding_bag_sum.hpp" #include "ie_parallel.hpp" -#include "jit_generator.hpp" #include "list.hpp" #include diff --git a/inference-engine/src/mkldnn_plugin/nodes/fill.cpp b/inference-engine/src/mkldnn_plugin/nodes/fill.cpp index e08897184a1701..e3831b83ef1e36 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/fill.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/fill.cpp @@ -28,9 +28,6 @@ class FillImpl: public ExtLayerBase { if (fill_dims.size() > 1) THROW_IE_EXCEPTION << layer->name << " Fill dimensions vector should be 1 dimension"; - if 
(layer->insData[FILL_DIMS].lock()->getTensorDesc().getPrecision() != Precision::I32) - THROW_IE_EXCEPTION << layer->name << " Fill dimensions vector should be I32!"; - SizeVector value_dims = layer->insData[FILL_VALUE].lock()->getTensorDesc().getDims(); if (value_dims.size() > 1) THROW_IE_EXCEPTION << layer->name << " Value scalar should have 1 dimension"; @@ -39,12 +36,12 @@ class FillImpl: public ExtLayerBase { layer->outData[0]->getTensorDesc().getPrecision() == Precision::I32) && !(layer->insData[FILL_VALUE].lock()->getTensorDesc().getPrecision() == Precision::FP32 && layer->outData[0]->getTensorDesc().getPrecision() == Precision::FP32)) { - THROW_IE_EXCEPTION << layer->name << - " 'Value' input scalars and output tensor should have same precision and only FP32 and I32 are supported!"; + addConfig(layer, { DataConfigurator(ConfLayout::PLN, Precision::I32), DataConfigurator(ConfLayout::PLN, Precision::FP32) }, + { DataConfigurator(ConfLayout::PLN, Precision::FP32) }); + } else { + addConfig(layer, { DataConfigurator(ConfLayout::PLN, Precision::I32), DataConfigurator(ConfLayout::PLN) }, + { DataConfigurator(ConfLayout::PLN) }); } - - addConfig(layer, { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN) }); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/gather.cpp b/inference-engine/src/mkldnn_plugin/nodes/gather.cpp index 4cc1e6fb10ca1c..24af01f77546c3 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/gather.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/gather.cpp @@ -27,7 +27,7 @@ class GatherImpl: public ExtLayerBase { Precision inIdxPrecision = layer->insData[GATHER_INDEXES].lock()->getTensorDesc().getPrecision(); if (inIdxPrecision != Precision::FP32 && inIdxPrecision != Precision::I32 && inIdxPrecision != Precision::FP16) - THROW_IE_EXCEPTION << layer->name << " Incorrect input precision. Only FP32, FP16 or I32 are supported!"; + inIdxPrecision = Precision::I32; axis = layer->GetParamAsInt("axis"); @@ -52,7 +52,7 @@ class GatherImpl: public ExtLayerBase { LayerConfig config; DataConfig dataConfigIdx, dataConfigDct; - Precision dataPrecision = layer->outData[0]->getTensorDesc().getPrecision(); + Precision dataPrecision = layer->insData[GATHER_DICTIONARY].lock()->getTensorDesc().getPrecision(); dataConfigDct.desc = TensorDesc(dataPrecision, dictionary_dims, layer->insData[GATHER_DICTIONARY].lock()->getTensorDesc().getLayoutByDims(dictionary_dims)); config.inConfs.push_back(dataConfigDct); diff --git a/inference-engine/src/mkldnn_plugin/nodes/gather_tree.cpp b/inference-engine/src/mkldnn_plugin/nodes/gather_tree.cpp index 5e420b22ddd23a..7a0b527c18af16 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/gather_tree.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/gather_tree.cpp @@ -30,9 +30,8 @@ class GatherTreeImpl: public ExtLayerBase { THROW_IE_EXCEPTION << layer->name << " Incorrect number of output edges."; precision = layer->insData[GATHER_TREE_STEP_IDX].lock()->getTensorDesc().getPrecision(); - if (precision != Precision::FP32 && precision != Precision::I32) - THROW_IE_EXCEPTION << layer->name << " Incorrect data tensor precision. 
Only I32 or FP32 are supported."; + precision = Precision::FP32; if (layer->insData[GATHER_TREE_PARENT_IDX].lock()->getTensorDesc().getPrecision() != precision || layer->insData[GATHER_TREE_MAX_SEQ_LEN].lock()->getTensorDesc().getPrecision() != precision || @@ -49,9 +48,9 @@ class GatherTreeImpl: public ExtLayerBase { if (layer->insData[GATHER_TREE_END_TOKEN].lock()->getTensorDesc().getDims().size() != 1) THROW_IE_EXCEPTION << layer->name << " end_token should be 1 dimension"; - addConfig(layer, { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), - DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN) }); + addConfig(layer, { DataConfigurator(ConfLayout::PLN, precision), DataConfigurator(ConfLayout::PLN, precision), + DataConfigurator(ConfLayout::PLN, precision), DataConfigurator(ConfLayout::PLN, precision) }, + { DataConfigurator(ConfLayout::PLN, precision) }); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/grn.cpp b/inference-engine/src/mkldnn_plugin/nodes/grn.cpp index b5e4e214965ade..d412ab38554653 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/grn.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/grn.cpp @@ -22,7 +22,7 @@ class GRNImpl: public ExtLayerBase { bias = layer->GetParamAsFloat("bias"); - addConfig(layer, {{ConfLayout::PLN, false, 0}}, {{ConfLayout::PLN, false, 0}}); + addConfig(layer, {{ConfLayout::PLN, false, 0, Precision::FP32}}, {{ConfLayout::PLN, false, 0, Precision::FP32}}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/interp.cpp b/inference-engine/src/mkldnn_plugin/nodes/interp.cpp index 873575b8be4b96..6e2186899c3c33 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/interp.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/interp.cpp @@ -157,21 +157,13 @@ class InterpImpl: public ExtLayerBase { if (inData->getTensorDesc().getDims().size() != 4) THROW_IE_EXCEPTION << "Interp supports only 4d blobs!"; - auto src_precision = inData->getTensorDesc().getPrecision(); - if (src_precision != Precision::FP32 && src_precision != Precision::U8 && src_precision != Precision::BF16) - THROW_IE_EXCEPTION << layer->name << " Incorrect input data tensor precision. Only U8 or FP32 or BF16 are supported!"; - - auto dst_precision = layer->outData[0]->getTensorDesc().getPrecision(); - if (dst_precision != Precision::FP32 && dst_precision != Precision::BF16) - THROW_IE_EXCEPTION << layer->name << " Incorrect output data tensor precision. 
Only FP32 or BF16 are supported!"; - // We don't read other parameters since they are needed only for dst reshape in caffe pad_beg = layer->GetParamAsInt("pad_beg"); pad_end = layer->GetParamAsInt("pad_end"); align_corners = layer->GetParamAsBool("align_corners", true); ConfLayout blk_layout; - if (src_precision == Precision::U8) { + if (inData->getTensorDesc().getPrecision() == Precision::U8) { LayerConfig config; DataConfig dataConfigDct; dataConfigDct.desc = TensorDesc(Precision::U8, inData->getTensorDesc().getDims(), Layout::NCHW); @@ -197,15 +189,15 @@ class InterpImpl: public ExtLayerBase { if (mayiuse(avx512_common)) { blk_layout = ConfLayout::BLK16; interp_kernel.reset(new jit_uni_interp_kernel_f32()); - addConfig(layer, { DataConfigurator(blk_layout) }, { DataConfigurator(blk_layout) }); + addConfig(layer, { DataConfigurator(blk_layout, Precision::FP32) }, { DataConfigurator(blk_layout, Precision::FP32) }); } else if (mayiuse(avx2)) { blk_layout = ConfLayout::BLK8; interp_kernel.reset(new jit_uni_interp_kernel_f32()); - addConfig(layer, { DataConfigurator(blk_layout) }, { DataConfigurator(blk_layout) }); + addConfig(layer, { DataConfigurator(blk_layout, Precision::FP32) }, { DataConfigurator(blk_layout, Precision::FP32) }); } else { blk_layout = ConfLayout::BLK8; interp_kernel.reset(new jit_uni_interp_kernel_f32()); - addConfig(layer, { DataConfigurator(blk_layout) }, { DataConfigurator(blk_layout) }); + addConfig(layer, { DataConfigurator(blk_layout, Precision::FP32) }, { DataConfigurator(blk_layout, Precision::FP32) }); } } } catch (InferenceEngine::details::InferenceEngineException &ex) { diff --git a/inference-engine/src/mkldnn_plugin/nodes/log_softmax.cpp b/inference-engine/src/mkldnn_plugin/nodes/log_softmax.cpp index 3c3a32e4862ce6..d95309afc4797f 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/log_softmax.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/log_softmax.cpp @@ -51,7 +51,7 @@ class LogSoftmaxImpl: public ExtLayerBase { for (size_t i = (axis + 1); i < dims.size(); i++) reduced_axis_stride *= dims[i]; - addConfig(layer, { { ConfLayout::PLN, false, 0 } }, { { ConfLayout::PLN, false, 0 } }); + addConfig(layer, { { ConfLayout::PLN, false, 0, Precision::FP32 } }, { { ConfLayout::PLN, false, 0, Precision::FP32 } }); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/math.cpp b/inference-engine/src/mkldnn_plugin/nodes/math.cpp index 5a63ffe7128bb9..26d5939b98631f 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/math.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/math.cpp @@ -86,7 +86,7 @@ class MathImpl: public ExtLayerBase { else THROW_IE_EXCEPTION << layer->name << " Incorrect Math layer type!"; - addConfig(layer, {DataConfigurator(ConfLayout::PLN, Precision::FP32)}, {DataConfigurator(ConfLayout::PLN, Precision::FP32)}); + addConfig(layer, {DataConfigurator(ConfLayout::PLN, false, 0, Precision::FP32)}, {DataConfigurator(ConfLayout::PLN, false, 0, Precision::FP32)}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_crop_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_crop_node.cpp index 3b90a458ddb3af..d6dde5692f1516 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_crop_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_crop_node.cpp @@ -58,13 +58,12 @@ void MKLDNNCropNode::initSupportedPrimitiveDescriptors() { return; 
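SoftmaxGeneric::execute above and the MVN/Normalize execute paths further down all follow the same shape of dispatch: raw byte pointers plus runtime input/output precisions are routed to a kernel templated on the element types. The following is a minimal standalone sketch of that pattern; Prec, bfloat16 and copyKernel are illustrative stand-ins (this toy bfloat16 truncates rather than rounds), not the plugin's types.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <stdexcept>
#include <vector>

enum class Prec { FP32, BF16 };

struct bfloat16 {                       // toy bf16: the upper half of an FP32, truncated
    uint16_t raw;
    bfloat16() : raw(0) {}
    explicit bfloat16(float f) { uint32_t b; std::memcpy(&b, &f, 4); raw = static_cast<uint16_t>(b >> 16); }
    operator float() const { uint32_t b = static_cast<uint32_t>(raw) << 16; float f; std::memcpy(&f, &b, 4); return f; }
};

// Element-wise conversion stands in for the real compute kernel.
template <typename in_t, typename out_t>
void copyKernel(const in_t* src, out_t* dst, size_t n) {
    for (size_t i = 0; i < n; ++i)
        dst[i] = out_t(float(src[i]));
}

// Runtime precisions select the template instantiation.
void execute(const uint8_t* src, uint8_t* dst, size_t n, Prec inPrc, Prec outPrc) {
    if (inPrc == Prec::FP32 && outPrc == Prec::FP32)
        copyKernel(reinterpret_cast<const float*>(src), reinterpret_cast<float*>(dst), n);
    else if (inPrc == Prec::FP32 && outPrc == Prec::BF16)
        copyKernel(reinterpret_cast<const float*>(src), reinterpret_cast<bfloat16*>(dst), n);
    else if (inPrc == Prec::BF16 && outPrc == Prec::FP32)
        copyKernel(reinterpret_cast<const bfloat16*>(src), reinterpret_cast<float*>(dst), n);
    else if (inPrc == Prec::BF16 && outPrc == Prec::BF16)
        copyKernel(reinterpret_cast<const bfloat16*>(src), reinterpret_cast<bfloat16*>(dst), n);
    else
        throw std::runtime_error("Unsupported precision combination");
}

int main() {
    std::vector<float> src = {1.5f, 2.25f, 3.75f};          // exactly representable in bf16
    std::vector<bfloat16> dst(src.size());
    execute(reinterpret_cast<const uint8_t*>(src.data()),
            reinterpret_cast<uint8_t*>(dst.data()), src.size(), Prec::FP32, Prec::BF16);
    for (bfloat16 v : dst) std::cout << float(v) << " ";
    std::cout << "\n";
}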
InferenceEngine::Precision precision = getCnnLayer()->insData[0].lock()->getPrecision(); - if (precision != InferenceEngine::Precision::FP32) - precision = InferenceEngine::Precision::FP32; auto inputDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(precision); precision = getCnnLayer()->outData[0]->getPrecision(); - if (precision != InferenceEngine::Precision::FP32) - precision = InferenceEngine::Precision::FP32; auto outputDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(precision); + if (inputDataType != outputDataType) { + outputDataType = inputDataType; // Crop doesn't convert precisions, only moves data + } auto& inDims = getParentEdgeAt(0)->getDims(); if (inDims.ndims() != 2 && inDims.ndims() != 4 && inDims.ndims() != 5) { @@ -125,19 +124,19 @@ void MKLDNNCropNode::execute(mkldnn::stream strm) { if (!MKLDNNMemory::IsPlainFormat(parentMem.GetFormat())) { m_block_size = parentMem.GetDescriptor().data.layout_desc.blocking.block_dims[1]; } - int m_inner_dim = dims[dims.size() - 1] * m_block_size; + const int m_inner_dim = dims[dims.size() - 1] * m_block_size; const memory &dst_d = getChildEdgeAt(0)->getMemory().GetPrimitive(); - int dst_ndims = dst_d.get_primitive_desc().desc().data.ndims; + const int dst_ndims = dst_d.get_primitive_desc().desc().data.ndims; // TODO: Rewrite it in general case. For every tensor // and rank, without using letter N,C,D,H,W - int OFFSET_N = (dst_ndims > 0) ? offsets[0] : 0; - int OFFSET_C = (dst_ndims > 1) ? offsets[1] : 0; - int OFFSET_D = (dst_ndims > 4) ? offsets[offsets.size() - 3] : 0; - int OFFSET_H = (dst_ndims > 2) ? offsets[offsets.size() - 2] : 0; - int OFFSET_W = (dst_ndims > 3) ? offsets[offsets.size() - 1] : 0; + const int OFFSET_N = (dst_ndims > 0) ? offsets[0] : 0; + const int OFFSET_C = (dst_ndims > 1) ? offsets[1] : 0; + const int OFFSET_D = (dst_ndims > 4) ? offsets[offsets.size() - 3] : 0; + const int OFFSET_H = (dst_ndims > 2) ? offsets[offsets.size() - 2] : 0; + const int OFFSET_W = (dst_ndims > 3) ? offsets[offsets.size() - 1] : 0; // TODO: Check applicability of dyn_batch_lim in early steps. // crop of batch dimension doesn't support dyn batch. @@ -155,42 +154,16 @@ void MKLDNNCropNode::execute(mkldnn::stream strm) { const int IH = (src_ndims > 2) ? src_dims[src_dims.size() - 2] : 1; const int IW = (src_ndims > 3) ? 
src_dims[src_dims.size() - 1] : 1; - const auto *src_data = reinterpret_cast(parentMem.GetData()) + - parentMem.GetDescriptor().data.layout_desc.blocking.offset_padding; - float *dst_data = reinterpret_cast(getChildEdgeAt(0)->getMemory().GetData()) + - getChildEdgeAt(0)->getMemory().GetDescriptor().data.layout_desc.blocking.offset_padding; + const uint8_t itemSize = MKLDNNExtensionUtils::sizeOfDataType(mkldnn::memory::data_type(parentMem.GetDataType())); + + const auto *src_data = reinterpret_cast(parentMem.GetData()) + + itemSize * parentMem.GetDescriptor().data.layout_desc.blocking.offset_padding; + auto *dst_data = reinterpret_cast(getChildEdgeAt(0)->getMemory().GetData()) + + itemSize * getChildEdgeAt(0)->getMemory().GetDescriptor().data.layout_desc.blocking.offset_padding; -#ifdef _WIN32 - if (OD == 1 && OH == 1 && OW == 1 && ID == 1 && IH == 1 && IW == 1) { - for (int n = 0; n < ON; ++n) { - cpu_memcpy(&dst_data[n*OC], &src_data[(n+OFFSET_N)*IC + OFFSET_C], OC * sizeof(float)); - } - } else { - for (int n = 0; n < ON; ++n) { - for (int c = 0; c < OC; c += m_block_size) { - for (int d = 0; d < OD; ++d) { - for (int h = 0; h < OH; ++h) { - int dst_ind = - n*OC*OD*OH*OW + c*OD*OH*OW + d*OH*OW*m_block_size + - h*OW*m_block_size; - - int src_ind = - (n+OFFSET_N)*IC*ID*IH*IW + - (c+OFFSET_C)*ID*IH*IW + - (d+OFFSET_D)*IH*IW*m_block_size + - (h+OFFSET_H)*IW*m_block_size + - OFFSET_W*m_block_size; - - cpu_memcpy(dst_data + dst_ind, src_data + src_ind, m_inner_dim * sizeof(float)); - } - } - } - } - } -#else if (OD == 1 && OH == 1 && OW == 1 && ID == 1 && IH == 1 && IW == 1) { parallel_for(ON, [&](int n) { - cpu_memcpy(&dst_data[n*OC], &src_data[(n+OFFSET_N)*IC + OFFSET_C], OC * sizeof(float)); + cpu_memcpy(dst_data + itemSize * n * OC, src_data + itemSize *((n+OFFSET_N)*IC + OFFSET_C), OC * itemSize); }); } else { parallel_for2d(ON, (OC / m_block_size), [&](int n, int c) { @@ -201,7 +174,7 @@ void MKLDNNCropNode::execute(mkldnn::stream strm) { ((d+OFFSET_D)*IH*IW + OFFSET_H*IW + OFFSET_W)*m_block_size; for (int h = 0; h < OH; ++h) { - cpu_memcpy(dst_data + dst_ind, src_data + src_ind, m_inner_dim * sizeof(float)); + cpu_memcpy(dst_data + itemSize * dst_ind, src_data + itemSize * src_ind, m_inner_dim * itemSize); src_ind += IW * m_block_size; dst_ind += OW * m_block_size; @@ -209,7 +182,6 @@ void MKLDNNCropNode::execute(mkldnn::stream strm) { } }); } -#endif } bool MKLDNNCropNode::created() const { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp index 625a5b276541fe..2181c3f47167f6 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp @@ -12,6 +12,7 @@ #include #include #include +#include "utils/bfloat16.hpp" #include #include "ie_parallel.hpp" #include @@ -31,6 +32,15 @@ using namespace Xbyak; #define GET_OFF(field) offsetof(jit_mvn_call_args, field) +// some utility functions +static inline bool isFloatCompatible(Precision prc) { + return Precision::FP32 == prc || Precision::BF16 == prc; +} + +static inline bool isFloatCompatible(memory::data_type type) { + return memory::f32 == type || memory::bf16 == type; +} + // normalize_variance = false : src->mean // normalize_variance = true : src+mean->variance:sqr(x-mean) template @@ -88,13 +98,13 @@ struct jit_uni_mvn_mean_variance_kernel_f32 : public jit_uni_mvn_mean_variance_k load_vector(vmm_val, ptr[reg_src], jcp_.src_dt); if (jcp_.normalize_variance) { - if (jcp_.src_dt 
!= memory::f32) + if (!isFloatCompatible(jcp_.src_dt)) uni_vcvtdq2ps(vmm_val, vmm_val); uni_vsubps(vmm_val, vmm_val, vmm_mean); uni_vfmadd231ps(vmm_variance, vmm_val, vmm_val); } else { - if (jcp_.src_dt != memory::f32) + if (!isFloatCompatible(jcp_.src_dt)) uni_vpaddd(vmm_sum, vmm_sum, vmm_val); else uni_vaddps(vmm_sum, vmm_sum, vmm_val); @@ -138,7 +148,7 @@ struct jit_uni_mvn_mean_variance_kernel_f32 : public jit_uni_mvn_mean_variance_k uni_vmovups(ptr[reg_variance], vmm_variance); } else { - if (jcp_.src_dt != memory::f32) + if (!isFloatCompatible(jcp_.src_dt)) uni_vcvtdq2ps(vmm_sum, vmm_sum); if (!jcp_.planar_layout && !jcp_.across_channels) { @@ -199,6 +209,10 @@ struct jit_uni_mvn_mean_variance_kernel_f32 : public jit_uni_mvn_mean_variance_k case memory::u8: uni_vpmovzxbd(vmm_src, op); break; + case memory::bf16: + uni_vpmovzxwd(vmm_src, op); + uni_vpslld(vmm_src, vmm_src, 16); + break; default: assert(!"unknown dst_dt"); } @@ -348,11 +362,15 @@ struct jit_uni_mvn_kernel_f32 : public jit_uni_mvn_kernel, public jit_generator case memory::u8: uni_vpmovzxbd(vmm_src, op); break; + case memory::bf16: + uni_vpmovzxwd(vmm_src, op); + uni_vpslld(vmm_src, vmm_src, 16); + break; default: assert(!"unknown dst_dt"); } - if (src_dt != memory::f32) + if (!isFloatCompatible(src_dt)) uni_vcvtdq2ps(vmm_src, vmm_src); } @@ -362,6 +380,9 @@ struct jit_uni_mvn_kernel_f32 : public jit_uni_mvn_kernel, public jit_generator if (dst_dt == memory::f32) { uni_vmovups(op, vmm_dst); + } else if (dst_dt == memory::bf16) { + vcvtneps2bf16(ymm_dst, vmm_dst); + uni_vmovups(op, ymm_dst); } else if (dst_dt == memory::u8) { uni_vcvtps2dq(vmm_dst, vmm_dst); if (isa == cpu::avx512_common) { @@ -413,7 +434,7 @@ struct jit_uni_mvn_kernel_f32 : public jit_uni_mvn_kernel, public jit_generator depthwise_inj_idx++; } else if (post_op.is_quantization()) { bool do_dequantization = post_op.quantization.alg == alg_kind::quantization_quantize_dequantize; - bool do_rounding = do_dequantization || dst_dt == memory::f32 || i != p.len_ - 1; + bool do_rounding = do_dequantization || isFloatCompatible(dst_dt) || i != p.len_ - 1; int s_idx = vmm_val.getIdx(); quantization_injectors[quantization_inj_idx]->init_crop_ptrs(reg_oc_off); @@ -475,8 +496,17 @@ void MKLDNNMVNNode::initSupportedPrimitiveDescriptors() { if (getParentEdgeAt(0)->getDims().ndims() < 4 || getParentEdgeAt(0)->getDims().ndims() > 5 || across_channels != 0 || normalize_variance != 1) { - inputPrecision = Precision::FP32; - outputPrecision = Precision::FP32; + if (!isFloatCompatible(inputPrecision)) { + inputPrecision = Precision::FP32; + } + if (!isFloatCompatible(outputPrecision)) { + outputPrecision = Precision::FP32; + } + } + + if (!mayiuse(avx512_core_bf16)) { + if (outputPrecision == Precision::BF16) + outputPrecision = Precision::FP32; } auto inputDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(inputPrecision); @@ -498,39 +528,50 @@ void MKLDNNMVNNode::initSupportedPrimitiveDescriptors() { config.inConfs[0].inPlace = -1; config.outConfs[0].inPlace = canBeInplace ? 
0 : -1; - auto pushDesc = [&](memory::format format) { + auto pushDesc = [&](memory::format format, impl_desc_type impl_type) { config.inConfs[0].desc = MKLDNNMemoryDesc(getParentEdgeAt(0)->getDims(), inputDataType, format); config.outConfs[0].desc = MKLDNNMemoryDesc(getParentEdgeAt(0)->getDims(), outputDataType, format); - supportedPrimitiveDescriptors.push_back({config, impl_desc_type::unknown, format}); + supportedPrimitiveDescriptors.push_back({config, impl_type, format}); }; + impl_desc_type impl_type; + if (mayiuse(cpu::avx512_common)) { + impl_type = impl_desc_type::jit_avx512; + } else if (mayiuse(cpu::avx2)) { + impl_type = impl_desc_type::jit_avx2; + } else if (mayiuse(cpu::sse42)) { + impl_type = impl_desc_type::jit_sse42; + } else { + impl_type = impl_desc_type::ref; + } + if (across_channels == 0 && normalize_variance == 1) { if (getParentEdgeAt(0)->getDims().ndims() == 4) { - pushDesc(memory::nhwc); + pushDesc(memory::nhwc, impl_type); } else if (getParentEdgeAt(0)->getDims().ndims() == 5) { - pushDesc(memory::ndhwc); + pushDesc(memory::ndhwc, impl_type); } } - if (inputPrecision == Precision::FP32 && outputPrecision == Precision::FP32) { - if (getParentEdgeAt(0)->getDims().ndims() == 4) { - if (mayiuse(cpu::avx512_common)) { - pushDesc(memory::nChw16c); - } else if (mayiuse(cpu::avx2) || mayiuse(cpu::sse42)) { - pushDesc(memory::nChw8c); + if (isFloatCompatible(inputPrecision) && isFloatCompatible(outputPrecision)) { + if (impl_desc_type::jit_avx512 == impl_type) { + if (getParentEdgeAt(0)->getDims().ndims() == 4) { + pushDesc(memory::nChw16c, impl_type); + } else if (getParentEdgeAt(0)->getDims().ndims() == 5) { + pushDesc(memory::nCdhw16c, impl_type); } - } else if (getParentEdgeAt(0)->getDims().ndims() == 5) { - if (mayiuse(cpu::avx512_common)) { - pushDesc(memory::nCdhw16c); - } else if (mayiuse(cpu::avx2) || mayiuse(cpu::sse42)) { - pushDesc(memory::nCdhw8c); + } else if (impl_desc_type::jit_avx2 == impl_type || impl_desc_type::jit_sse42 == impl_type) { + if (getParentEdgeAt(0)->getDims().ndims() == 4) { + pushDesc(memory::nChw8c, impl_type); + } else if (getParentEdgeAt(0)->getDims().ndims() == 5) { + pushDesc(memory::nCdhw8c, impl_type); } } if (fusedWith.empty()) { if (canBeInplace) config.inConfs[0].inPlace = 0; - pushDesc(MKLDNNMemory::GetPlainFormat(getChildEdgeAt(0)->getDims())); + pushDesc(MKLDNNMemory::GetPlainFormat(getChildEdgeAt(0)->getDims()), impl_type); } } } @@ -614,11 +655,32 @@ void MKLDNNMVNNode::execute(mkldnn::stream strm) { Layout layout = getParentEdgeAt(0)->getDesc().getLayout(); - auto src_data = reinterpret_cast(srcMemPtr->GetData()); - auto dst_data = reinterpret_cast(dstMemPtr->GetData()); - if (layout == C || layout == NC || layout == CHW || layout == NCHW || layout == NCDHW) { - mvn_pln(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + if (input_prec == Precision::FP32) { + auto src_data = reinterpret_cast(srcMemPtr->GetData()); + if (output_prec == Precision::FP32) { + auto dst_data = reinterpret_cast(dstMemPtr->GetData()); + mvn_pln(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else if (output_prec == Precision::BF16) { + auto dst_data = reinterpret_cast(dstMemPtr->GetData()); + mvn_pln(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else { + THROW_IE_EXCEPTION << "Unsupported output precision: " << output_prec.name(); + } + } else if (input_prec == Precision::BF16) { + auto src_data = reinterpret_cast(srcMemPtr->GetData()); + if (output_prec == Precision::FP32) { + auto dst_data = 
reinterpret_cast(dstMemPtr->GetData()); + mvn_pln(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else if (output_prec == Precision::BF16) { + auto dst_data = reinterpret_cast(dstMemPtr->GetData()); + mvn_pln(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else { + THROW_IE_EXCEPTION << "Unsupported output precision: " << output_prec.name(); + } + } else { + THROW_IE_EXCEPTION << "Unsupported input precision: " << input_prec.name(); + } } else { if (output_prec == Precision::U8) { auto dst_data = reinterpret_cast(dstMemPtr->GetData()); @@ -631,6 +693,11 @@ void MKLDNNMVNNode::execute(mkldnn::stream strm) { } else if (input_prec == Precision::FP32) { auto src_data = reinterpret_cast(srcMemPtr->GetData()); mvn_blk(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else if (input_prec == Precision::BF16) { + auto src_data = reinterpret_cast(srcMemPtr->GetData()); + mvn_blk(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else { + THROW_IE_EXCEPTION << "Unsupported input precision: " << input_prec.name(); } } else if (output_prec == Precision::I8) { auto dst_data = reinterpret_cast(dstMemPtr->GetData()); @@ -643,6 +710,11 @@ void MKLDNNMVNNode::execute(mkldnn::stream strm) { } else if (input_prec == Precision::FP32) { auto src_data = reinterpret_cast(srcMemPtr->GetData()); mvn_blk(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else if (input_prec == Precision::BF16) { + auto src_data = reinterpret_cast(srcMemPtr->GetData()); + mvn_blk(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else { + THROW_IE_EXCEPTION << "Unsupported input precision: " << input_prec.name(); } } else if (output_prec == Precision::FP32) { auto dst_data = reinterpret_cast(dstMemPtr->GetData()); @@ -655,7 +727,31 @@ void MKLDNNMVNNode::execute(mkldnn::stream strm) { } else if (input_prec == Precision::FP32) { auto src_data = reinterpret_cast(srcMemPtr->GetData()); mvn_blk(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else if (input_prec == Precision::BF16) { + auto src_data = reinterpret_cast(srcMemPtr->GetData()); + mvn_blk(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else { + THROW_IE_EXCEPTION << "Unsupported input precision: " << input_prec.name(); } + } else if (output_prec == Precision::BF16) { + auto dst_data = reinterpret_cast(dstMemPtr->GetData()); + if (input_prec == Precision::U8) { + auto src_data = reinterpret_cast(srcMemPtr->GetData()); + mvn_blk(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else if (input_prec == Precision::I8) { + auto src_data = reinterpret_cast(srcMemPtr->GetData()); + mvn_blk(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else if (input_prec == Precision::FP32) { + auto src_data = reinterpret_cast(srcMemPtr->GetData()); + mvn_blk(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else if (input_prec == Precision::BF16) { + auto src_data = reinterpret_cast(srcMemPtr->GetData()); + mvn_blk(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims()); + } else { + THROW_IE_EXCEPTION << "Unsupported input precision: " << input_prec.name(); + } + } else { + THROW_IE_EXCEPTION << "Unsupported output precision: " << output_prec.name(); } } } @@ -673,7 +769,8 @@ std::tuple MKLDNNMVNNode::get5dShapes(co return shapes; } -void MKLDNNMVNNode::mvn_pln(const float* src_data, float* dst_data, const SizeVector& dims) { +template +void MKLDNNMVNNode::mvn_pln(const in_data_t* src_data, out_data_t* dst_data, 
const SizeVector& dims) { size_t blk_size = 1; // blk size in vmm if (mayiuse(cpu::avx512_common)) { blk_size = 16; @@ -705,7 +802,7 @@ void MKLDNNMVNNode::mvn_pln(const float* src_data, float* dst_data, const SizeVe auto arg = jit_mvn_call_args(); arg.src = src_data + cc; arg.sum = static_cast(&mean_internal); - arg.src_stride = static_cast(blk_size * sizeof(float)); + arg.src_stride = static_cast(blk_size * sizeof(in_data_t)); arg.work_amount = static_cast(C2 / blk_size); (*mvn_mean_kernel)(&arg); for (size_t tail = tail_across_channels; tail < C2; tail++) { @@ -737,7 +834,7 @@ void MKLDNNMVNNode::mvn_pln(const float* src_data, float* dst_data, const SizeVe arg.src = src_data + cc; arg.mean = static_cast(&mean); arg.variance = static_cast(&variance_internal); - arg.src_stride = static_cast(blk_size * sizeof(float)); + arg.src_stride = static_cast(blk_size * sizeof(in_data_t)); arg.work_amount = static_cast(C2 / blk_size); (*mvn_variance_kernel)(&arg); @@ -766,8 +863,8 @@ void MKLDNNMVNNode::mvn_pln(const float* src_data, float* dst_data, const SizeVe arg.dst = dst_data + cc; arg.mean = static_cast(&mean); arg.variance = static_cast(&variance); - arg.src_stride = static_cast(blk_size * sizeof(float)); - arg.dst_stride = static_cast(blk_size * sizeof(float)); + arg.src_stride = static_cast(blk_size * sizeof(in_data_t)); + arg.dst_stride = static_cast(blk_size * sizeof(out_data_t)); arg.work_amount = static_cast(C2 / blk_size); (*mvn_kernel)(&arg); @@ -792,8 +889,8 @@ void MKLDNNMVNNode::mvn_pln(const float* src_data, float* dst_data, const SizeVe arg.src = src_data + cc; arg.dst = dst_data + cc; arg.mean = static_cast(&mean); - arg.src_stride = static_cast(blk_size * sizeof(float)); - arg.dst_stride = static_cast(blk_size * sizeof(float)); + arg.src_stride = static_cast(blk_size * sizeof(in_data_t)); + arg.dst_stride = static_cast(blk_size * sizeof(out_data_t)); arg.work_amount = static_cast(C2 / blk_size); (*mvn_kernel)(&arg); @@ -823,8 +920,8 @@ void MKLDNNMVNNode::mvn_pln(const float* src_data, float* dst_data, const SizeVe arg.src = src_data + cc; arg.dst = dst_data + cc; arg.sum = static_cast(&mean); - arg.src_stride = static_cast(blk_size * sizeof(float)); - arg.dst_stride = static_cast(blk_size * sizeof(float)); + arg.src_stride = static_cast(blk_size * sizeof(in_data_t)); + arg.dst_stride = static_cast(blk_size * sizeof(out_data_t)); arg.work_amount = static_cast(C2 / blk_size); (*mvn_mean_kernel)(&arg); @@ -1227,7 +1324,7 @@ void MKLDNNMVNNode::mvn_blk(const in_data_t* src_data, out_data_t* dst_data, con } else if (post_op.is_quantization()) { bool do_dequantization = post_op.quantization.alg == alg_kind::quantization_quantize_dequantize; - bool do_rounding = do_dequantization || output_prec == Precision::FP32 || + bool do_rounding = do_dequantization || isFloatCompatible(output_prec) || i != p.len_ - 1; auto quant = post_op.quantization; @@ -1251,7 +1348,7 @@ void MKLDNNMVNNode::mvn_blk(const in_data_t* src_data, out_data_t* dst_data, con } } } - if (output_prec == Precision::FP32) { + if (isFloatCompatible(output_prec)) { dst_data[ch + w * src_stride] = dst_value; } else if (output_prec == Precision::U8) { dst_data[ch + w * src_stride] = (dst_value >= 0) ? 
lroundf(dst_value) : 0; @@ -1300,7 +1397,7 @@ void MKLDNNMVNNode::mvn_blk(const in_data_t* src_data, out_data_t* dst_data, con size_t ch = cd + h * C0; for (size_t w = 0lu; w < W; w++) { float dst_value = src_data[ch + w * src_stride] - mean_buffer_ptr[c]; - if (output_prec == Precision::FP32) { + if (isFloatCompatible(output_prec)) { dst_data[ch + w * src_stride] = dst_value; } else if (output_prec == Precision::U8) { dst_data[ch + w * src_stride] = (dst_value >= 0) ? lroundf(dst_value) : 0; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h index 3919b94817b8ef..97203d9b22e513 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h @@ -81,7 +81,8 @@ class MKLDNNMVNNode : public MKLDNNNode { } private: - void mvn_pln(const float* src_data, float* dst_data, const InferenceEngine::SizeVector& dims); + template + void mvn_pln(const in_data_t* src_data, out_data_t* dst_data, const InferenceEngine::SizeVector& dims); template void mvn_blk(const in_data_t* src_data, out_data_t* dst_data, const InferenceEngine::SizeVector& dims); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp index 114579e3fb80e7..6ce21a3911723c 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp @@ -5,6 +5,7 @@ #include "mkldnn_quantize_node.h" #include "mkldnn_eltwise_node.h" #include +#include "utils/bfloat16.hpp" #include #include "ie_parallel.hpp" #include "jit_uni_eltwise.hpp" @@ -24,6 +25,10 @@ using namespace Xbyak; #define GET_OFF(field) offsetof(jit_normalize_call_args, field) +static inline bool isFloatCompatible(memory::data_type type) { + return memory::f32 == type || memory::bf16 == type; +} + template struct jit_uni_normalize_modulo_kernel_f32 : public jit_uni_normalize_modulo_kernel, public jit_generator { DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_uni_normalize_modulo_kernel_f32) @@ -119,6 +124,10 @@ struct jit_uni_normalize_modulo_kernel_f32 : public jit_uni_normalize_modulo_ker case memory::s32: uni_vmovups(vmm_src, op); break; + case memory::bf16: + uni_vpmovzxwd(vmm_src, op); + uni_vpslld(vmm_src, vmm_src, 16); + break; case memory::s8: uni_vpmovsxbd(vmm_src, op); break; @@ -128,8 +137,7 @@ struct jit_uni_normalize_modulo_kernel_f32 : public jit_uni_normalize_modulo_ker default: assert(!"unknown dst_dt"); } - - if (src_dt != memory::f32) + if (!isFloatCompatible(src_dt)) uni_vcvtdq2ps(vmm_src, vmm_src); } }; @@ -239,7 +247,7 @@ struct jit_uni_normalize_kernel_f32 : public jit_uni_normalize_kernel, public ji Xbyak::Label tail_loop_label; Xbyak::Label tail_loop_end_label; - int step = vlen / sizeof(float); + int step = jcp_.src_dt == memory::bf16 ? 16 : (vlen / sizeof(float)); L(main_loop_label); { cmp(reg_work_amount, step); @@ -322,7 +330,7 @@ struct jit_uni_normalize_kernel_f32 : public jit_uni_normalize_kernel, public ji Xbyak::Label tail_loop_label; Xbyak::Label tail_loop_end_label; - int step = vlen / sizeof(float); + int step = jcp_.src_dt == memory::bf16 ? 
16 : (vlen / sizeof(float)); L(main_loop_label); { cmp(reg_work_amount, step); @@ -520,6 +528,10 @@ struct jit_uni_normalize_kernel_f32 : public jit_uni_normalize_kernel, public ji case memory::s32: uni_vmovups(vmm_src, op); break; + case memory::bf16: + uni_vpmovzxwd(vmm_src, op); + uni_vpslld(vmm_src, vmm_src, 16); + break; case memory::s8: uni_vpmovsxbd(vmm_src, op); break; @@ -529,8 +541,7 @@ struct jit_uni_normalize_kernel_f32 : public jit_uni_normalize_kernel, public ji default: assert(!"unknown dst_dt"); } - - if (src_dt != memory::f32) + if (!isFloatCompatible(src_dt)) uni_vcvtdq2ps(vmm_src, vmm_src); } @@ -540,6 +551,10 @@ struct jit_uni_normalize_kernel_f32 : public jit_uni_normalize_kernel, public ji case memory::s32: movss(xmm_src, op); break; + case memory::bf16: + pinsrw(xmm_src, op, 0x0); + uni_vpslld(xmm_src, xmm_src, 16); + break; case memory::s8: movsx(reg_tmp_32, op); movq(xmm_src, reg_tmp_64); @@ -552,7 +567,7 @@ struct jit_uni_normalize_kernel_f32 : public jit_uni_normalize_kernel, public ji assert(!"unknown dst_dt"); } - if (src_dt != data_type::f32) { + if (!isFloatCompatible(src_dt)) { uni_vcvtdq2ps(xmm_src, xmm_src); } } @@ -563,6 +578,9 @@ struct jit_uni_normalize_kernel_f32 : public jit_uni_normalize_kernel, public ji if (dst_dt == memory::f32) { uni_vmovups(op, vmm_dst); + } else if (dst_dt == memory::bf16) { + vcvtneps2bf16(ymm_dst, vmm_dst); + vmovdqu16(op, ymm_dst); } else if (dst_dt == memory::u8) { uni_vcvtps2dq(vmm_dst, vmm_dst); if (isa == cpu::avx512_common) { @@ -596,7 +614,7 @@ struct jit_uni_normalize_kernel_f32 : public jit_uni_normalize_kernel, public ji } inline void store_scalar(const Xbyak::Address &op, Xmm xmm_dst, memory::data_type dst_dt) { - if (dst_dt != data_type::f32) { + if (!isFloatCompatible(dst_dt)) { uni_vcvtps2dq(xmm_dst, xmm_dst); } @@ -605,6 +623,10 @@ struct jit_uni_normalize_kernel_f32 : public jit_uni_normalize_kernel, public ji case memory::s32: movss(op, xmm_dst); break; + case memory::bf16: + uni_vpsrld(xmm_dst, xmm_dst, 16); + pextrw(op, xmm_dst, 0x0); + break; case memory::s8: uni_vpackssdw(xmm_dst, xmm_dst, xmm_dst); uni_vpacksswb(xmm_dst, xmm_dst, xmm_dst); @@ -653,7 +675,7 @@ struct jit_uni_normalize_kernel_f32 : public jit_uni_normalize_kernel, public ji || quantization_injectors[quantization_inj_idx] == nullptr) assert(!"Invalid quantization injectors."); bool do_dequantization = post_op.quantization.alg == alg_kind::quantization_quantize_dequantize; - bool do_rounding = do_dequantization || dst_dt == memory::f32 || i != p.len_ - 1; + bool do_rounding = do_dequantization || isFloatCompatible(dst_dt) || i != p.len_ - 1; int s_idx = vmm_val.getIdx(); @@ -747,9 +769,7 @@ void MKLDNNNormalizeNode::initSupportedPrimitiveDescriptors() { setPostOps(attr, true); Precision inputPrecision = getCnnLayer()->insData[0].lock()->getPrecision(); - inputPrecision = inputPrecision == Precision::BF16 ? Precision(Precision::FP32) : inputPrecision; Precision outputPrecision = getCnnLayer()->outData[0]->getPrecision(); - outputPrecision = outputPrecision == Precision::BF16 ? 
Precision(Precision::FP32) : outputPrecision; if (!fusedWith.empty()) { auto lastFusedLayer = fusedWith[fusedWith.size() - 1].get()->getCnnLayer(); @@ -758,6 +778,13 @@ void MKLDNNNormalizeNode::initSupportedPrimitiveDescriptors() { } } + if (inputPrecision == Precision::BF16 || outputPrecision == Precision::BF16) { + if (!mayiuse(avx512_core_bf16)) + inputPrecision = outputPrecision = Precision::FP32; + else + inputPrecision = outputPrecision = Precision::BF16; + } + auto isOneOf = [&](InferenceEngine::Precision precision, std::vector precisions) { for (auto p : precisions) { if (precision == p) { @@ -766,10 +793,10 @@ void MKLDNNNormalizeNode::initSupportedPrimitiveDescriptors() { } return false; }; - if (!isOneOf(inputPrecision, {Precision::FP32, Precision::I8, Precision::U8})) { + if (!isOneOf(inputPrecision, {Precision::FP32, Precision::BF16, Precision::I8, Precision::U8})) { THROW_IE_EXCEPTION << "Unsupported input precision. " << getName(); } - if (!isOneOf(outputPrecision, {Precision::FP32, Precision::I8, Precision::U8})) { + if (!isOneOf(outputPrecision, {Precision::FP32, Precision::BF16, Precision::I8, Precision::U8})) { THROW_IE_EXCEPTION << "Unsupported output precision. " << getName(); } if (!isOneOf(weights_prec, {Precision::FP32, Precision::BF16})) { @@ -918,6 +945,8 @@ void MKLDNNNormalizeNode::execute(mkldnn::stream strm) { } else if (input_prec == Precision::FP32) { auto src_data = reinterpret_cast(src_ptr); normalize_function(src_data, dst_data, dims); + } else { + THROW_IE_EXCEPTION << "Unsupported input precision: " << input_prec.name(); } } else if (output_prec == Precision::I8) { auto dst_data = reinterpret_cast(dst_ptr); @@ -930,6 +959,8 @@ void MKLDNNNormalizeNode::execute(mkldnn::stream strm) { } else if (input_prec == Precision::FP32) { auto src_data = reinterpret_cast(src_ptr); normalize_function(src_data, dst_data, dims); + } else { + THROW_IE_EXCEPTION << "Unsupported input precision: " << input_prec.name(); } } else if (output_prec == Precision::FP32) { auto dst_data = reinterpret_cast(dst_ptr); @@ -942,7 +973,15 @@ void MKLDNNNormalizeNode::execute(mkldnn::stream strm) { } else if (input_prec == Precision::FP32) { auto src_data = reinterpret_cast(src_ptr); normalize_function(src_data, dst_data, dims); + } else { + THROW_IE_EXCEPTION << "Unsupported input precision: " << input_prec.name(); } + } else if (output_prec == Precision::BF16) { + auto dst_data = reinterpret_cast(dst_ptr); + auto src_data = reinterpret_cast(src_ptr); + normalize_function(src_data, dst_data, dims); + } else { + THROW_IE_EXCEPTION << "Unsupported output precision: " << output_prec.name(); } } diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.cpp index 6c597e309e06ed..30cc84ae586a5b 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.cpp @@ -91,13 +91,7 @@ void MKLDNNPoolingNode::getSupportedDescriptors() { MKLDNNMemoryDesc in_candidate{parentDims, inputDataType, parentDims.ndims() == 5 ? memory::format::ndhwc : memory::format::nhwc}; MKLDNNMemoryDesc out_candidate{childDims, outputDataType, parentDims.ndims() == 5 ? 
memory::format::ndhwc : memory::format::nhwc}; createDescriptor({ in_candidate }, { out_candidate }); - } else if ((parentDims.ndims() == 4 || parentDims.ndims() == 5) && (inputDataType == memory::bf16 || outputDataType == memory::bf16)) { - MKLDNNMemoryDesc in_candidate{ parentDims, memory::bf16, parentDims.ndims() == 5 ? memory::format::nCdhw16c : memory::format::nChw16c}; - MKLDNNMemoryDesc out_candidate{ childDims, memory::bf16, parentDims.ndims() == 5 ? memory::format::nCdhw16c : memory::format::nChw16c}; - createDescriptor({ in_candidate }, { out_candidate }); } else if ((parentDims.ndims() == 4 || parentDims.ndims() == 5) && parentDims[1] == 1) { - inputDataType = memory::f32; - outputDataType = memory::f32; // WA. We should force planar layout since it provides better performance MKLDNNMemoryDesc in_candidate{parentDims, inputDataType, parentDims.ndims() == 5 ? memory::format::ncdhw : memory::format::nchw}; MKLDNNMemoryDesc out_candidate{childDims, outputDataType, parentDims.ndims() == 5 ? memory::format::ncdhw : memory::format::nchw}; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.cpp index d43b347e7e69d2..81c11c330b955e 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.cpp @@ -12,6 +12,7 @@ #include #include #include +#include "utils/bfloat16.hpp" #include "ie_parallel.hpp" #include @@ -64,6 +65,11 @@ using namespace Xbyak; #define GET_PTR_NCD_BASE_PTR_N_BLK const uint8_t *in_ptr_ncd = in_ptr_n + src_data_size * (icb * ID + id) * IH * IW * blk_size; \ uint8_t *out_ptr_ncd = out_ptr_n + dst_data_size * (ocb * OD + od) * OH * OW * blk_size; +// some utility functions +static inline bool isFloatCompatible(memory::data_type type) { + return memory::f32 == type || memory::bf16 == type; +} + template struct jit_uni_reduce_kernel_f32 : public jit_uni_reduce_kernel, public jit_generator { DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_uni_reduce_kernel_f32) @@ -278,13 +284,13 @@ struct jit_uni_reduce_kernel_f32 : public jit_uni_reduce_kernel, public jit_gene uni_vpxor(vmm_dst, vmm_dst, vmm_dst); break; case Reduce::Max: - if (jcp_.dst_dt == memory::f32) + if (isFloatCompatible(jcp_.dst_dt)) uni_vmovups(vmm_dst, table_val(2)); else uni_vmovups(vmm_dst, table_val(4)); break; case Reduce::Min: - if (jcp_.dst_dt == memory::f32) + if (isFloatCompatible(jcp_.dst_dt)) uni_vmovups(vmm_dst, table_val(3)); else uni_vmovups(vmm_dst, table_val(5)); @@ -540,6 +546,10 @@ struct jit_uni_reduce_kernel_f32 : public jit_uni_reduce_kernel, public jit_gene case memory::s32: uni_vmovups(vmm_src, op); break; + case memory::bf16: + uni_vpmovzxwd(vmm_src, op); + uni_vpslld(vmm_src, vmm_src, 16); + break; case memory::s8: uni_vpmovsxbd(vmm_src, op); break; @@ -550,7 +560,7 @@ struct jit_uni_reduce_kernel_f32 : public jit_uni_reduce_kernel, public jit_gene assert(!"unknown src_dt"); } - if (src_dt != memory::f32) + if (!isFloatCompatible(src_dt)) uni_vcvtdq2ps(vmm_src, vmm_src); } @@ -560,6 +570,10 @@ struct jit_uni_reduce_kernel_f32 : public jit_uni_reduce_kernel, public jit_gene case memory::s32: movss(xmm_src, op); break; + case memory::bf16: + pinsrw(xmm_src, op, 0x0); + uni_vpslld(xmm_src, xmm_src, 16); + break; case memory::s8: movsx(reg_tmp_32, op); movq(xmm_src, reg_tmp_64); @@ -572,7 +586,7 @@ struct jit_uni_reduce_kernel_f32 : public jit_uni_reduce_kernel, public jit_gene assert(!"unknown src_dt"); } - if (src_dt != data_type::f32) { 
+ if (!isFloatCompatible(src_dt)) { uni_vcvtdq2ps(xmm_src, xmm_src); } } @@ -581,7 +595,7 @@ struct jit_uni_reduce_kernel_f32 : public jit_uni_reduce_kernel, public jit_gene Xmm xmm_dst = Xmm(vmm_dst.getIdx()); Ymm ymm_dst = Ymm(vmm_dst.getIdx()); - if (dst_dt != memory::f32) { + if (!isFloatCompatible(dst_dt)) { uni_vcvtps2dq(vmm_dst, vmm_dst); } @@ -590,6 +604,10 @@ struct jit_uni_reduce_kernel_f32 : public jit_uni_reduce_kernel, public jit_gene case memory::s32: uni_vmovups(op, vmm_dst); break; + case memory::bf16: + vcvtneps2bf16(ymm_dst, vmm_dst); + uni_vmovups(op, ymm_dst); + break; case memory::s8: if (isa == avx512_common) { vmaxps(vmm_dst, vmm_zero, vmm_dst); @@ -625,7 +643,7 @@ struct jit_uni_reduce_kernel_f32 : public jit_uni_reduce_kernel, public jit_gene } inline void store_scalar(const Xbyak::Address &op, Xmm xmm_dst, memory::data_type dst_dt) { - if (dst_dt != memory::f32) { + if (!isFloatCompatible(dst_dt)) { uni_vcvtps2dq(xmm_dst, xmm_dst); } @@ -634,6 +652,10 @@ struct jit_uni_reduce_kernel_f32 : public jit_uni_reduce_kernel, public jit_gene case memory::s32: movss(op, xmm_dst); break; + case memory::bf16: + uni_vpsrld(xmm_dst, xmm_dst, 16); + pextrw(op, xmm_dst, 0x0); + break; case memory::s8: uni_vpackssdw(xmm_dst, xmm_dst, xmm_dst); uni_vpacksswb(xmm_dst, xmm_dst, xmm_dst); @@ -680,9 +702,10 @@ struct jit_uni_reduce_kernel_f32 : public jit_uni_reduce_kernel, public jit_gene horiz_ps(xmm_dst, xmm_aux3); // dst:f(1,2,3,4),... switch (dst_dt) { case memory::f32: - movss(xmm_aux3, ptr[reg_dst]); + case memory::bf16: + load_scalar(xmm_aux3, ptr[reg_dst], dst_dt); horiz_ps(xmm_dst, xmm_aux3); - movss(ptr[reg_dst], xmm_dst); + store_scalar(ptr[reg_dst], xmm_dst, dst_dt); break; case memory::s32: movss(xmm_aux3, ptr[reg_dst]); @@ -981,6 +1004,10 @@ struct jit_uni_reduce_post_kernel_f32 : public jit_uni_reduce_post_kernel, publi case memory::s32: uni_vmovups(vmm_src, op); break; + case memory::bf16: + uni_vpmovzxwd(vmm_src, op); + uni_vpslld(vmm_src, vmm_src, 16); + break; case memory::s8: uni_vpmovsxbd(vmm_src, op); break; @@ -991,7 +1018,7 @@ struct jit_uni_reduce_post_kernel_f32 : public jit_uni_reduce_post_kernel, publi assert(!"unknown src_dt"); } - if (src_dt != memory::f32) + if (!isFloatCompatible(src_dt)) uni_vcvtdq2ps(vmm_src, vmm_src); } @@ -1001,6 +1028,10 @@ struct jit_uni_reduce_post_kernel_f32 : public jit_uni_reduce_post_kernel, publi case memory::s32: movss(xmm_src, op); break; + case memory::bf16: + pinsrw(xmm_src, op, 0x0); + uni_vpslld(xmm_src, xmm_src, 16); + break; case memory::s8: movsx(reg_tmp_32, op); movq(xmm_src, reg_tmp_64); @@ -1013,7 +1044,7 @@ struct jit_uni_reduce_post_kernel_f32 : public jit_uni_reduce_post_kernel, publi assert(!"unknown src_dt"); } - if (src_dt != data_type::f32) { + if (!isFloatCompatible(src_dt)) { uni_vcvtdq2ps(xmm_src, xmm_src); } } @@ -1022,7 +1053,7 @@ struct jit_uni_reduce_post_kernel_f32 : public jit_uni_reduce_post_kernel, publi Xmm xmm_dst = Xmm(vmm_dst.getIdx()); Ymm ymm_dst = Ymm(vmm_dst.getIdx()); - if (dst_dt != memory::f32) { + if (!isFloatCompatible(dst_dt)) { uni_vcvtps2dq(vmm_dst, vmm_dst); } @@ -1031,6 +1062,10 @@ struct jit_uni_reduce_post_kernel_f32 : public jit_uni_reduce_post_kernel, publi case memory::s32: uni_vmovups(op, vmm_dst); break; + case memory::bf16: + vcvtneps2bf16(ymm_dst, vmm_dst); + uni_vmovups(op, ymm_dst); + break; case memory::s8: if (isa == avx512_common) { vmaxps(vmm_dst, vmm_zero, vmm_dst); @@ -1066,7 +1101,7 @@ struct jit_uni_reduce_post_kernel_f32 : public jit_uni_reduce_post_kernel, 
publi } inline void store_scalar(const Xbyak::Address &op, Xmm xmm_dst, memory::data_type dst_dt) { - if (dst_dt != memory::f32) { + if (!isFloatCompatible(dst_dt)) { uni_vcvtps2dq(xmm_dst, xmm_dst); } @@ -1075,6 +1110,10 @@ struct jit_uni_reduce_post_kernel_f32 : public jit_uni_reduce_post_kernel, publi case memory::s32: movss(op, xmm_dst); break; + case memory::bf16: + uni_vpsrld(xmm_dst, xmm_dst, 16); + pextrw(op, xmm_dst, 0x0); + break; case memory::s8: uni_vpackssdw(xmm_dst, xmm_dst, xmm_dst); uni_vpacksswb(xmm_dst, xmm_dst, xmm_dst); @@ -1123,6 +1162,10 @@ struct jit_uni_reduce_post_kernel_f32 : public jit_uni_reduce_post_kernel, publi case memory::f32: movss(ptr[reg_dst], xmm_dst); break; + case memory::bf16: + uni_vpsrld(xmm_dst, xmm_dst, 16); + pextrw(ptr[reg_dst], xmm_dst, 0x0); + break; case memory::s32: uni_vcvtps2dq(xmm_dst, xmm_dst); movss(ptr[reg_dst], xmm_dst); @@ -1173,9 +1216,10 @@ struct jit_uni_reduce_post_kernel_f32 : public jit_uni_reduce_post_kernel, publi horiz_ps(xmm_dst, xmm_aux3); // dst:f(1,2,3,4),... switch (dst_dt) { case memory::f32: - movss(xmm_aux3, ptr[reg_dst]); + case memory::bf16: + load_scalar(xmm_aux3, ptr[reg_dst], dst_dt); horiz_ps(xmm_dst, xmm_aux3); - movss(ptr[reg_dst], xmm_dst); + store_scalar(ptr[reg_dst], xmm_dst, dst_dt); break; case memory::s32: movss(xmm_aux3, ptr[reg_dst]); @@ -1292,11 +1336,33 @@ void MKLDNNReduceNode::initSupportedPrimitiveDescriptors() { if (!supportedPrimitiveDescriptors.empty()) return; + static const Precision supportedPrecisions[] = { + Precision::FP32, + Precision::BF16, + Precision::I32, + Precision::I8, + Precision::U8 + }; + Precision inputPrecision = getCnnLayer()->insData[REDUCE_DATA].lock()->getPrecision(); Precision outputPrecision = getCnnLayer()->outData[0]->getPrecision(); - if (inputPrecision == Precision::BF16) inputPrecision = Precision::FP32; - if (outputPrecision == Precision::BF16) outputPrecision = Precision::FP32; + jit_mode = (mayiuse(cpu::sse42)) && getParentEdgeAt(REDUCE_DATA)->getDims().ndims() <= 5 && + std::find(std::begin(supportedPrecisions), std::end(supportedPrecisions), inputPrecision) != std::end(supportedPrecisions) && + std::find(std::begin(supportedPrecisions), std::end(supportedPrecisions), outputPrecision) != std::end(supportedPrecisions); + + if (jit_mode) { + // Since in jit mode we use the output memory as an intermediate accumulator for certain reduce modes, we can't use BF16 output precision due to + // the possible accuracy loss. Therefore, for such modes, we will change the output precision to FP32.
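The comment above captures the key constraint of the Reduce change: modes such as Sum or Mean accumulate partial results directly in the output buffer, so keeping that accumulator in bfloat16 compounds rounding error on every step, whereas And/Or/Max/Min only compare against or overwrite the stored value. The following standalone C++ sketch (illustration only, not part of the patch; truncate_to_bf16 is a hand-rolled stand-in that mirrors the scalar store path's shift-right-by-16) shows how quickly a bf16-resident accumulator stalls:

#include <cstdint>
#include <cstdio>
#include <cstring>

// bf16 keeps only the top 16 bits of the fp32 pattern (8 significant bits);
// this mimics the jit scalar store (uni_vpsrld by 16 + pextrw), which truncates.
static float truncate_to_bf16(float x) {
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    bits &= 0xFFFF0000u;                 // drop the low 16 bits of the pattern
    std::memcpy(&x, &bits, sizeof(x));
    return x;
}

int main() {
    float acc_fp32 = 0.0f, acc_bf16 = 0.0f;
    for (int i = 0; i < 10000; ++i) {
        acc_fp32 += 0.001f;                              // fp32 accumulator: ends up near 10.0
        acc_bf16 = truncate_to_bf16(acc_bf16 + 0.001f);  // accumulator round-tripped through bf16 each step
    }
    // The bf16 accumulator stalls around 0.25: once the increment falls below one bf16 ulp
    // of the running total, every addition is truncated away.
    std::printf("fp32: %f  bf16: %f\n", acc_fp32, acc_bf16);
    return 0;
}

This is also why the check below keeps BF16 output only when avx512_core_bf16 is available and the mode is And, Or, Max or Min; every other mode falls back to an FP32 output.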
+ if (Precision::BF16 == outputPrecision) { + if (!mayiuse(avx512_core_bf16)) { + outputPrecision = Precision::FP32; + } else if (reduceMode != Reduce::And && reduceMode != Reduce::Or && + reduceMode != Reduce::Max && reduceMode != Reduce::Min) { + outputPrecision = Precision::FP32; + } + } + } auto inputDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(inputPrecision); auto outputDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(outputPrecision); @@ -1317,37 +1383,42 @@ void MKLDNNReduceNode::initSupportedPrimitiveDescriptors() { config.inConfs[REDUCE_INDEXES].inPlace = -1; config.outConfs[0].inPlace = -1; - auto pushDesc = [&](memory::format inFormat, memory::format outFormat, memory::data_type inDataType, memory::data_type outDataType) { + auto pushDesc = [&](memory::format inFormat, memory::format outFormat, memory::data_type inDataType, + memory::data_type outDataType, impl_desc_type impl_type) { config.inConfs[REDUCE_DATA].desc = MKLDNNMemoryDesc(getParentEdgeAt(REDUCE_DATA)->getDims(), inDataType, inFormat); config.inConfs[REDUCE_INDEXES].desc = MKLDNNMemoryDesc(getParentEdgeAt(REDUCE_INDEXES)->getDims(), memory::s32, memory::x); config.outConfs[0].desc = MKLDNNMemoryDesc(getChildEdgeAt(0)->getDims(), outDataType, outFormat); - supportedPrimitiveDescriptors.push_back({config, impl_desc_type::unknown, outFormat}); + supportedPrimitiveDescriptors.push_back({config, impl_type, outFormat}); }; - jit_mode = (mayiuse(cpu::sse42)) && getParentEdgeAt(REDUCE_DATA)->getDims().ndims() <= 5 && - (inputPrecision == Precision::FP32 || inputPrecision == Precision::I32 || inputPrecision == Precision::U8 || inputPrecision == Precision::I8) && - (outputPrecision == Precision::FP32 || outputPrecision == Precision::I32 || outputPrecision == Precision::U8 || outputPrecision == Precision::I8); if (jit_mode) { + impl_desc_type impl_type = impl_desc_type::jit_sse42; + if (mayiuse(cpu::avx512_common)) { + impl_type = impl_desc_type::jit_avx512; + } else if (mayiuse(cpu::avx2)) { + impl_type = impl_desc_type::jit_avx2; + } + pushDesc(MKLDNNMemory::GetPlainFormat(memory::dims(getParentEdgeAt(REDUCE_DATA)->getDims().ndims())), - MKLDNNMemory::GetPlainFormat(memory::dims(getChildEdgeAt(0)->getDims().ndims())), inputDataType, outputDataType); + MKLDNNMemory::GetPlainFormat(memory::dims(getChildEdgeAt(0)->getDims().ndims())), inputDataType, outputDataType, impl_type); if (keep_dims) { if (getParentEdgeAt(REDUCE_DATA)->getDims().ndims() == 4 && getParentEdgeAt(REDUCE_DATA)->getDims().ToSizeVector()[1] > 1) { if (mayiuse(cpu::avx512_common)) { - pushDesc(memory::nChw16c, memory::nChw16c, inputDataType, outputDataType); + pushDesc(memory::nChw16c, memory::nChw16c, inputDataType, outputDataType, impl_type); } else if (mayiuse(cpu::avx2) || mayiuse(cpu::sse42)) { - pushDesc(memory::nChw8c, memory::nChw8c, inputDataType, outputDataType); + pushDesc(memory::nChw8c, memory::nChw8c, inputDataType, outputDataType, impl_type); } } else if (getParentEdgeAt(REDUCE_DATA)->getDims().ndims() == 5 && getParentEdgeAt(REDUCE_DATA)->getDims().ToSizeVector()[1] > 1) { if (mayiuse(cpu::avx512_common)) { - pushDesc(memory::nCdhw16c, memory::nCdhw16c, inputDataType, outputDataType); + pushDesc(memory::nCdhw16c, memory::nCdhw16c, inputDataType, outputDataType, impl_type); } else if (mayiuse(cpu::avx2) || mayiuse(cpu::sse42)) { - pushDesc(memory::nCdhw8c, memory::nCdhw8c, inputDataType, outputDataType); + pushDesc(memory::nCdhw8c, memory::nCdhw8c, inputDataType, outputDataType, impl_type); } } } } else { 
pushDesc(MKLDNNMemory::GetPlainFormat(memory::dims(getParentEdgeAt(REDUCE_DATA)->getDims().ndims())), - MKLDNNMemory::GetPlainFormat(memory::dims(getChildEdgeAt(0)->getDims().ndims())), memory::f32, memory::f32); + MKLDNNMemory::GetPlainFormat(memory::dims(getChildEdgeAt(0)->getDims().ndims())), memory::f32, memory::f32, impl_desc_type::ref); } } @@ -1714,6 +1785,9 @@ inline void MKLDNNReduceNode::init_dst_data(uint8_t *out_ptr, size_t dst_size) { } else if (output_prec == Precision::I32) { auto out_p = reinterpret_cast(out_ptr); parallel_for(dst_size / dst_data_size, [&](size_t i) { out_p[i] = static_cast(1); }); + } else if (output_prec == Precision::BF16) { + auto out_p = reinterpret_cast(out_ptr); + parallel_for(dst_size / dst_data_size, [&](size_t i) { out_p[i] = static_cast(1); }); } else if (output_prec == Precision::U8) { auto out_p = reinterpret_cast(out_ptr); parallel_for(dst_size / dst_data_size, [&](size_t i) { out_p[i] = static_cast(1); }); @@ -1729,6 +1803,9 @@ inline void MKLDNNReduceNode::init_dst_data(uint8_t *out_ptr, size_t dst_size) { } else if (output_prec == Precision::I32) { auto out_p = reinterpret_cast(out_ptr); parallel_for(dst_size / dst_data_size, [&](size_t i) { out_p[i] = std::numeric_limits::min(); }); + } else if (output_prec == Precision::BF16) { + auto out_p = reinterpret_cast(out_ptr); + parallel_for(dst_size / dst_data_size, [&](size_t i) { out_p[i] = std::numeric_limits::min(); }); } else if (output_prec == Precision::U8) { auto out_p = reinterpret_cast(out_ptr); parallel_for(dst_size / dst_data_size, [&](size_t i) { out_p[i] = std::numeric_limits::min(); }); @@ -1744,6 +1821,9 @@ inline void MKLDNNReduceNode::init_dst_data(uint8_t *out_ptr, size_t dst_size) { } else if (output_prec == Precision::I32) { auto out_p = reinterpret_cast(out_ptr); parallel_for(dst_size / dst_data_size, [&](size_t i) { out_p[i] = std::numeric_limits::max(); }); + } else if (output_prec == Precision::BF16) { + auto out_p = reinterpret_cast(out_ptr); + parallel_for(dst_size / dst_data_size, [&](size_t i) { out_p[i] = std::numeric_limits::max(); }); } else if (output_prec == Precision::U8) { auto out_p = reinterpret_cast(out_ptr); parallel_for(dst_size / dst_data_size, [&](size_t i) { out_p[i] = std::numeric_limits::max(); }); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_resample_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_resample_node.cpp index 035b4526548783..7ae7ce809c87d8 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_resample_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_resample_node.cpp @@ -12,6 +12,7 @@ #include #include #include +#include "utils/bfloat16.hpp" #include #include "ie_parallel.hpp" #include @@ -33,6 +34,14 @@ using namespace Xbyak; #define GET_OFF(field) offsetof(jit_resample_call_args, field) +static inline bool isFloatCompatible(Precision prc) { + return Precision::FP32 == prc || Precision::BF16 == prc; +} + +static inline bool isFloatCompatible(memory::data_type type) { + return memory::f32 == type || memory::bf16 == type; +} + template struct jit_uni_resample_nearest_kernel_f32 : public jit_uni_resample_nearest_kernel, public jit_generator { DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_uni_resample_nearest_kernel_f32) @@ -73,7 +82,7 @@ struct jit_uni_resample_nearest_kernel_f32 : public jit_uni_resample_nearest_ker if (isa == cpu::avx512_common) uni_vpxor(vmm_zero, vmm_zero, vmm_zero); - int blk_size = vlen / sizeof(float); + int blk_size = jcp_.src_dt == memory::bf16 ? 
16 : (vlen / sizeof(float)); if (isa == cpu::sse42) blk_size *= 2; @@ -197,11 +206,15 @@ struct jit_uni_resample_nearest_kernel_f32 : public jit_uni_resample_nearest_ker case memory::u8: uni_vpmovzxbd(vmm_src, op); break; + case memory::bf16: + uni_vpmovzxwd(vmm_src, op); + uni_vpslld(vmm_src, vmm_src, 16); + break; default: assert(!"unknown dst_dt"); } - if (src_dt != memory::f32) + if (!isFloatCompatible(src_dt)) uni_vcvtdq2ps(vmm_src, vmm_src); } @@ -211,6 +224,9 @@ struct jit_uni_resample_nearest_kernel_f32 : public jit_uni_resample_nearest_ker if (dst_dt == memory::f32) { uni_vmovups(op, vmm_dst); + } else if (dst_dt == memory::bf16) { + vcvtneps2bf16(ymm_dst, vmm_dst); + vmovdqu16(op, ymm_dst); } else if (dst_dt == memory::u8) { uni_vcvtps2dq(vmm_dst, vmm_dst); if (isa == cpu::avx512_common) { @@ -262,8 +278,7 @@ struct jit_uni_resample_nearest_kernel_f32 : public jit_uni_resample_nearest_ker depthwise_inj_idx++; } else if (post_op.is_quantization()) { bool do_dequantization = post_op.quantization.alg == alg_kind::quantization_quantize_dequantize; - bool do_rounding = do_dequantization || dst_dt == memory::f32 || i != p.len_ - 1; - + bool do_rounding = do_dequantization || isFloatCompatible(dst_dt) || i != p.len_ - 1; int s_idx = vmm_val.getIdx(); quantization_injectors[quantization_inj_idx]->init_crop_ptrs(reg_oc_off); @@ -320,12 +335,11 @@ void MKLDNNResampleNode::initSupportedPrimitiveDescriptors() { } } - if (inputPrecision == Precision::BF16) { - inputPrecision = Precision::FP32; - } - - if (outputPrecision == Precision::BF16) { - outputPrecision = Precision::FP32; + if (inputPrecision == Precision::BF16 || outputPrecision == Precision::BF16) { + if (!mayiuse(avx512_core_bf16)) + inputPrecision = outputPrecision = Precision::FP32; + else + inputPrecision = outputPrecision = Precision::BF16; } auto inputDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(inputPrecision); @@ -358,7 +372,7 @@ void MKLDNNResampleNode::initSupportedPrimitiveDescriptors() { pushDesc(memory::ndhwc); } - if (inputPrecision == Precision::FP32 && outputPrecision == Precision::FP32) { + if (isFloatCompatible(inputPrecision) && isFloatCompatible(outputPrecision)) { if (getParentEdgeAt(0)->getDims().ndims() == 4) { if (mayiuse(cpu::avx512_common)) { pushDesc(memory::nChw16c); @@ -456,9 +470,6 @@ void MKLDNNResampleNode::execute(mkldnn::stream strm) { Layout layout = getParentEdgeAt(0)->getDesc().getLayout(); - const auto src_data = reinterpret_cast(srcMemPtr->GetData()); - auto dst_data = reinterpret_cast(dstMemPtr->GetData()); - SizeVector src_dim = getParentEdgeAt(0)->getDesc().getDims(); SizeVector dst_dim = getChildEdgeAt(0)->getDesc().getDims(); @@ -479,7 +490,17 @@ void MKLDNNResampleNode::execute(mkldnn::stream strm) { if (type == "caffe.ResampleParameter.NEAREST") { if (layout == NCHW || layout == NCDHW) { - NearestNeighbor_PLN(src_data, dst_data, N, C, ID, IH, IW, fx, fy, fz, OD, OH, OW); + if (output_prec == Precision::FP32) { + auto src_data = reinterpret_cast(srcMemPtr->GetData()); + auto dst_data = reinterpret_cast(dstMemPtr->GetData()); + NearestNeighbor_PLN(src_data, dst_data, N, C, ID, IH, IW, fx, fy, fz, OD, OH, OW); + } else if (output_prec == Precision::BF16) { + auto src_data = reinterpret_cast(srcMemPtr->GetData()); + auto dst_data = reinterpret_cast(dstMemPtr->GetData()); + NearestNeighbor_PLN(src_data, dst_data, N, C, ID, IH, IW, fx, fy, fz, OD, OH, OW); + } else { + THROW_IE_EXCEPTION << "Unsupported output precision: " << output_prec.name(); + } } else { if (output_prec == 
Precision::U8) { auto dst_data = reinterpret_cast(dstMemPtr->GetData()); @@ -492,6 +513,8 @@ void MKLDNNResampleNode::execute(mkldnn::stream strm) { } else if (input_prec == Precision::FP32) { auto src_data = reinterpret_cast(srcMemPtr->GetData()); NearestNeighbor_BLK(src_data, dst_data, N, C, ID, IH, IW, fx, fy, fz, OD, OH, OW); + } else { + THROW_IE_EXCEPTION << "Unsupported output precision: " << output_prec.name(); } } else if (output_prec == Precision::I8) { auto dst_data = reinterpret_cast(dstMemPtr->GetData()); @@ -504,6 +527,8 @@ void MKLDNNResampleNode::execute(mkldnn::stream strm) { } else if (input_prec == Precision::FP32) { auto src_data = reinterpret_cast(srcMemPtr->GetData()); NearestNeighbor_BLK(src_data, dst_data, N, C, ID, IH, IW, fx, fy, fz, OD, OH, OW); + } else { + THROW_IE_EXCEPTION << "Unsupported output precision: " << output_prec.name(); } } else if (output_prec == Precision::FP32) { auto dst_data = reinterpret_cast(dstMemPtr->GetData()); @@ -516,7 +541,15 @@ void MKLDNNResampleNode::execute(mkldnn::stream strm) { } else if (input_prec == Precision::FP32) { auto src_data = reinterpret_cast(srcMemPtr->GetData()); NearestNeighbor_BLK(src_data, dst_data, N, C, ID, IH, IW, fx, fy, fz, OD, OH, OW); + } else { + THROW_IE_EXCEPTION << "Unsupported output precision: " << output_prec.name(); } + } else if (output_prec == Precision::BF16) { + auto src_data = reinterpret_cast(srcMemPtr->GetData()); + auto dst_data = reinterpret_cast(dstMemPtr->GetData()); + NearestNeighbor_BLK(src_data, dst_data, N, C, ID, IH, IW, fx, fy, fz, OD, OH, OW); + } else { + THROW_IE_EXCEPTION << "Unsupported output precision: " << output_prec.name(); } } } else if (type == "caffe.ResampleParameter.LINEAR") { @@ -535,12 +568,22 @@ void MKLDNNResampleNode::execute(mkldnn::stream strm) { auto src_data = reinterpret_cast(srcMemPtr->GetData()); auto dst_data = reinterpret_cast(dstMemPtr->GetData()); LinearInterpolation(src_data, dst_data, N, C, ID, IH, IW, fx, fy, fz, OD, OH, OW, kernel_width, isDownsample && antialias); + } else if (input_prec == Precision::BF16) { + auto src_data = reinterpret_cast(srcMemPtr->GetData()); + auto dst_data = reinterpret_cast(dstMemPtr->GetData()); + LinearInterpolation(src_data, dst_data, N, C, ID, IH, IW, fx, fy, fz, OD, OH, OW, kernel_width, + isDownsample && antialias); + } else { + THROW_IE_EXCEPTION << "Unsupported input precision: " << input_prec.name(); } + } else { + THROW_IE_EXCEPTION << "Unsupported resample parameter type: " << type; } } // f32 and no fused, f32->input is f32, no fuse->output is f32 -void MKLDNNResampleNode::NearestNeighbor_PLN(const float *in_ptr_, float *out_ptr_, int B, int C, int ID, int IH, int IW, +template +void MKLDNNResampleNode::NearestNeighbor_PLN(const in_data_t *in_ptr_, out_data_t *out_ptr_, int B, int C, int ID, int IH, int IW, float fx, float fy, float fz, int OD, int OH, int OW) { std::vector index_buffer(OD * OH * OW); for (int oz = 0; oz < OD; oz++) { @@ -560,8 +603,8 @@ void MKLDNNResampleNode::NearestNeighbor_PLN(const float *in_ptr_, float *out_pt } if (resample_nearest_kernel) { parallel_for2d(B, C, [&](size_t b, size_t c) { - const float *in_ptr = in_ptr_ + IW * IH * ID * C * b + IW * IH * ID * c; - float *out_ptr = out_ptr_ + OW * OH * OD * C * b + OW * OH * OD * c; + const in_data_t *in_ptr = in_ptr_ + IW * IH * ID * C * b + IW * IH * ID * c; + out_data_t *out_ptr = out_ptr_ + OW * OH * OD * C * b + OW * OH * OD * c; // for OW*OH*OD auto arg = jit_resample_call_args(); @@ -580,8 +623,8 @@ void 
MKLDNNResampleNode::NearestNeighbor_PLN(const float *in_ptr_, float *out_pt }); } else { parallel_for2d(B, C, [&](size_t b, size_t c) { - const float *in_ptr = in_ptr_ + IW * IH * ID * C * b + IW * IH * ID * c; - float *out_ptr = out_ptr_ + OW * OH * OD * C * b + OW * OH * OD * c; + const in_data_t *in_ptr = in_ptr_ + IW * IH * ID * C * b + IW * IH * ID * c; + out_data_t *out_ptr = out_ptr_ + OW * OH * OD * C * b + OW * OH * OD * c; for (int i_dst = 0; i_dst < OW * OH * OD; i_dst++) { out_ptr[i_dst] = in_ptr[index_buffer[i_dst]]; @@ -646,7 +689,7 @@ void MKLDNNResampleNode::NearestNeighbor_BLK(const in_data_t *in_ptr_, out_data_ for (int c = tail; c < C; c++) { float dst_value = static_cast(in_ptr_dhw[c]); apply_post_ops_scalar(dst_value, c); - if (output_prec == Precision::FP32) { + if (isFloatCompatible(output_prec)) { out_ptr_dhw[c] = dst_value; } else if (output_prec == Precision::U8) { out_ptr_dhw[c] = (dst_value >= 0) ? lroundf(dst_value) : 0; @@ -671,7 +714,7 @@ void MKLDNNResampleNode::NearestNeighbor_BLK(const in_data_t *in_ptr_, out_data_ for (int c = 0; c < C; c++) { float dst_value = static_cast(in_ptr_dhw[c]); apply_post_ops_scalar(dst_value, c); - if (output_prec == Precision::FP32) { + if (isFloatCompatible(output_prec)) { out_ptr_dhw[c] = dst_value; } else if (output_prec == Precision::U8) { out_ptr_dhw[c] = (dst_value >= 0) ? lroundf(dst_value) : 0; @@ -723,7 +766,7 @@ void MKLDNNResampleNode::NearestNeighbor_BLK(const in_data_t *in_ptr_, out_data_ for (int blk = 0; blk < blk_size; blk++) { float dst_value = static_cast(in_ptr_cbdhw[blk]); apply_post_ops_scalar(dst_value, cb * blk_size + blk); - if (output_prec == Precision::FP32) { + if (isFloatCompatible(output_prec)) { out_ptr_cbdhw[blk] = dst_value; } else if (output_prec == Precision::U8) { out_ptr_cbdhw[blk] = (dst_value >= 0) ? lroundf(dst_value) : 0; @@ -749,8 +792,8 @@ void MKLDNNResampleNode::LinearInterpolation(const in_data_t *in_ptr_, out_data_ float fx, float fy, float fz, int OD, int OH, int OW, int kernel_width, bool antialias) { if (IW == OW && IH == OH && ID == OD) { size_t size = B * C * ID * IH * IW; - if (input_prec == Precision::FP32) { - size *= sizeof(float); + if (isFloatCompatible(input_prec)) { + size *= sizeof(in_data_t); } cpu_memcpy(out_ptr_, in_ptr_, size); return; @@ -816,7 +859,7 @@ void MKLDNNResampleNode::LinearInterpolation(const in_data_t *in_ptr_, out_data_ out_ptr_ncdh[ox] = 0; } else { float dst_value = sum / wsum; - if (output_prec == Precision::FP32) { + if (isFloatCompatible(output_prec)) { out_ptr_ncdh[ox] = dst_value; } else if (output_prec == Precision::U8) { out_ptr_ncdh[ox] = (dst_value >= 0) ? 
lroundf(dst_value) : 0; @@ -846,7 +889,7 @@ inline void MKLDNNResampleNode::apply_post_ops_scalar(float &dst_value, int inde } else if (post_op.is_quantization()) { bool do_dequantization = post_op.quantization.alg == alg_kind::quantization_quantize_dequantize; - bool do_rounding = do_dequantization || output_prec == Precision::FP32 || + bool do_rounding = do_dequantization || isFloatCompatible(output_prec) || i != p.len_ - 1; auto quant = post_op.quantization; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_resample_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_resample_node.h index 15603f158ab371..47137a0dfefee4 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_resample_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_resample_node.h @@ -78,7 +78,8 @@ class MKLDNNResampleNode : public MKLDNNNode { } private: - void NearestNeighbor_PLN(const float *in_ptr_, float *out_ptr_, int B, int C, int ID, int IH, int IW, + template + void NearestNeighbor_PLN(const in_data_t *in_ptr_, out_data_t *out_ptr_, int B, int C, int ID, int IH, int IW, float fx, float fy, float fz, int OD, int OH, int OW); template void NearestNeighbor_BLK(const in_data_t *in_ptr_, out_data_t *out_ptr_, int B, int C, int ID, int IH, int IW, diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.cpp index fe34f812c623ed..b83159f28657eb 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.cpp @@ -14,21 +14,11 @@ #include #include "ie_parallel.hpp" #include - -#include "jit_generator.hpp" -#include "jit_uni_eltwise.hpp" -#include "jit_uni_depthwise.hpp" -#include "jit_uni_quantization.hpp" #include "common/cpu_memcpy.h" using namespace mkldnn; using namespace MKLDNNPlugin; using namespace InferenceEngine; -using namespace mkldnn::impl; -using namespace mkldnn::impl::cpu; -using namespace mkldnn::impl::utils; -using namespace Xbyak; - MKLDNNScatterUpdateNode::MKLDNNScatterUpdateNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache) : MKLDNNNode(layer, eng, cache), dataSize(0lu), indicesSize(0lu), axisSize(0lu), diff --git a/inference-engine/src/mkldnn_plugin/nodes/non_max_suppression.cpp b/inference-engine/src/mkldnn_plugin/nodes/non_max_suppression.cpp index 9abad7b1878eaa..62bff6bccd9e5c 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/non_max_suppression.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/non_max_suppression.cpp @@ -400,6 +400,9 @@ class NonMaxSuppressionImpl: public ExtLayerBase { } } std::fill(selectedIndicesPtr, selectedIndicesPtr + (selectedBoxesNum - idx) * selectedIndicesStride, -1); + if (outputs.size() > NMS_SELECTEDSCORES) { + std::fill(selectedScoresPtr, selectedScoresPtr + (selectedBoxesNum - idx) * selectedIndicesStride, -1.f); + } if (outputs.size() > NMS_VALIDOUTPUTS) *valid_outputs = static_cast(validOutputs); diff --git a/inference-engine/src/mkldnn_plugin/nodes/one_hot.cpp b/inference-engine/src/mkldnn_plugin/nodes/one_hot.cpp index 56d43ea7872a3c..e470a48ce7f41e 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/one_hot.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/one_hot.cpp @@ -36,6 +36,8 @@ class OneHotImpl: public ExtLayerBase { // check a precision of the input tensor input_precision = layer->insData[0].lock()->getTensorDesc().getPrecision(); + if (input_precision == 
Precision::BF16) + input_precision = Precision::FP32; if (input_precision != Precision::I32 && input_precision != Precision::FP32) { THROW_IE_EXCEPTION << layer->name << " Incorrect input precision for the input. Only I32 and FP32 are supported!"; } diff --git a/inference-engine/src/mkldnn_plugin/nodes/powerfile.cpp b/inference-engine/src/mkldnn_plugin/nodes/powerfile.cpp index c0a0cbf9ab0fe4..6aa503ae018317 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/powerfile.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/powerfile.cpp @@ -27,7 +27,7 @@ class PowerFileImpl: public ExtLayerBase { shift_.push_back(1); shift_.push_back(0); - addConfig(layer, {DataConfigurator(ConfLayout::PLN)}, {DataConfigurator(ConfLayout::PLN)}); + addConfig(layer, {DataConfigurator(ConfLayout::PLN, Precision::FP32)}, {DataConfigurator(ConfLayout::PLN, Precision::FP32)}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/priorbox.cpp b/inference-engine/src/mkldnn_plugin/nodes/priorbox.cpp index d372c76074670e..74c5d2b5461c10 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/priorbox.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/priorbox.cpp @@ -116,7 +116,7 @@ class PriorBoxImpl: public ExtLayerBase { THROW_IE_EXCEPTION << "Wrong number of variance values. Not less than 1 and more than 4 variance values."; } - addConfig(layer, {{ConfLayout::ANY, true}, {ConfLayout::ANY, true}}, {{ConfLayout::PLN, true}}); + addConfig(layer, {{ConfLayout::ANY, true}, {ConfLayout::ANY, true}}, {{ConfLayout::PLN, true, -1, Precision::FP32}}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/priorbox_clustered.cpp b/inference-engine/src/mkldnn_plugin/nodes/priorbox_clustered.cpp index 954f7d6fed6628..1fcd1df6c395a0 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/priorbox_clustered.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/priorbox_clustered.cpp @@ -32,7 +32,7 @@ class PriorBoxClusteredImpl: public ExtLayerBase { step_w_ = layer->GetParamAsFloat("step_w", 0); offset_ = layer->GetParamAsFloat("offset"); - addConfig(layer, {{ConfLayout::PLN, true}, {ConfLayout::PLN, true}}, {{ConfLayout::PLN, true}}); + addConfig(layer, {{ConfLayout::PLN, true}, {ConfLayout::PLN, true}}, {{ConfLayout::PLN, true, -1, Precision::FP32}}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/priorgridgenerator_onnx.cpp b/inference-engine/src/mkldnn_plugin/nodes/priorgridgenerator_onnx.cpp index c783797ea26413..c98e7475f2364e 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/priorgridgenerator_onnx.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/priorgridgenerator_onnx.cpp @@ -45,8 +45,8 @@ class ExperimentalDetectronPriorGridGeneratorImpl: public ExtLayerBase { stride_w_ = layer->GetParamAsFloat("stride_x", 0); addConfig(layer, - {DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::ANY), DataConfigurator(ConfLayout::ANY)}, - {DataConfigurator(ConfLayout::PLN)}); + {DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::ANY), DataConfigurator(ConfLayout::ANY)}, + {DataConfigurator(ConfLayout::PLN, Precision::FP32)}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/proposal.cpp 
b/inference-engine/src/mkldnn_plugin/nodes/proposal.cpp index 2b1dd1b599c04e..ac9f12ba77b3a5 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/proposal.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/proposal.cpp @@ -119,11 +119,12 @@ class ProposalImpl : public ExtLayerBase { store_prob = layer->outData.size() == 2; if (store_prob) { - addConfig(layer, {DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN)}, - {DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN)}); + addConfig(layer, {DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32), + DataConfigurator(ConfLayout::PLN, Precision::FP32)}, + {DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32)}); } else { - addConfig(layer, {DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN)}, - {DataConfigurator(ConfLayout::PLN)}); + addConfig(layer, {DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32), + DataConfigurator(ConfLayout::PLN, Precision::FP32)}, {DataConfigurator(ConfLayout::PLN, Precision::FP32)}); } } catch (const InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/proposal_onnx.cpp b/inference-engine/src/mkldnn_plugin/nodes/proposal_onnx.cpp index e6370b16a5a173..12e2dd61499ee3 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/proposal_onnx.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/proposal_onnx.cpp @@ -296,9 +296,9 @@ class ONNXCustomProposalImpl : public ExtLayerBase { roi_indices_.resize(post_nms_topn_); addConfig(layer, - {DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), - DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN)}, - {DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN)}); + {DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32), + DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32)}, + {DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32)}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/psroi.cpp b/inference-engine/src/mkldnn_plugin/nodes/psroi.cpp index f42061338b56e4..7b03df16e83cd2 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/psroi.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/psroi.cpp @@ -212,8 +212,8 @@ class PSROIPoolingImpl: public ExtLayerBase { int part_w = w * part_size_ / pooled_width_; int class_id = c / channels_each_class; float trans_x = no_trans_ ? 0 : - bottom_trans[(((n * num_classes + class_id) * 2) * part_size_ + part_h) - * part_size_ + part_w] * trans_std_; + bottom_trans[(((n * num_classes + class_id) * 2) * part_size_ + part_h) + * part_size_ + part_w] * trans_std_; float trans_y = no_trans_ ? 
0 : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size_ + part_h) * part_size_ + part_w] * trans_std_; diff --git a/inference-engine/src/mkldnn_plugin/nodes/range.cpp b/inference-engine/src/mkldnn_plugin/nodes/range.cpp index 3f6c2ecfb41ce0..693f768c83d643 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/range.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/range.cpp @@ -48,13 +48,12 @@ class RangeImpl: public ExtLayerBase { layer->insData[RANGE_LIMIT].lock()->getTensorDesc().getPrecision() == Precision::FP32 && layer->insData[RANGE_DELTA].lock()->getTensorDesc().getPrecision() == Precision::FP32 && layer->outData[0]->getTensorDesc().getPrecision() == Precision::FP32)) { - THROW_IE_EXCEPTION << layer->name << - " 'Start', 'Limit', 'Delta' input scalars and output tensor should have same precision" << - "and only FP32 and I32 are supported!"; + addConfig(layer, { DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32), + DataConfigurator(ConfLayout::PLN, Precision::FP32) }, { DataConfigurator(ConfLayout::PLN, Precision::FP32) }); + } else { + addConfig(layer, { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, + { DataConfigurator(ConfLayout::PLN) }); } - - addConfig(layer, { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN) }); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/region_yolo.cpp b/inference-engine/src/mkldnn_plugin/nodes/region_yolo.cpp index 9bf522a4c60069..c81a36c97399fa 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/region_yolo.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/region_yolo.cpp @@ -5,13 +5,19 @@ #include "base.hpp" #include "common/defs.h" #include "common/softmax.h" +#include "common/cpu_convert.h" #include #include #include #include +#include +#include "utils/bfloat16.hpp" +#include "common/cpu_memcpy.h" #include "jit_generator.hpp" #include "jit_uni_eltwise.hpp" +using namespace MKLDNNPlugin; +using namespace mkldnn; using namespace mkldnn::impl::cpu; using namespace mkldnn::impl::utils; @@ -22,11 +28,18 @@ namespace Cpu { #define GET_OFF(field) offsetof(jit_args_logistic, field) struct jit_args_logistic { - const float* src; - const float* dst; + const void* src; + void* dst; size_t work_amount; }; +struct jit_logistic_config_params { + InferenceEngine::Precision src_dt; + InferenceEngine::Precision dst_dt; + unsigned src_data_size; + unsigned dst_data_size; +}; + struct jit_uni_logistic_kernel { void (*ker_)(const jit_args_logistic *); @@ -40,7 +53,7 @@ template struct jit_uni_logistic_kernel_f32 : public jit_uni_logistic_kernel, public jit_generator { DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_uni_logistic_kernel_f32) - jit_uni_logistic_kernel_f32() : jit_uni_logistic_kernel(), jit_generator() { + jit_uni_logistic_kernel_f32(jit_logistic_config_params jcp) : jit_uni_logistic_kernel(), jit_generator() { exp_injector.reset(new jit_uni_eltwise_injector_f32(this, alg_kind::eltwise_exp, 0.f, 0.f)); this->preamble(); @@ -59,12 +72,12 @@ struct jit_uni_logistic_kernel_f32 : public jit_uni_logistic_kernel, public jit_ cmp(reg_work_amount, step); jl(tail_loop_label, T_NEAR); - uni_vmovups(vmm_src, ptr[reg_src]); + load_vector(vmm_src, ptr[reg_src], jcp.src_dt); compute_kernel(); - uni_vmovups(ptr[reg_dst], vmm_src); + store_vector(ptr[reg_dst], 
vmm_src, jcp.dst_dt); - add(reg_src, step * sizeof(float)); - add(reg_dst, step * sizeof(float)); + add(reg_src, step * jcp.src_data_size); + add(reg_dst, step * jcp.dst_data_size); sub(reg_work_amount, step); jmp(main_loop_label, T_NEAR); @@ -75,12 +88,12 @@ struct jit_uni_logistic_kernel_f32 : public jit_uni_logistic_kernel, public jit_ cmp(reg_work_amount, step); jl(exit_label, T_NEAR); - movss(xmm_src, ptr[reg_src]); + load_scalar(xmm_src, ptr[reg_src], jcp.src_dt); compute_kernel(); - movss(ptr[reg_dst], xmm_src); + store_scalar(ptr[reg_dst], xmm_src, jcp.dst_dt); - add(reg_src, step * sizeof(float)); - add(reg_dst, step * sizeof(float)); + add(reg_src, step * jcp.src_data_size); + add(reg_dst, step * jcp.dst_data_size); sub(reg_work_amount, step); jmp(tail_loop_label, T_NEAR); @@ -164,6 +177,61 @@ struct jit_uni_logistic_kernel_f32 : public jit_uni_logistic_kernel, public jit_ int mask_sign = 0x80000000; // 0 // mask to extract sign int float_1 = 0x3f800000; // 1 // 1.0f } vals_for_logistic_activate; + + inline void load_vector(Vmm vmm_src, const Xbyak::Address &op, InferenceEngine::Precision src_dt) { + switch (src_dt) { + case InferenceEngine::Precision::FP32: + uni_vmovups(vmm_src, op); + break; + case InferenceEngine::Precision::BF16: + vpmovzxwd(vmm_src, op); + uni_vpslld(vmm_src, vmm_src, 16); + break; + default: + assert(!"unknown src_dt"); + } + } + inline void store_vector(const Xbyak::Address &op, Vmm vmm_dst, InferenceEngine::Precision dst_dt) { + Xbyak::Ymm ymm_dst = Xbyak::Ymm(vmm_dst.getIdx()); + + switch (dst_dt) { + case InferenceEngine::Precision::FP32: + uni_vmovups(op, vmm_dst); + break; + case InferenceEngine::Precision::BF16: + vcvtneps2bf16(ymm_dst, vmm_dst); + uni_vmovups(op, ymm_dst); + break; + default: + assert(!"unknown dst_dt"); + } + } + inline void load_scalar(Xbyak::Xmm xmm_src, const Xbyak::Address &op, InferenceEngine::Precision src_dt) { + switch (src_dt) { + case InferenceEngine::Precision::FP32: + movss(xmm_src, op); + break; + case InferenceEngine::Precision::BF16: + pinsrw(xmm_src, op, 0x0); + uni_vpslld(xmm_src, xmm_src, 16); + break; + default: + assert(!"unknown src_dt"); + } + } + inline void store_scalar(const Xbyak::Address &op, Xbyak::Xmm xmm_dst, InferenceEngine::Precision dst_dt) { + switch (dst_dt) { + case InferenceEngine::Precision::FP32: + movss(op, xmm_dst); + break; + case InferenceEngine::Precision::BF16: + uni_vpsrld(xmm_dst, xmm_dst, 16); + pextrw(op, xmm_dst, 0x0); + break; + default: + assert(!"unknown dst_dt"); + } + } }; class RegionYoloImpl: public ExtLayerBase { @@ -173,27 +241,48 @@ class RegionYoloImpl: public ExtLayerBase { if (layer->insData.size() != 1 || layer->outData.empty()) THROW_IE_EXCEPTION << "Incorrect number of input/output edges!"; + input_prec = layer->insData.front().lock()->getPrecision(); + output_prec = layer->outData.front()->getPrecision(); + + if (input_prec != Precision::FP32 && input_prec != Precision::BF16) { + input_prec = Precision::FP32; + } + + if (output_prec != Precision::FP32 && output_prec != Precision::BF16) { + output_prec = Precision::FP32; + } + + if (Precision::BF16 == output_prec) { + if (!mayiuse(avx512_core_bf16)) { + output_prec = Precision::FP32; + } + } + classes = layer->GetParamAsInt("classes"); coords = layer->GetParamAsInt("coords"); num = layer->GetParamAsInt("num"); do_softmax = layer->GetParamAsBool("do_softmax", true); mask = layer->GetParamAsInts("mask", {}); + jit_logistic_config_params jcp; + jcp.src_dt = jcp.dst_dt = output_prec; + jcp.src_data_size = 
jcp.dst_data_size = output_prec.size(); + block_size = 1; if (mayiuse(avx512_common)) { - logistic_kernel.reset(new jit_uni_logistic_kernel_f32()); + logistic_kernel.reset(new jit_uni_logistic_kernel_f32(jcp)); block_size = 16; } else if (mayiuse(avx2)) { - logistic_kernel.reset(new jit_uni_logistic_kernel_f32()); + logistic_kernel.reset(new jit_uni_logistic_kernel_f32(jcp)); block_size = 8; } else if (mayiuse(sse42)) { - logistic_kernel.reset(new jit_uni_logistic_kernel_f32()); + logistic_kernel.reset(new jit_uni_logistic_kernel_f32(jcp)); block_size = 4; } - softmax_kernel.reset(new SoftmaxGeneric()); + softmax_kernel = std::make_shared(input_prec, output_prec); - addConfig(layer, {DataConfigurator(ConfLayout::PLN)}, {DataConfigurator(ConfLayout::PLN)}); + addConfig(layer, {DataConfigurator(ConfLayout::PLN, input_prec)}, {DataConfigurator(ConfLayout::PLN, output_prec)}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } @@ -201,19 +290,12 @@ class RegionYoloImpl: public ExtLayerBase { StatusCode execute(std::vector& inputs, std::vector& outputs, ResponseDesc *resp) noexcept override { - const auto *src_data = inputs[0]->cbuffer().as(); - auto *dst_data = outputs[0]->buffer().as(); - - int mask_size = mask.size(); + size_t mask_size = mask.size(); - int IW = (inputs[0]->getTensorDesc().getDims().size() > 3) ? inputs[0]->getTensorDesc().getDims()[3] : 1; - int IH = (inputs[0]->getTensorDesc().getDims().size() > 2) ? inputs[0]->getTensorDesc().getDims()[2] : 1; - int IC = (inputs[0]->getTensorDesc().getDims().size() > 1) ? inputs[0]->getTensorDesc().getDims()[1] : 1; - int B = (inputs[0]->getTensorDesc().getDims().size() > 0) ? inputs[0]->getTensorDesc().getDims()[0] : 1; - - parallel_for(B * IC * IH * IW, [&](int i) { - dst_data[i] = src_data[i]; - }); + size_t IW = (inputs[0]->getTensorDesc().getDims().size() > 3) ? inputs[0]->getTensorDesc().getDims()[3] : 1; + size_t IH = (inputs[0]->getTensorDesc().getDims().size() > 2) ? inputs[0]->getTensorDesc().getDims()[2] : 1; + size_t IC = (inputs[0]->getTensorDesc().getDims().size() > 1) ? inputs[0]->getTensorDesc().getDims()[1] : 1; + size_t B = (inputs[0]->getTensorDesc().getDims().size() > 0) ? 
inputs[0]->getTensorDesc().getDims()[0] : 1; int end_index = 0; int num_ = 0; @@ -226,26 +308,41 @@ class RegionYoloImpl: public ExtLayerBase { end_index = IW * IH * (classes + 1); num_ = mask_size; } - int inputs_size = IH * IW * num_ * (classes + coords + 1); - int total_size = 2 * IH * IW; + size_t inputs_size = IH * IW * num_ * (classes + coords + 1); + size_t total_size = 2 * IH * IW; + + const auto *src_data = inputs[0]->cbuffer().as(); + auto *dst_data = outputs[0]->buffer().as(); + + try { + cpu_convert(src_data, dst_data, inputs[0]->getTensorDesc().getPrecision(), outputs[0]->getTensorDesc().getPrecision(), B * IC * IH * IW); - for (int b = 0; b < B; b++) { - for (int n = 0; n < num_; n++) { - int index = b * inputs_size + n * IW * IH * (classes + coords + 1); - calculate_logistic(index, total_size, dst_data); + for (int b = 0; b < B; b++) { + for (int n = 0; n < num_; n++) { + size_t index = b * inputs_size + n * IW * IH * (classes + coords + 1); + calculate_logistic(index, total_size, dst_data); - index = b * inputs_size + IW * IH * (n * (classes + coords + 1) + coords); - calculate_logistic(index, end_index, dst_data); + index = b * inputs_size + IW * IH * (n * (classes + coords + 1) + coords); + calculate_logistic(index, end_index, dst_data); + } } - } - if (do_softmax) { - int index = IW * IH * (coords + 1); - int batch_offset = inputs_size / num; - for (int b = 0; b < B * num; b++) - softmax_kernel->execute(src_data + index + b * batch_offset, dst_data + index + b * batch_offset, 1, classes, IH, IW); + if (do_softmax) { + int index = IW * IH * (coords + 1); + int batch_offset = inputs_size / num; + for (int b = 0; b < B * num; b++) { + softmax_kernel->execute(src_data + input_prec.size() * (index + b * batch_offset), + dst_data + output_prec.size() * (index + b * batch_offset), 1, classes, IH, IW); + } + } + } + catch (const std::exception& excp) { + snprintf(resp->msg, sizeof(resp->msg), "%s", excp.what()); + return GENERAL_ERROR; + } + catch(...) 
{ + return GENERAL_ERROR; } - return OK; } @@ -255,6 +352,7 @@ class RegionYoloImpl: public ExtLayerBase { int num; float do_softmax; std::vector mask; + Precision input_prec, output_prec; int block_size; std::shared_ptr logistic_kernel; @@ -281,7 +379,9 @@ class RegionYoloImpl: public ExtLayerBase { return src; } - inline void calculate_logistic(int start_index, int count, float* dst_data) { + + inline void calculate_logistic(size_t start_index, int count, uint8_t * dst_data) { + auto dst_data_size = output_prec.size(); if (logistic_kernel) { int blocks_num = div_up(count, block_size); parallel_for(blocks_num, [&](int ib) { @@ -289,15 +389,24 @@ class RegionYoloImpl: public ExtLayerBase { int work_amount = std::min(count - idx, block_size); auto arg = jit_args_logistic(); - arg.src = dst_data + start_index + idx; - arg.dst = dst_data + start_index + idx; + arg.src = arg.dst = dst_data + dst_data_size * (start_index + idx); arg.work_amount = static_cast(work_amount); (*logistic_kernel)(&arg); }); } else { - for (int i = 0; i < count; i++) { - dst_data[i + start_index] = logistic_scalar(dst_data[i + start_index]); + if (Precision::FP32 == output_prec) { + auto float_dst_data = reinterpret_cast(dst_data); + for (int i = 0; i < count; i++) { + float_dst_data[i + start_index] = logistic_scalar(float_dst_data[i + start_index]); + } + } else if (Precision::BF16 == output_prec) { + auto bf16_dst_data = reinterpret_cast(dst_data); + for (int i = 0; i < count; i++) { + bf16_dst_data[i + start_index] = logistic_scalar(bf16_dst_data[i + start_index]); + } + } else { + THROW_IE_EXCEPTION << "Unsupported precision configuration outPrc=" << output_prec.name(); } } } diff --git a/inference-engine/src/mkldnn_plugin/nodes/reorg_yolo.cpp b/inference-engine/src/mkldnn_plugin/nodes/reorg_yolo.cpp index 0b74fbd4395317..750b3634015315 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/reorg_yolo.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/reorg_yolo.cpp @@ -18,7 +18,7 @@ class ReorgYoloImpl: public ExtLayerBase { stride = layer->GetParamAsInt("stride"); - addConfig(layer, {DataConfigurator(ConfLayout::PLN)}, {DataConfigurator(ConfLayout::PLN)}); + addConfig(layer, {DataConfigurator(ConfLayout::PLN, Precision::FP32)}, {DataConfigurator(ConfLayout::PLN, Precision::FP32)}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/reverse_sequence.cpp b/inference-engine/src/mkldnn_plugin/nodes/reverse_sequence.cpp index a76a0d4ce3dcbe..bcb8d90c28c36e 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/reverse_sequence.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/reverse_sequence.cpp @@ -23,10 +23,12 @@ class ReverseSequenceImpl: public ExtLayerBase { THROW_IE_EXCEPTION << layer->name << " Incorrect number of input/output edges!"; src_dims = layer->insData[REVERSESEQUENCE_DATA].lock()->getTensorDesc().getDims(); + + Precision lengthsPrecision = layer->insData[REVERSESEQUENCE_LENGTHS].lock()->getTensorDesc().getPrecision(); + if (lengthsPrecision != Precision::I32 && lengthsPrecision != Precision::FP32) + lengthsPrecision = Precision::I32; + SizeVector seq_lengths_dims = layer->insData[REVERSESEQUENCE_LENGTHS].lock()->getTensorDesc().getDims(); - if (layer->insData[REVERSESEQUENCE_LENGTHS].lock()->getTensorDesc().getPrecision() != Precision::I32 && - layer->insData[REVERSESEQUENCE_LENGTHS].lock()->getTensorDesc().getPrecision() != Precision::FP32) - THROW_IE_EXCEPTION << layer->name << " Incorrect 'seq_lengths' 
input precision. Only FP32 and I32 are supported!"; if (seq_lengths_dims.size() > 1) THROW_IE_EXCEPTION << layer->name << " Seq_lengths vector should be 1 dimension"; @@ -60,7 +62,7 @@ class ReverseSequenceImpl: public ExtLayerBase { work_amount_dst = srcStrides[0] * src_dims[0]; addConfig(layer, - { DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN) }, + { DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, lengthsPrecision) }, { DataConfigurator(ConfLayout::PLN, Precision::FP32) }); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/roifeatureextractor_onnx.cpp b/inference-engine/src/mkldnn_plugin/nodes/roifeatureextractor_onnx.cpp index 2c82c6e7a7face..f95de39c184ec0 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/roifeatureextractor_onnx.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/roifeatureextractor_onnx.cpp @@ -328,8 +328,8 @@ class ExperimentalDetectronROIFeatureExtractorImpl: public ExtLayerBase { pooled_height_ = output_dim_; pooled_width_ = output_dim_; - std::vector inputs_layouts(layer->insData.size(), DataConfigurator(ConfLayout::PLN)); - std::vector outputs_layouts(layer->outData.size(), DataConfigurator(ConfLayout::PLN)); + std::vector inputs_layouts(layer->insData.size(), DataConfigurator(ConfLayout::PLN, Precision::FP32)); + std::vector outputs_layouts(layer->outData.size(), DataConfigurator(ConfLayout::PLN, Precision::FP32)); addConfig(layer, inputs_layouts, outputs_layouts); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/select.cpp b/inference-engine/src/mkldnn_plugin/nodes/select.cpp index 3813986f4e134f..5e84e9fa8f7fb7 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/select.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/select.cpp @@ -31,8 +31,12 @@ class SelectImpl: public ExtLayerBase { broadcast = layer->GetParamAsString("auto_broadcast", "numpy"); - if (layer->insData[THEN].lock()->getTensorDesc().getPrecision() != layer->insData[ELSE].lock()->getTensorDesc().getPrecision()) - THROW_IE_EXCEPTION << "Select layer with name '" << layer->name << "' has different precisions on 'Then' and 'Else' inputs"; + auto inputPrecision = layer->insData[THEN].lock()->getTensorDesc().getPrecision(); + if (inputPrecision == Precision::BF16 || layer->insData[ELSE].lock()->getTensorDesc().getPrecision() == Precision::BF16) { + inputPrecision = Precision::BF16; + } else if (layer->insData[THEN].lock()->getTensorDesc().getPrecision() != layer->insData[ELSE].lock()->getTensorDesc().getPrecision()) { + THROW_IE_EXCEPTION << "Select layer with name '" << layer->name << "' has different precisions on 'Then' and 'Else' inputs "; + } const auto& conditionPrecision = layer->insData[CONDITION].lock()->getTensorDesc().getPrecision(); if (conditionPrecision != Precision::BOOL && conditionPrecision != Precision::I32 && conditionPrecision != Precision::U8) @@ -100,7 +104,7 @@ class SelectImpl: public ExtLayerBase { inConfig.inPlace = -1; inConfig.constant = false; - Precision inPrecision = layer->insData[i].lock()->getTensorDesc().getPrecision(); + Precision inPrecision = i == CONDITION ? 
conditionPrecision : inputPrecision; const SizeVector& inDims = layer->insData[i].lock()->getTensorDesc().getDims(); inConfig.desc = TensorDesc(inPrecision, inDims, InferenceEngine::TensorDesc::getLayoutByDims(inDims)); @@ -110,9 +114,8 @@ class SelectImpl: public ExtLayerBase { DataConfig outConfig; outConfig.inPlace = -1; outConfig.constant = false; - Precision outPrecision = layer->insData[1].lock()->getTensorDesc().getPrecision(); const SizeVector& outDims = layer->outData[0]->getTensorDesc().getDims(); - outConfig.desc = TensorDesc(outPrecision, outDims, InferenceEngine::TensorDesc::getLayoutByDims(outDims)); + outConfig.desc = TensorDesc(inputPrecision, outDims, InferenceEngine::TensorDesc::getLayoutByDims(outDims)); config.outConfs.push_back(outConfig); config.dynBatchSupport = false; diff --git a/inference-engine/src/mkldnn_plugin/nodes/simplernms.cpp b/inference-engine/src/mkldnn_plugin/nodes/simplernms.cpp index 2bc2f8c506ace2..80997266cdb443 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/simplernms.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/simplernms.cpp @@ -225,8 +225,8 @@ class SimplerNMSImpl : public ExtLayerBase { layer->insData[0].lock()->getTensorDesc().getDims().size() != 4) THROW_IE_EXCEPTION << "Unsupported dimensions!"; - addConfig(layer, {DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN)}, - {DataConfigurator(ConfLayout::PLN)}); + addConfig(layer, {DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32), + DataConfigurator(ConfLayout::PLN, Precision::FP32)}, {DataConfigurator(ConfLayout::PLN, Precision::FP32)}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/space_to_batch.cpp b/inference-engine/src/mkldnn_plugin/nodes/space_to_batch.cpp index d84da1ac400bf0..fc9b08aa05de4b 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/space_to_batch.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/space_to_batch.cpp @@ -54,15 +54,11 @@ class SpaceToBatchImpl: public ExtLayerBase { auto inData = spaceToBatchLayer->insData[i].lock(); if (inData == nullptr) THROW_IE_EXCEPTION << "'" << spaceToBatchLayer->name << "' layer has nullable input data"; - config.inConfs[i].desc = TensorDesc(inData->getTensorDesc().getPrecision(), - inData->getTensorDesc().getDims(), - inData->getTensorDesc().getLayout()); + config.inConfs[i].desc = TensorDesc(precision, inData->getTensorDesc().getDims(), inData->getTensorDesc().getLayout()); } DataConfig outConfig; - outConfig.desc = TensorDesc(layer->outData[0]->getTensorDesc().getPrecision(), - out_dims, - layer->outData[0]->getTensorDesc().getLayout()); + outConfig.desc = TensorDesc(precision, out_dims, layer->outData[0]->getTensorDesc().getLayout()); config.outConfs.push_back(outConfig); config.dynBatchSupport = false; confs.push_back(config); diff --git a/inference-engine/src/mkldnn_plugin/nodes/sparse_fill_empty_rows.cpp b/inference-engine/src/mkldnn_plugin/nodes/sparse_fill_empty_rows.cpp index 6f559832f2a260..a73e58a353fc6c 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/sparse_fill_empty_rows.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/sparse_fill_empty_rows.cpp @@ -25,11 +25,6 @@ class SparseFillEmptyRowsImpl : public ExtLayerBase { THROW_IE_EXCEPTION << layer->name << " Incorrect number of input/output edges!"; } - Precision input_indices_precision = 
layer->insData[INPUT_INDICES_PORT].lock()->getTensorDesc().getPrecision(); - if (input_indices_precision != Precision::FP32) { - THROW_IE_EXCEPTION << layer->name << " Incorrect input precision. Only FP32 is supported!"; - } - // check dimensions of input tensors SizeVector input_indices_dims = layer->insData[INPUT_INDICES_PORT].lock()->getTensorDesc().getDims(); if (input_indices_dims.size() != 2 || input_indices_dims[1] != 2) { @@ -75,8 +70,10 @@ class SparseFillEmptyRowsImpl : public ExtLayerBase { // TODO: check that dense shape value is set addConfig(layer, - {DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN)}, - {DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN)}); + {DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32), + DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32)}, + {DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32), + DataConfigurator(ConfLayout::PLN, Precision::FP32)}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/sparse_segment_reduce.cpp b/inference-engine/src/mkldnn_plugin/nodes/sparse_segment_reduce.cpp index c145709b0fd83d..2bea7fde1fb442 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/sparse_segment_reduce.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/sparse_segment_reduce.cpp @@ -38,20 +38,6 @@ class SparseSegmentReduceImpl : public ExtLayerBase { else THROW_IE_EXCEPTION << layer->name << " Incorrect SparseSegmentReduce layer type!"; - // check a precision of input tensors - Precision input_data_precision = layer->insData[INPUT_DATA_PORT].lock()->getTensorDesc().getPrecision(); - if (input_data_precision != Precision::FP32) { - THROW_IE_EXCEPTION << layer->name << " Incorrect precision of the input data. Only FP32 is supported!"; - } - Precision input_indices_precision = layer->insData[INPUT_INDICES_PORT].lock()->getTensorDesc().getPrecision(); - if (input_indices_precision != Precision::FP32) { - THROW_IE_EXCEPTION << layer->name << " Incorrect precision of the input indices. Only FP32 is supported!"; - } - Precision input_segment_ids_precision = layer->insData[INPUT_SEGMENT_IDS_PORT].lock()->getTensorDesc().getPrecision(); - if (input_segment_ids_precision != Precision::FP32) { - THROW_IE_EXCEPTION << layer->name << " Incorrect precision of segment IDs. Only FP32 is supported!"; - } - // check shapes of the second and third input tensors input_indices_dims = layer->insData[INPUT_INDICES_PORT].lock()->getTensorDesc().getDims(); if (input_indices_dims.size() != 1) { @@ -65,12 +51,6 @@ class SparseSegmentReduceImpl : public ExtLayerBase { THROW_IE_EXCEPTION << layer->name << " Shapes for input indices and segment IDs must match."; } - // check a precision of output tensor - Precision output_precision = layer->insData[OUTPUT_PORT].lock()->getTensorDesc().getPrecision(); - if (output_precision != Precision::FP32) { - THROW_IE_EXCEPTION << layer->name << " Incorrect precision of output data. 
Only FP32 is supported!"; - } - // check shapes of output tensor input_data_dims = layer->insData[INPUT_DATA_PORT].lock()->getTensorDesc().getDims(); output_dims = layer->outData[OUTPUT_PORT]->getTensorDesc().getDims(); @@ -88,8 +68,8 @@ class SparseSegmentReduceImpl : public ExtLayerBase { // confugure layouts of input and output ports addConfig(layer, - { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN) }); + { DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32), + DataConfigurator(ConfLayout::PLN, Precision::FP32) }, { DataConfigurator(ConfLayout::PLN, Precision::FP32) }); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/sparse_to_dense.cpp b/inference-engine/src/mkldnn_plugin/nodes/sparse_to_dense.cpp index abc2c1dec29ab8..526248c580e2c4 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/sparse_to_dense.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/sparse_to_dense.cpp @@ -28,26 +28,6 @@ class SparseToDenseImpl : public ExtLayerBase { with_default_value = true; } - // check precisions for input tensors - Precision input_indices_precision = layer->insData[INPUT_INDICES_PORT].lock()->getTensorDesc().getPrecision(); - if (input_indices_precision != Precision::I32) { - THROW_IE_EXCEPTION << layer->name << " Incorrect input precision for input indices. Only I32 is supported!"; - } - Precision input_dense_shape_precision = layer->insData[INPUT_DENSE_SHAPE_PORT].lock()->getTensorDesc().getPrecision(); - if (input_dense_shape_precision != Precision::I32) { - THROW_IE_EXCEPTION << layer->name << " Incorrect input precision for input dense shape. Only I32 is supported!"; - } - Precision input_values_precision = layer->insData[INPUT_VALUES_PORT].lock()->getTensorDesc().getPrecision(); - if (input_values_precision != Precision::I32) { - THROW_IE_EXCEPTION << layer->name << " Incorrect input precision for input values. Only I32 is supported!"; - } - if (with_default_value) { - Precision input_default_value_precision = layer->insData[INPUT_DEFAULT_VALUE_PORT].lock()->getTensorDesc().getPrecision(); - if (input_default_value_precision != Precision::I32) { - THROW_IE_EXCEPTION << layer->name << " Incorrect input precision for input default value. 
Only I32 is supported!"; - } - } - // check dimensions of input tensors SizeVector input_dense_shape_dims = layer->insData[INPUT_DENSE_SHAPE_PORT].lock()->getTensorDesc().getDims(); if (input_dense_shape_dims.size() != 1 || input_dense_shape_dims[0] < 1) { @@ -73,14 +53,14 @@ class SparseToDenseImpl : public ExtLayerBase { // TODO: check that dense shape value is set if (with_default_value) { addConfig(layer, - { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), - DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN) }); + { DataConfigurator(ConfLayout::PLN, Precision::I32), DataConfigurator(ConfLayout::PLN, Precision::I32), + DataConfigurator(ConfLayout::PLN, Precision::I32), DataConfigurator(ConfLayout::PLN, Precision::I32) }, + { DataConfigurator(ConfLayout::PLN, Precision::I32) }); } else { addConfig(layer, - { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), - DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN) }); + { DataConfigurator(ConfLayout::PLN, Precision::I32), DataConfigurator(ConfLayout::PLN, Precision::I32), + DataConfigurator(ConfLayout::PLN, Precision::I32) }, + { DataConfigurator(ConfLayout::PLN, Precision::I32) }); } } catch (InferenceEngine::details::InferenceEngineException &ex) { diff --git a/inference-engine/src/mkldnn_plugin/nodes/sparse_weighted_reduce.cpp b/inference-engine/src/mkldnn_plugin/nodes/sparse_weighted_reduce.cpp index 2ed9b2266060ce..6023476ebf0046 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/sparse_weighted_reduce.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/sparse_weighted_reduce.cpp @@ -127,14 +127,15 @@ class ExperimentalSparseWeightedReduceImpl : public ExtLayerBase { // TODO: check that dense shape value is set if (with_weights) { addConfig(layer, - { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), - DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN) }); + { DataConfigurator(ConfLayout::PLN, Precision::I32), DataConfigurator(ConfLayout::PLN, Precision::I32), + DataConfigurator(ConfLayout::PLN, Precision::I32), + DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::I32), + DataConfigurator(ConfLayout::PLN, Precision::FP32) }, { DataConfigurator(ConfLayout::PLN, Precision::FP32) }); } else { addConfig(layer, - { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), - DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN) }); + { DataConfigurator(ConfLayout::PLN, Precision::I32), DataConfigurator(ConfLayout::PLN, Precision::I32), + DataConfigurator(ConfLayout::PLN, Precision::I32), DataConfigurator(ConfLayout::PLN, Precision::FP32), + DataConfigurator(ConfLayout::PLN, Precision::I32) }, { DataConfigurator(ConfLayout::PLN, Precision::FP32) }); } } catch (InferenceEngine::details::InferenceEngineException &ex) { diff --git a/inference-engine/src/mkldnn_plugin/nodes/strided_slice.cpp b/inference-engine/src/mkldnn_plugin/nodes/strided_slice.cpp index 5e375b462d1804..f8d4af798d57ef 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/strided_slice.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/strided_slice.cpp @@ -35,8 +35,6 @@ class StridedSliceImpl: public ExtLayerBase { begin_dims = {}; if (layer->insData.size() > 1) 
{ begin_dims = layer->insData[STRIDEDSLICE_BEGIN].lock()->getTensorDesc().getDims(); - if (layer->insData[STRIDEDSLICE_BEGIN].lock()->getTensorDesc().getPrecision() != Precision::I32) - THROW_IE_EXCEPTION << layer->name << " Incorrect 'begin' input precision. Only I32 is supported!"; if (begin_dims.size() > 1) THROW_IE_EXCEPTION << layer->name << " Begin vector should be 1 dimension"; bounds_size = begin_dims[0]; @@ -44,8 +42,6 @@ class StridedSliceImpl: public ExtLayerBase { if (layer->insData.size() > 2) { end_dims = layer->insData[STRIDEDSLICE_END].lock()->getTensorDesc().getDims(); - if (layer->insData[STRIDEDSLICE_END].lock()->getTensorDesc().getPrecision() != Precision::I32) - THROW_IE_EXCEPTION << layer->name << " Incorrect 'end' input precision. Only I32 is supported!"; if (end_dims.size() > 1) THROW_IE_EXCEPTION << layer->name << " End vector should be 1 dimension"; if (begin_dims[0] != end_dims[0]) @@ -54,8 +50,6 @@ class StridedSliceImpl: public ExtLayerBase { if (layer->insData.size() > 3) { stride_dims = layer->insData[STRIDEDSLICE_STRIDE].lock()->getTensorDesc().getDims(); - if (layer->insData[STRIDEDSLICE_STRIDE].lock()->getTensorDesc().getPrecision() != Precision::I32) - THROW_IE_EXCEPTION << layer->name << " Incorrect 'strides' input precision. Only I32 is supported!"; if (stride_dims.size() > 1) THROW_IE_EXCEPTION << layer->name << " End vector should be 1 dimension"; if (begin_dims[0] != stride_dims[0]) @@ -134,16 +128,19 @@ class StridedSliceImpl: public ExtLayerBase { srcStrides = layer->insData[STRIDEDSLICE_DATA].lock()->getTensorDesc().getBlockingDesc().getStrides(); dstStrides = layer->outData[0]->getTensorDesc().getBlockingDesc().getStrides(); + Precision dataPrecision = layer->insData[STRIDEDSLICE_DATA].lock()->getTensorDesc().getPrecision(); if (layer->insData.size() == 1) { - addConfig(layer, { DataConfigurator(ConfLayout::PLN) }, { DataConfigurator(ConfLayout::PLN) }); + addConfig(layer, { DataConfigurator(ConfLayout::PLN, dataPrecision) }, { DataConfigurator(ConfLayout::PLN, dataPrecision) }); } else if (layer->insData.size() == 2) { - addConfig(layer, { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, { DataConfigurator(ConfLayout::PLN) }); + addConfig(layer, { DataConfigurator(ConfLayout::PLN, dataPrecision), DataConfigurator(ConfLayout::PLN, Precision::I32) }, + { DataConfigurator(ConfLayout::PLN, dataPrecision) }); } else if (layer->insData.size() == 3) { - addConfig(layer, { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN) }); + addConfig(layer, { DataConfigurator(ConfLayout::PLN, dataPrecision), DataConfigurator(ConfLayout::PLN, Precision::I32), + DataConfigurator(ConfLayout::PLN, Precision::I32) }, { DataConfigurator(ConfLayout::PLN, dataPrecision) }); } else { - addConfig(layer, { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), - DataConfigurator(ConfLayout::PLN) }, { DataConfigurator(ConfLayout::PLN) }); + addConfig(layer, { DataConfigurator(ConfLayout::PLN, dataPrecision), DataConfigurator(ConfLayout::PLN, Precision::I32), + DataConfigurator(ConfLayout::PLN, Precision::I32), DataConfigurator(ConfLayout::PLN, Precision::I32) }, + { DataConfigurator(ConfLayout::PLN, dataPrecision) }); } } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); @@ -151,8 +148,6 @@ class StridedSliceImpl: public ExtLayerBase { } StatusCode execute(std::vector& inputs, 
std::vector& outputs, ResponseDesc *resp) noexcept override { - const float *src_data = inputs[STRIDEDSLICE_DATA]->cbuffer().as() + - inputs[STRIDEDSLICE_DATA]->getTensorDesc().getBlockingDesc().getOffsetPadding(); int *begin = nullptr, *end = nullptr, *stride = nullptr; if (begin_dims.size()) begin = inputs[STRIDEDSLICE_BEGIN]->cbuffer().as() + inputs[STRIDEDSLICE_BEGIN]->getTensorDesc().getBlockingDesc().getOffsetPadding(); @@ -160,17 +155,12 @@ class StridedSliceImpl: public ExtLayerBase { end = inputs[STRIDEDSLICE_END]->cbuffer().as() + inputs[STRIDEDSLICE_END]->getTensorDesc().getBlockingDesc().getOffsetPadding(); if (stride_dims.size()) stride = inputs[STRIDEDSLICE_STRIDE]->cbuffer().as() + inputs[STRIDEDSLICE_STRIDE]->getTensorDesc().getBlockingDesc().getOffsetPadding(); - float* dst_data = outputs[0]->cbuffer().as() + - outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); InferenceEngine::SizeVector src_dims = inputs[STRIDEDSLICE_DATA]->getTensorDesc().getDims(); InferenceEngine::SizeVector srcStrides = inputs[STRIDEDSLICE_DATA]->getTensorDesc().getBlockingDesc().getStrides(); InferenceEngine::SizeVector dst_dims = outputs[0]->getTensorDesc().getDims(); InferenceEngine::SizeVector dstStrides = outputs[0]->getTensorDesc().getBlockingDesc().getStrides(); - auto dst_size = outputs[0]->byteSize(); - memset(dst_data, 0, dst_size); - size_t i, j, k, bj, ej, sj; InferenceEngine::SizeVector our_dims; InferenceEngine::SizeVector out_dims; @@ -231,13 +221,49 @@ class StridedSliceImpl: public ExtLayerBase { return PARAMETER_MISMATCH; } + const size_t inputsPrecSize = inputs[STRIDEDSLICE_DATA]->getTensorDesc().getPrecision().size(); if (static_cast(src_dims.size()) == max_dims && shrink_axis == 0 && - stride_dms[stride_dms.size()-1] == 1 && stride_dms.size() > 1) - strided_slice_vp(src_data, dst_data); - else if (static_cast(src_dims.size()) == max_dims && shrink_axis == 0) - strided_slice_p(src_data, dst_data); - else - strided_slice(src_data, dst_data, our_dims); + stride_dms[stride_dms.size()-1] == 1 && stride_dms.size() > 1) { + if (inputsPrecSize != outputs[0]->getTensorDesc().getPrecision().size()) { + if (resp) { + std::string errorMsg = "StridedSlice layer doesn't support 'Data' input precision: " + + std::string(inputs[STRIDEDSLICE_DATA]->getTensorDesc().getPrecision().name()); + errorMsg.copy(resp->msg, sizeof(resp->msg) - 1); + } + return GENERAL_ERROR; + } + strided_slice_vp(inputs[STRIDEDSLICE_DATA], outputs[0]); + } else if (static_cast(src_dims.size()) == max_dims && shrink_axis == 0) { + switch (inputsPrecSize) { + case 1: { strided_slice_p(inputs[STRIDEDSLICE_DATA], outputs[0]); break; } + case 2: { strided_slice_p(inputs[STRIDEDSLICE_DATA], outputs[0]); break; } + case 4: { strided_slice_p(inputs[STRIDEDSLICE_DATA], outputs[0]); break; } + case 8: { strided_slice_p(inputs[STRIDEDSLICE_DATA], outputs[0]); break; } + default: { + if (resp) { + std::string errorMsg = "StridedSlice layer doesn't support 'Data' input precision: " + + std::string(inputs[STRIDEDSLICE_DATA]->getTensorDesc().getPrecision().name()); + errorMsg.copy(resp->msg, sizeof(resp->msg) - 1); + } + return GENERAL_ERROR; + } + } + } else { + switch (inputsPrecSize) { + case 1: { strided_slice(inputs[STRIDEDSLICE_DATA], outputs[0], our_dims); break; } + case 2: { strided_slice(inputs[STRIDEDSLICE_DATA], outputs[0], our_dims); break; } + case 4: { strided_slice(inputs[STRIDEDSLICE_DATA], outputs[0], our_dims); break; } + case 8: { strided_slice(inputs[STRIDEDSLICE_DATA], outputs[0], our_dims); break; } 
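[Editor's aside — the default branch of this second switch continues right after this note.] Both switches in the rewritten execute() pick a concrete element type purely from the precision's byte size, so a single templated kernel can serve FP32, I32, BF16, U8 and the other supported precisions without interpreting the values. A minimal standalone sketch of that dispatch pattern, with purely illustrative names that are not part of the plugin, assuming fixed-width unsigned integers as opaque element carriers:

#include <cstdint>
#include <cstddef>
#include <stdexcept>

template <typename T>
static void copy_every_nth(const void* src, void* dst, std::size_t count, std::size_t step) {
    auto s = static_cast<const T*>(src);
    auto d = static_cast<T*>(dst);
    for (std::size_t i = 0; i < count; ++i)
        d[i] = s[i * step];                  // elements are moved bit-for-bit, never interpreted
}

static void copy_every_nth_dispatch(const void* src, void* dst, std::size_t count,
                                    std::size_t step, std::size_t elem_size) {
    switch (elem_size) {                     // elem_size comes from Precision::size(), as above
        case 1: copy_every_nth<uint8_t >(src, dst, count, step); break;
        case 2: copy_every_nth<uint16_t>(src, dst, count, step); break;
        case 4: copy_every_nth<uint32_t>(src, dst, count, step); break;
        case 8: copy_every_nth<uint64_t>(src, dst, count, step); break;
        default: throw std::runtime_error("unsupported element size");
    }
}
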
+ default: { + if (resp) { + std::string errorMsg = "StridedSlice layer doesn't support 'Data' input precision: " + + std::string(inputs[STRIDEDSLICE_DATA]->getTensorDesc().getPrecision().name()); + errorMsg.copy(resp->msg, sizeof(resp->msg) - 1); + } + return GENERAL_ERROR; + } + } + } return OK; } @@ -248,9 +274,11 @@ class StridedSliceImpl: public ExtLayerBase { const size_t STRIDEDSLICE_END = 2; const size_t STRIDEDSLICE_STRIDE = 3; - void strided_slice(const float *src_data, float* dst_data, std::vector &dims); - void strided_slice_vp(const float *src_data, float* dst_data); - void strided_slice_p(const float *src_data, float* dst_data); + template + void strided_slice(Blob::Ptr&, Blob::Ptr& dst_data, std::vector &dims); + void strided_slice_vp(Blob::Ptr&, Blob::Ptr& dst_data); + template + void strided_slice_p(Blob::Ptr&, Blob::Ptr& dst_data); SizeVector begin_dims; SizeVector end_dims; @@ -275,7 +303,13 @@ class StridedSliceImpl: public ExtLayerBase { int ellipsis_pos1, ellipsis_pos2; }; -void StridedSliceImpl::strided_slice(const float *src_data, float* dst_data, std::vector &dims) { +template +void StridedSliceImpl::strided_slice(Blob::Ptr& input, Blob::Ptr& output, std::vector &dims) { + auto* src_data = input->cbuffer().as() + input->getTensorDesc().getBlockingDesc().getOffsetPadding(); + auto* dst_data = output->buffer().as() + output->getTensorDesc().getBlockingDesc().getOffsetPadding(); + auto dst_size = output->byteSize(); + memset(dst_data, 0, dst_size); + size_t work_amount_dst = dstStrides[0] * dst_dims[0]; parallel_nt(0, [&](const int ithr, const int nthr) { int j; @@ -306,10 +340,16 @@ void StridedSliceImpl::strided_slice(const float *src_data, float* dst_data, std }); } -void StridedSliceImpl::strided_slice_vp(const float *src_data, float* dst_data) { +void StridedSliceImpl::strided_slice_vp(Blob::Ptr& input, Blob::Ptr& output) { + size_t dataSize = input->getTensorDesc().getPrecision().size(); + const uint8_t* src_data = input->cbuffer().as() + input->getTensorDesc().getBlockingDesc().getOffsetPadding() * dataSize; + uint8_t* dst_data = output->buffer().as() + output->getTensorDesc().getBlockingDesc().getOffsetPadding() * dataSize; + auto dst_size = output->byteSize(); + memset(dst_data, 0, dst_size); + // Vectorized copy size_t dims_size_1 = dst_dims.size() - 1; - size_t dataLength = dst_dims[dims_size_1]; + size_t len = dst_dims[dims_size_1] * dataSize; size_t work_amount_dst = dstStrides[0] * dst_dims[0] / dst_dims[dims_size_1]; parallel_nt(0, [&](const int ithr, const int nthr) { @@ -323,8 +363,8 @@ void StridedSliceImpl::strided_slice_vp(const float *src_data, float* dst_data) i /= dst_dims[j]; } - for (size_t iwork = start, dst_idx = start * dataLength, i = 1; iwork < end; ++iwork, dst_idx += dataLength) { - cpu_memcpy(&dst_data[dst_idx], &src_data[src_idx], sizeof(float) * dataLength); + for (size_t iwork = start, dst_idx = start * len, i = 1; iwork < end; ++iwork, dst_idx += len) { + cpu_memcpy(&dst_data[dst_idx], &src_data[src_idx * dataSize], len); for (int j = dims_size_1 - 1; j >= 0; j--) { counters[j]++; if (counters[j] < dst_dims[j]) { @@ -342,7 +382,13 @@ void StridedSliceImpl::strided_slice_vp(const float *src_data, float* dst_data) }); } -void StridedSliceImpl::strided_slice_p(const float *src_data, float* dst_data) { +template +void StridedSliceImpl::strided_slice_p(Blob::Ptr& input, Blob::Ptr& output) { + auto* src_data = input->cbuffer().as() + input->getTensorDesc().getBlockingDesc().getOffsetPadding(); + auto* dst_data = output->buffer().as() + 
output->getTensorDesc().getBlockingDesc().getOffsetPadding(); + auto dst_size = output->byteSize(); + memset(dst_data, 0, dst_size); + size_t dims_size = dst_dims.size(); size_t work_amount_dst = dstStrides[0] * dst_dims[0]; diff --git a/inference-engine/src/mkldnn_plugin/nodes/topk.cpp b/inference-engine/src/mkldnn_plugin/nodes/topk.cpp index f2f715ec68734d..09ab13796b9e87 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/topk.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/topk.cpp @@ -30,14 +30,6 @@ class TopKImpl: public ExtLayerBase { if (layer->outData.size() != 1 && layer->outData.size() != 2) THROW_IE_EXCEPTION << layer->name << " Incorrect number of output edges!"; - // DataConfigurator::addConfig will automatically change BF16 datatype to FP32 - // it can be changed back by explicit modification like confs.back().outConfs[i].desc.setPrecision(Precision::BF16); - // if current layer supports BF16 naturally. usually they are not and nothing special is not required - if ((layer->insData[TOPK_DATA].lock()->getTensorDesc().getPrecision() != Precision::FP32 && - layer->insData[TOPK_DATA].lock()->getTensorDesc().getPrecision() != Precision::BF16) || - layer->insData[TOPK_K].lock()->getTensorDesc().getPrecision() != Precision::I32) - THROW_IE_EXCEPTION << layer->name << " TopKImpl - Incorrect input data/index values precision."; - if (layer->insData[TOPK_K].lock()->getTensorDesc().getDims().size() > 1) THROW_IE_EXCEPTION << layer->name << " TopKImpl - Index vector should be 1 dimension"; @@ -47,10 +39,6 @@ class TopKImpl: public ExtLayerBase { THROW_IE_EXCEPTION << layer->name << " TopKImpl - Incorrect input/output tensor dimension sizes"; if (layer->outData.size() == 2) { - if (layer->outData[TOPK_VALUE]->getTensorDesc().getPrecision() != Precision::FP32 && - layer->outData[TOPK_VALUE]->getTensorDesc().getPrecision() != Precision::BF16) - THROW_IE_EXCEPTION << layer->name << " TopKImpl - Incorrect output data tensor precision. Floating point datatypes are supported!"; - SizeVector dst_idx_dims = layer->outData[TOPK_INDEX]->getTensorDesc().getDims(); if (dst_dims.size() != dst_idx_dims.size()) THROW_IE_EXCEPTION << layer->name << " Incorrect output tensor dimension sizes"; @@ -102,11 +90,11 @@ class TopKImpl: public ExtLayerBase { before_num = count(src_dims, 0, axis); if (layer->outData.size() == 1) { - addConfig(layer, { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, + addConfig(layer, { DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::I32) }, { DataConfigurator(ConfLayout::PLN) }); } else { - addConfig(layer, { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }); + addConfig(layer, { DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::I32) }, + { DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN) }); // TODO: WA... While ICNNNetwork has no clear rule to fill tensor precision // it use precision of parent layer. 
So each output tensor Data object has diff --git a/inference-engine/src/mkldnn_plugin/nodes/topkrois_onnx.cpp b/inference-engine/src/mkldnn_plugin/nodes/topkrois_onnx.cpp index 9fe2c7cb461cdb..195e3ecfff4d82 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/topkrois_onnx.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/topkrois_onnx.cpp @@ -39,8 +39,8 @@ class ExperimentalDetectronTopKROIsImpl: public ExtLayerBase { max_rois_num_ = layer->GetParamAsInt("max_rois", 0); addConfig(layer, - {DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN)}, - {DataConfigurator(ConfLayout::PLN)}); + {DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32)}, + {DataConfigurator(ConfLayout::PLN, Precision::FP32)}); } catch (InferenceEngine::details::InferenceEngineException &ex) { errorMsg = ex.what(); } diff --git a/inference-engine/src/mkldnn_plugin/nodes/unique.cpp b/inference-engine/src/mkldnn_plugin/nodes/unique.cpp index 950a8fd2eb99f7..f544789041f615 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/unique.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/unique.cpp @@ -61,20 +61,12 @@ class UniqueImpl : public ExtLayerBase { // check dimensions of output tensors and its precisions size_t cur_output_port = 0; SizeVector output_uniques_dims = layer->outData[cur_output_port]->getTensorDesc().getDims(); - Precision output_uniques_precision = layer->outData[cur_output_port]->getTensorDesc().getPrecision(); - if (output_uniques_precision != Precision::FP32) { - THROW_IE_EXCEPTION << layer->name << " Incorrect precision for output tensor of unique elements. Only FP32 is supported!"; - } if (output_uniques_dims.size() != 1 || output_uniques_dims[0] != num_elements) { THROW_IE_EXCEPTION << layer->name << " Incorrect dimensions for output tensor of unique elements."; } if (return_inverse) { cur_output_port++; SizeVector output_indices_dims = layer->outData[cur_output_port]->getTensorDesc().getDims(); - Precision output_indices_precision = layer->outData[cur_output_port]->getTensorDesc().getPrecision(); - if (output_indices_precision != Precision::FP32) { - THROW_IE_EXCEPTION << layer->name << " Incorrect precision for output tensor of indices. Only FP32 is supported!"; - } if (output_indices_dims.size() != 1 || output_indices_dims[0] != num_elements) { THROW_IE_EXCEPTION << layer->name << " Incorrect dimensions for output tensor of indices."; } @@ -82,10 +74,6 @@ class UniqueImpl : public ExtLayerBase { if (return_counts) { cur_output_port++; SizeVector output_counts_dims = layer->outData[cur_output_port]->getTensorDesc().getDims(); - Precision output_counts_precision = layer->outData[cur_output_port]->getTensorDesc().getPrecision(); - if (output_counts_precision != Precision::FP32) { - THROW_IE_EXCEPTION << layer->name << " Incorrect precision for output tensor of counts. 
Only FP32 is supported!"; - } if (output_counts_dims.size() != 1 || output_counts_dims[0] != num_elements) { THROW_IE_EXCEPTION << layer->name << " Incorrect dimensions for output tensor of counts."; } @@ -94,16 +82,16 @@ class UniqueImpl : public ExtLayerBase { // add a layer configuration if (layer->outData.size() == 1) { addConfig(layer, - { DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN) }); + { DataConfigurator(ConfLayout::PLN, Precision::FP32) }, + { DataConfigurator(ConfLayout::PLN, Precision::FP32) }); } else if (layer->outData.size() == 2) { addConfig(layer, - { DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }); + { DataConfigurator(ConfLayout::PLN, Precision::FP32) }, + { DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32) }); } else if (layer->outData.size() == 3) { addConfig(layer, - { DataConfigurator(ConfLayout::PLN) }, - { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }); + { DataConfigurator(ConfLayout::PLN, Precision::FP32) }, { DataConfigurator(ConfLayout::PLN, Precision::FP32), + DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN, Precision::FP32) }); } } catch (InferenceEngine::details::InferenceEngineException &ex) { diff --git a/inference-engine/src/mkldnn_plugin/utils/bfloat16.hpp b/inference-engine/src/mkldnn_plugin/utils/bfloat16.hpp new file mode 100644 index 00000000000000..35fac1fa682462 --- /dev/null +++ b/inference-engine/src/mkldnn_plugin/utils/bfloat16.hpp @@ -0,0 +1,141 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +/** + * The bfloat16_t class can be used as an arithmetic type. All arithmetic operations goes through conversion to the float data type. + */ + + +#define BFLOAT16_ROUND_MODE_TRUNCATE + +namespace MKLDNNPlugin { +class bfloat16_t { +public: + constexpr bfloat16_t() + : m_value{0} + { + } + bfloat16_t(float value) noexcept + : m_value{ +#if defined BFLOAT16_ROUND_MODE_TO_NEAREST + round_to_nearest(value) +#elif defined BFLOAT16_ROUND_MODE_TO_NEAREST_EVEN + round_to_nearest_even(value) +#elif defined BFLOAT16_ROUND_MODE_TRUNCATE + truncate(value) +#else +#error \ + "ROUNDING_MODE must be one of BFLOAT16_ROUND_MODE_TO_NEAREST, BFLOAT16_ROUND_MODE_TO_NEAREST_EVEN, or BFLOAT16_ROUND_MODE_TRUNCATE" +#endif + } + { + } + + operator float() const { + return F32{uint32_t(m_value) << 16}.vfloat; + } + static constexpr bfloat16_t from_bits(uint16_t bits) { return bfloat16_t(bits, true); } + uint16_t to_bits() const { return m_value; } + + static inline uint16_t round_to_nearest_even(float x) { + return static_cast((F32(x).vint + ((F32(x).vint & 0x00010000) >> 1)) >> 16); + } + + static inline uint16_t round_to_nearest(float x) { + return static_cast((F32(x).vint + 0x8000) >> 16); + } + + static inline uint16_t truncate(float x) { return static_cast((F32(x).vint) >> 16); } + +private: + constexpr bfloat16_t(uint16_t x, bool) + : m_value{x} + { + } + union alignas(16) F32 { + F32(float val) + : vfloat{val} { + } + + F32(uint32_t val) + : vint{val} { + } + float vfloat; + uint32_t vint; + }; + uint16_t m_value; +}; +} // namespace MKLDNNPlugin + +/** + * std::numeric_limits overloaded for better compatibility with template metaprogramming. + * For example, to make the following template work: + * template + * void someFunction() { + * ... 
+ * T maxValue = std::numeric_limits::max(); + * ... + * } + */ + +namespace std { +template <> +class numeric_limits { +public: + static constexpr bool is_specialized = true; + static constexpr MKLDNNPlugin::bfloat16_t min() noexcept { + return MKLDNNPlugin::bfloat16_t::from_bits(0x007F); + } + static constexpr MKLDNNPlugin::bfloat16_t max() noexcept { + return MKLDNNPlugin::bfloat16_t::from_bits(0x7F7F); + } + static constexpr MKLDNNPlugin::bfloat16_t lowest() noexcept { + return MKLDNNPlugin::bfloat16_t::from_bits(0xFF7F); + } + static constexpr int digits = 7; + static constexpr int digits10 = 2; + static constexpr bool is_signed = true; + static constexpr bool is_integer = false; + static constexpr bool is_exact = false; + static constexpr int radix = 2; + static constexpr MKLDNNPlugin::bfloat16_t epsilon() noexcept { + return MKLDNNPlugin::bfloat16_t::from_bits(0x3C00); + } + static constexpr MKLDNNPlugin::bfloat16_t round_error() noexcept { + return MKLDNNPlugin::bfloat16_t::from_bits(0x3F00); + } + static constexpr int min_exponent = -125; + static constexpr int min_exponent10 = -37; + static constexpr int max_exponent = 128; + static constexpr int max_exponent10 = 38; + static constexpr bool has_infinity = true; + static constexpr bool has_quiet_NaN = true; + static constexpr bool has_signaling_NaN = true; + static constexpr float_denorm_style has_denorm = denorm_absent; + static constexpr bool has_denorm_loss = false; + static constexpr MKLDNNPlugin::bfloat16_t infinity() noexcept { + return MKLDNNPlugin::bfloat16_t::from_bits(0x7F80); + } + static constexpr MKLDNNPlugin::bfloat16_t quiet_NaN() noexcept { + return MKLDNNPlugin::bfloat16_t::from_bits(0x7FC0); + } + static constexpr MKLDNNPlugin::bfloat16_t signaling_NaN() noexcept { + return MKLDNNPlugin::bfloat16_t::from_bits(0x7FC0); + } + static constexpr MKLDNNPlugin::bfloat16_t denorm_min() noexcept { + return MKLDNNPlugin::bfloat16_t::from_bits(0); + } + static constexpr bool is_iec559 = false; + static constexpr bool is_bounded = false; + static constexpr bool is_modulo = false; + static constexpr bool traps = false; + static constexpr bool tinyness_before = false; + static constexpr float_round_style round_style = round_to_nearest; +}; +} // namespace std diff --git a/inference-engine/src/multi_device/CMakeLists.txt b/inference-engine/src/multi_device/CMakeLists.txt index 1eb5f45e9bc334..615d2d250c253b 100644 --- a/inference-engine/src/multi_device/CMakeLists.txt +++ b/inference-engine/src/multi_device/CMakeLists.txt @@ -12,7 +12,7 @@ ie_add_plugin(NAME ${TARGET_NAME} SOURCES ${SOURCES} ${HEADERS} VERSION_DEFINES_FOR multi_device_plugin.cpp) -target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_legacy inference_engine) +target_link_libraries(${TARGET_NAME} PRIVATE inference_engine) set_ie_threading_interface_for(${TARGET_NAME}) diff --git a/inference-engine/src/multi_device/multi_device_exec_network.cpp b/inference-engine/src/multi_device/multi_device_exec_network.cpp index 1f225232fb0228..10b9a280963624 100644 --- a/inference-engine/src/multi_device/multi_device_exec_network.cpp +++ b/inference-engine/src/multi_device/multi_device_exec_network.cpp @@ -15,7 +15,6 @@ #include "ie_metric_helpers.hpp" -#include #include #include #include @@ -37,7 +36,7 @@ struct IdleGuard { } ~IdleGuard() { if (nullptr != _notBusyWorkerRequests) { - _notBusyWorkerRequests->push(_workerInferRequestPtr); + _notBusyWorkerRequests->try_push(_workerInferRequestPtr); } } MultiDeviceExecutableNetwork::NotBusyWorkerRequests* Release() { @@ 
-55,6 +54,7 @@ MultiDeviceExecutableNetwork::MultiDeviceExecutableNetwork(const DeviceMap()), _devicePriorities{networkDevices}, + _devicePrioritiesInitial{networkDevices}, _networksPerDevice{networksPerDevice}, _config{config}, _needPerfCounters{needPerfCounters} { @@ -80,10 +80,11 @@ MultiDeviceExecutableNetwork::MultiDeviceExecutableNetwork(const DeviceMap>( [workerRequestPtr, this, device, idleWorkerRequestsPtr] (InferRequest , StatusCode status) mutable { IdleGuard idleGuard{workerRequestPtr, *idleWorkerRequestsPtr}; @@ -92,41 +93,44 @@ MultiDeviceExecutableNetwork::MultiDeviceExecutableNetwork(const DeviceMap_task); capturedTask(); } - if (!_terminate) { - idleGuard.Release()->push(workerRequestPtr); - ScheduleToWorkerInferRequest(); + // try to return the request to the idle list (fails if the overall object destruction has began) + if (idleGuard.Release()->try_push(workerRequestPtr)) { + // try pop the task, as we know there is at least one idle request + if (_inferPipelineTasks.try_pop(workerRequestPtr->_task)) { + // if succeeded, let's schedule that + ScheduleToWorkerInferRequest(std::move(workerRequestPtr->_task)); + } } }); } } } -void MultiDeviceExecutableNetwork::ScheduleToWorkerInferRequest() { +void MultiDeviceExecutableNetwork::ScheduleToWorkerInferRequest(Task inferPipelineTask) { auto devices = [&] { std::lock_guard lock(_mutex); return _devicePriorities; }(); for (auto&& device : devices) { - auto& idleWorkerRequests = _idleWorkerRequests[device.deviceName]; WorkerInferRequest* workerRequestPtr = nullptr; + NotBusyWorkerRequests& idleWorkerRequests = _idleWorkerRequests[device.deviceName]; if (idleWorkerRequests.try_pop(workerRequestPtr)) { IdleGuard idleGuard{workerRequestPtr, idleWorkerRequests}; - Task inferPipelineTask; - if (_inferPipelineTasks.try_pop(inferPipelineTask)) { - _thisWorkerInferRequest = workerRequestPtr; - inferPipelineTask(); - idleGuard.Release(); - break; + _thisWorkerInferRequest = workerRequestPtr; + { + auto capturedTask = std::move(inferPipelineTask); + capturedTask(); } + idleGuard.Release(); + return; } } + // no vacant requests this time, storing the task to the queue + _inferPipelineTasks.push(std::move(inferPipelineTask)); } void MultiDeviceExecutableNetwork::run(Task inferPipelineTask) { - if (!_terminate) { - _inferPipelineTasks.push(std::move(inferPipelineTask)); - ScheduleToWorkerInferRequest(); - } + ScheduleToWorkerInferRequest(std::move(inferPipelineTask)); } MultiDeviceExecutableNetwork::~MultiDeviceExecutableNetwork() { @@ -134,16 +138,32 @@ MultiDeviceExecutableNetwork::~MultiDeviceExecutableNetwork() { std::lock_guard lock(_mutex); _devicePriorities.clear(); } - _terminate = true; - /* NOTE: The only threads that use `MultiDeviceExecutableNetwork` Context are those that are used by Worker infer requests. - * But AsyncInferRequest destructor should waits for all asynchronous tasks that are used by the request + /* NOTE: The only threads that use `MultiDeviceExecutableNetwork` worker infer requests' threads. 
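[Editor's aside — the destructor note continues below.] A little further down, the new CreateInferRequestImpl maps the n-th user-created request onto one of the per-device worker requests (walking devices in their initial priority order) so that device-friendly blobs can be borrowed. A small standalone sketch of that num/sum index arithmetic, using plain standard-library stand-ins rather than Inference Engine classes (all names here are illustrative):

#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// device name -> number of worker requests created for that device
using WorkerCounts = std::vector<std::pair<std::string, std::size_t>>;

// Returns {device index, worker index} for the num-th created request, or {SIZE_MAX, 0}
// when there are more user requests than workers (then nothing is borrowed).
static std::pair<std::size_t, std::size_t> pick_worker(const WorkerCounts& devices, std::size_t num) {
    std::size_t sum = 0;
    for (std::size_t d = 0; d < devices.size(); ++d) {
        if (num - sum < devices[d].second)
            return {d, num - sum};
        sum += devices[d].second;
    }
    return {SIZE_MAX, 0};
}

// Example: with {{"GPU", 4}, {"CPU", 4}}, request number 5 maps to CPU worker 1.
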
+ * But AsyncInferRequest destructor should wait for all asynchronous tasks by the request */ + for (auto&& networkValue : _networksPerDevice) { + // stop accepting any idle requests back (for re-scheduling) + _idleWorkerRequests.at(networkValue.first).set_capacity(0); + } _workerRequests.clear(); } InferenceEngine::InferRequestInternal::Ptr MultiDeviceExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs, InferenceEngine::OutputsDataMap networkOutputs) { - return std::make_shared(networkInputs, networkOutputs); + auto num = _numRequestsCreated++; + size_t sum = 0; + InferenceEngine::InferRequest request_to_share_blobs_with; + // borrowing device-specific blobs from the underlying requests for the device-agnostic, user-facing requests + // this allows to potentially save on the data-copy later (if the requests are scheduled in the same order) + for (const auto& device : _devicePrioritiesInitial) { + auto& dev_requests = _workerRequests[device.deviceName]; + if ((num - sum) < dev_requests.size()) { + request_to_share_blobs_with = dev_requests.at(num - sum)._inferRequest; + break; + } + sum += dev_requests.size(); + } + return std::make_shared(networkInputs, networkOutputs, request_to_share_blobs_with); } IInferRequest::Ptr MultiDeviceExecutableNetwork::CreateInferRequest() { diff --git a/inference-engine/src/multi_device/multi_device_exec_network.hpp b/inference-engine/src/multi_device/multi_device_exec_network.hpp index 6422ba2811fe2c..bdea1e449e4041 100644 --- a/inference-engine/src/multi_device/multi_device_exec_network.hpp +++ b/inference-engine/src/multi_device/multi_device_exec_network.hpp @@ -37,6 +37,8 @@ using DeviceMap = std::unordered_map; #if ((IE_THREAD == IE_THREAD_TBB) || (IE_THREAD == IE_THREAD_TBB_AUTO)) template using ThreadSafeQueue = tbb::concurrent_queue; +template +using ThreadSafeBoundedQueue = tbb::concurrent_bounded_queue; #else template class ThreadSafeQueue { @@ -45,7 +47,6 @@ class ThreadSafeQueue { std::lock_guard lock(_mutex); _queue.push(std::move(value)); } - bool try_pop(T& value) { std::lock_guard lock(_mutex); if (!_queue.empty()) { @@ -56,15 +57,40 @@ class ThreadSafeQueue { return false; } } - - bool empty() { +protected: + std::queue _queue; + std::mutex _mutex; +}; +template +class ThreadSafeBoundedQueue { +public: + ThreadSafeBoundedQueue() = default; + bool try_push(T value) { + std::lock_guard lock(_mutex); + if (_capacity) { + _queue.push(std::move(value)); + } + return _capacity; + } + bool try_pop(T& value) { + std::lock_guard lock(_mutex); + if (_capacity && !_queue.empty()) { + value = std::move(_queue.front()); + _queue.pop(); + return true; + } else { + return false; + } + } + void set_capacity(std::size_t newCapacity) { std::lock_guard lock(_mutex); - return _queue.empty(); + _capacity = newCapacity; } protected: std::queue _queue; std::mutex _mutex; + bool _capacity = false; }; #endif @@ -77,7 +103,7 @@ class MultiDeviceExecutableNetwork : public InferenceEngine::ExecutableNetworkTh InferenceEngine::Task _task; InferenceEngine::StatusCode _status = InferenceEngine::StatusCode::OK; }; - using NotBusyWorkerRequests = ThreadSafeQueue; + using NotBusyWorkerRequests = ThreadSafeBoundedQueue; explicit MultiDeviceExecutableNetwork(const DeviceMap& networksPerDevice, const std::vector& networkDevices, @@ -93,18 +119,20 @@ class MultiDeviceExecutableNetwork : public InferenceEngine::ExecutableNetworkTh InferenceEngine::OutputsDataMap networkOutputs) override; ~MultiDeviceExecutableNetwork() override; - void 
ScheduleToWorkerInferRequest(); + void ScheduleToWorkerInferRequest(InferenceEngine::Task); static thread_local WorkerInferRequest* _thisWorkerInferRequest; std::atomic_bool _terminate = {false}; std::mutex _mutex; std::vector _devicePriorities; + const std::vector _devicePrioritiesInitial; DeviceMap _networksPerDevice; ThreadSafeQueue _inferPipelineTasks; DeviceMap _idleWorkerRequests; DeviceMap> _workerRequests; std::unordered_map _config; bool _needPerfCounters = false; + std::atomic_size_t _numRequestsCreated = {0}; }; } // namespace MultiDevicePlugin diff --git a/inference-engine/src/multi_device/multi_device_infer_request.cpp b/inference-engine/src/multi_device/multi_device_infer_request.cpp index d021e0a30624f0..a662cc711346af 100644 --- a/inference-engine/src/multi_device/multi_device_infer_request.cpp +++ b/inference-engine/src/multi_device/multi_device_infer_request.cpp @@ -10,8 +10,17 @@ namespace MultiDevicePlugin { using namespace InferenceEngine; // ------------------------------MultiDeviceInferRequest---------------------------- MultiDeviceInferRequest::MultiDeviceInferRequest(const InputsDataMap& networkInputs, - const OutputsDataMap& networkOutputs) + const OutputsDataMap& networkOutputs, + InferRequest request_to_share_blobs_with) : InferRequestInternal(networkInputs, networkOutputs) { + if (request_to_share_blobs_with) { + // borrow device-friendly blobs from the request + for (const auto &it : _networkInputs) + _inputs[it.first] = request_to_share_blobs_with.GetBlob(it.first); + for (const auto &it : _networkOutputs) + _outputs[it.first] = request_to_share_blobs_with.GetBlob(it.first); + return; + } // Allocate all input blobs for (const auto &it : networkInputs) { Layout l = it.second->getLayout(); @@ -40,14 +49,16 @@ void MultiDeviceInferRequest::SetBlobsToAnotherRequest(InferRequest& req) { auto &name = it.first; // this request is already in BUSY state, so using the internal functions safely GetBlob(name.c_str(), blob); - req.SetBlob(name.c_str(), blob); + if (req.GetBlob(name) != blob) + req.SetBlob(name, blob); } for (const auto &it : _networkOutputs) { Blob::Ptr blob; auto &name = it.first; // this request is already in BUSY state, so using the internal functions safely GetBlob(name.c_str(), blob); - req.SetBlob(name.c_str(), blob); + if (req.GetBlob(name) != blob) + req.SetBlob(name, blob); } } diff --git a/inference-engine/src/multi_device/multi_device_infer_request.hpp b/inference-engine/src/multi_device/multi_device_infer_request.hpp index aebeb6784f6106..80270cd117c294 100644 --- a/inference-engine/src/multi_device/multi_device_infer_request.hpp +++ b/inference-engine/src/multi_device/multi_device_infer_request.hpp @@ -23,14 +23,15 @@ class MultiDeviceInferRequest : public InferenceEngine::InferRequestInternal { public: using Ptr = std::shared_ptr; explicit MultiDeviceInferRequest(const InferenceEngine::InputsDataMap& networkInputs, - const InferenceEngine::OutputsDataMap& networkOutputs); + const InferenceEngine::OutputsDataMap& networkOutputs, + InferenceEngine::InferRequest request_to_share_blobs_with); void GetPerformanceCounts(std::map&) const override { THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str; } void InferImpl() override { THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str; } - // Multi-Device impl specific: sets the data (blobs from the device-less requets to the specific device request) + // Multi-Device impl specific: sets the data (blobs from the device-less requests to the specific device request) void 
SetBlobsToAnotherRequest(InferenceEngine::InferRequest& req); }; diff --git a/inference-engine/src/multi_device/multi_device_plugin.cpp b/inference-engine/src/multi_device/multi_device_plugin.cpp index eee9483720dec8..8d1217fedc7fe8 100644 --- a/inference-engine/src/multi_device/multi_device_plugin.cpp +++ b/inference-engine/src/multi_device/multi_device_plugin.cpp @@ -8,9 +8,10 @@ #include #include #include +#include + #include -#include #include #include "multi_device_plugin.hpp" @@ -146,12 +147,16 @@ InferenceEngine::Parameter MultiDeviceInferencePlugin::GetMetric(const std::stri } } -ExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadExeNetworkImpl(const ICNNNetwork &network, +ExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadExeNetworkImpl(const CNNNetwork &network, const std::map& config) { if (GetCore() == nullptr) { THROW_IE_EXCEPTION << "Please, work with MULTI device via InferencEngine::Core object"; } + if (network.getFunction() == nullptr) { + THROW_IE_EXCEPTION << "MULTI device supports just ngraph network representation"; + } + auto fullConfig = mergeConfigs(_config, config); auto priorities = fullConfig.find(MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES); if (priorities == fullConfig.end()) { @@ -168,8 +173,7 @@ ExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadExeNetworkImpl(co for (auto& p : metaDevices) { auto & deviceName = p.deviceName; auto & deviceConfig = p.config; - auto clonedNetwork = cloneNetwork(network); - executableNetworkPerDevice.insert({ deviceName, GetCore()->LoadNetwork(CNNNetwork{clonedNetwork}, deviceName, deviceConfig) }); + executableNetworkPerDevice.insert({ deviceName, GetCore()->LoadNetwork(network, deviceName, deviceConfig) }); multiNetworkConfig.insert(deviceConfig.begin(), deviceConfig.end()); } if (executableNetworkPerDevice.empty()) @@ -185,7 +189,7 @@ ExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadExeNetworkImpl(co enablePerfCounters); } -QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const ICNNNetwork& network, +QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const CNNNetwork& network, const std::map& config) const { QueryNetworkResult queryResult; @@ -193,6 +197,10 @@ QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const ICNNNetwork& THROW_IE_EXCEPTION << "Please, work with MULTI device via InferencEngine::Core object"; } + if (network.getFunction() == nullptr) { + THROW_IE_EXCEPTION << "MULTI device supports just ngraph network representation"; + } + queryResult.rc = StatusCode::OK; queryResult.supportedLayersMap.clear(); @@ -201,57 +209,21 @@ QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const ICNNNetwork& if (priorities == fullConfig.end()) { THROW_IE_EXCEPTION << "KEY_MULTI_DEVICE_PRIORITIES key is not set for MULTI device"; } - auto metaDevices = ParseMetaDevices(priorities->second, fullConfig); std::unordered_set supportedLayers; - - auto allSupportsNgraph = - std::all_of(std::begin(metaDevices), std::end(metaDevices), - [&] (const DeviceInformation& value) -> bool { - auto clonedNetwork = cloneNetwork(network); - try { GetCore()->QueryNetwork(*clonedNetwork, value.deviceName, value.config); } - catch (const InferenceEngine::details::InferenceEngineException & ex) { - std::string message = ex.what(); - return message.find(NOT_IMPLEMENTED_str) == std::string::npos; - } - return true; - }); - for (auto&& value : metaDevices) { - auto queryNetwork = [&] (const InferenceEngine::ICNNNetwork & networkObject) { - auto clonedNetwork 
= cloneNetwork(networkObject); - auto deviceQr = GetCore()->QueryNetwork(*clonedNetwork, value.deviceName, value.config); - std::unordered_set deviceSupportedLayers; - for (auto&& layerQr : deviceQr.supportedLayersMap) { - deviceSupportedLayers.emplace(layerQr.first); - } - supportedLayers = supportedLayers.empty() - ? deviceSupportedLayers : (deviceSupportedLayers.empty() - ? supportedLayers : Intersection(supportedLayers, deviceSupportedLayers)); - }; - - if (network.getFunction()) { - if (!allSupportsNgraph) { - if (contains(fullConfig, CONFIG_KEY_INTERNAL(AGGREGATED_PLUGIN))) { - THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str; - } else { - auto cnnNetworkImpl = std::make_shared(network); - if (cnnNetworkImpl == nullptr) - THROW_IE_EXCEPTION << "Cannot create CNNNetworkImpl shared_ptr"; - queryNetwork(*cnnNetworkImpl); - } - } else { - queryNetwork(network); - } - } else { - queryNetwork(network); + auto deviceQr = GetCore()->QueryNetwork(network, value.deviceName, value.config); + std::unordered_set deviceSupportedLayers; + for (auto&& layerQr : deviceQr.supportedLayersMap) { + deviceSupportedLayers.emplace(layerQr.first); } + supportedLayers = supportedLayers.empty() + ? deviceSupportedLayers : (deviceSupportedLayers.empty() + ? supportedLayers : Intersection(supportedLayers, deviceSupportedLayers)); } - for (auto&& supportedLayer : supportedLayers) { queryResult.supportedLayersMap[supportedLayer] = GetName(); } - return queryResult; } diff --git a/inference-engine/src/multi_device/multi_device_plugin.hpp b/inference-engine/src/multi_device/multi_device_plugin.hpp index b07f5968d737e4..09124822ce8541 100644 --- a/inference-engine/src/multi_device/multi_device_plugin.hpp +++ b/inference-engine/src/multi_device/multi_device_plugin.hpp @@ -20,19 +20,18 @@ class MultiDeviceInferencePlugin : public InferenceEngine::InferencePluginIntern MultiDeviceInferencePlugin(); ~MultiDeviceInferencePlugin() override = default; - InferenceEngine::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork& network, + InferenceEngine::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network, const std::map& config) override; void SetConfig(const std::map& config) override; - Parameter GetConfig(const std::string& name, - const std::map & options) const override; - InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::ICNNNetwork& network, + Parameter GetConfig(const std::string& name, const std::map & options) const override; + InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network, const std::map& config) const override; InferenceEngine::Parameter GetMetric(const std::string& name, const std::map& options) const override; std::vector ParseMetaDevices(const std::string & devicesRequestsCfg, - const std::map & config) const; + const std::map & config) const; protected: std::map GetSupportedConfig(const std::map& config, diff --git a/inference-engine/src/plugin_api/blob_factory.hpp b/inference-engine/src/plugin_api/blob_factory.hpp index 997f7b72a18e4c..8ad0a94f85a75e 100644 --- a/inference-engine/src/plugin_api/blob_factory.hpp +++ b/inference-engine/src/plugin_api/blob_factory.hpp @@ -107,6 +107,7 @@ InferenceEngine::Blob::Ptr make_blob_with_precision(InferenceEngine::Precision p switch (precision) { USE_FACTORY(FP32); + USE_FACTORY(FP64); USE_FACTORY(FP16); USE_FACTORY(Q78); USE_FACTORY(I8); diff --git a/inference-engine/src/plugin_api/caseless.hpp 
b/inference-engine/src/plugin_api/caseless.hpp index b9b08046fe8a76..0c030053bb6598 100644 --- a/inference-engine/src/plugin_api/caseless.hpp +++ b/inference-engine/src/plugin_api/caseless.hpp @@ -3,7 +3,8 @@ // /** - * @file A header file with caseless containers + * @file caseless.hpp + * @brief A header file with caseless containers */ #pragma once diff --git a/inference-engine/src/plugin_api/cpp_interfaces/base/ie_executable_network_base.hpp b/inference-engine/src/plugin_api/cpp_interfaces/base/ie_executable_network_base.hpp index b9d7833e3576b8..c195af7819386f 100644 --- a/inference-engine/src/plugin_api/cpp_interfaces/base/ie_executable_network_base.hpp +++ b/inference-engine/src/plugin_api/cpp_interfaces/base/ie_executable_network_base.hpp @@ -10,8 +10,8 @@ #pragma once #include -#include -#include +#include +#include #include #include #include @@ -33,7 +33,7 @@ class ExecutableNetworkBase : public IExecutableNetwork { public: /** * @brief Constructor with actual underlying implementation. - * @param impl Underplying implementation of type IExecutableNetworkInternal + * @param impl Underlying implementation of type IExecutableNetworkInternal */ explicit ExecutableNetworkBase(std::shared_ptr impl) { if (impl.get() == nullptr) { @@ -113,6 +113,13 @@ class ExecutableNetworkBase : public IExecutableNetwork { ~ExecutableNetworkBase() = default; }; +/** + * @brief Create an executable network public C++ object wrapper based on an internal implementation + * @ingroup ie_dev_api_exec_network_api + * @param impl An internal implementation for executable network + * @tparam T A type of internal implementation + * @return C++ wrapper for executable network + */ template inline typename InferenceEngine::ExecutableNetwork make_executable_network(std::shared_ptr impl) { typename ExecutableNetworkBase::Ptr net(new ExecutableNetworkBase(impl), [](IExecutableNetwork* p) { diff --git a/inference-engine/src/plugin_api/cpp_interfaces/base/ie_infer_async_request_base.hpp b/inference-engine/src/plugin_api/cpp_interfaces/base/ie_infer_async_request_base.hpp index 5892ef02846d5d..18ddb2a2e26548 100644 --- a/inference-engine/src/plugin_api/cpp_interfaces/base/ie_infer_async_request_base.hpp +++ b/inference-engine/src/plugin_api/cpp_interfaces/base/ie_infer_async_request_base.hpp @@ -10,7 +10,7 @@ #include "cpp_interfaces/exception2status.hpp" #include "cpp_interfaces/plugin_itt.hpp" -#include +#include #include "ie_iinfer_request.hpp" #include "ie_preprocess.hpp" #include "ie_profiling.hpp" @@ -29,7 +29,7 @@ class InferRequestBase : public IInferRequest { public: /** * @brief Constructor with actual underlying implementation.
- * @param impl Underplying implementation of type IAsyncInferRequestInternal + * @param impl Underlying implementation of type IAsyncInferRequestInternal */ explicit InferRequestBase(std::shared_ptr impl): _impl(impl) {} diff --git a/inference-engine/src/plugin_api/cpp_interfaces/base/ie_memory_state_base.hpp b/inference-engine/src/plugin_api/cpp_interfaces/base/ie_variable_state_base.hpp similarity index 74% rename from inference-engine/src/plugin_api/cpp_interfaces/base/ie_memory_state_base.hpp rename to inference-engine/src/plugin_api/cpp_interfaces/base/ie_variable_state_base.hpp index fe191a8e7f6ad5..f4977f4c18fb5b 100644 --- a/inference-engine/src/plugin_api/cpp_interfaces/base/ie_memory_state_base.hpp +++ b/inference-engine/src/plugin_api/cpp_interfaces/base/ie_variable_state_base.hpp @@ -7,24 +7,28 @@ #include #include "cpp_interfaces/exception2status.hpp" -#include "cpp_interfaces/impl/ie_memory_state_internal.hpp" +#include "cpp_interfaces/impl/ie_variable_state_internal.hpp" #include "ie_imemory_state.hpp" namespace InferenceEngine { /** - * @brief default implementation for IVariableState - * @ingroup ie_dev_api_mem_state_api + * @brief Default implementation for IVariableState + * @tparam T Minimal CPP implementation of IVariableStateInternal (e.g. VariableStateInternal) + * @ingroup ie_dev_api_variable_state_api */ template class VariableStateBase : public IVariableState { -protected: std::shared_ptr impl; public: + /** + * @brief Constructor with actual underlying implementation. + * @param impl Underlying implementation of type IVariableStateInternal + */ explicit VariableStateBase(std::shared_ptr impl): impl(impl) { if (impl == nullptr) { - THROW_IE_EXCEPTION << "VariableStateBase implementation not defined"; + THROW_IE_EXCEPTION << "VariableStateBase implementation is not defined"; } } diff --git a/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp b/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp index a78f43c562328a..0885efbc653482 100644 --- a/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp +++ b/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp @@ -55,6 +55,11 @@ class ExecutableNetworkThreadSafeDefault : public ExecutableNetworkInternal, } protected: + /** + * @brief Creates an asynchronous inference request from the synchronous request returned by CreateInferRequestImpl + * @tparam AsyncInferRequestType A type of asynchronous inference request used as a wrapper for the synchronous request + * @return A shared pointer to an asynchronous inference request + */ template IInferRequest::Ptr CreateAsyncInferRequestFromSync() { IInferRequest::Ptr asyncRequest; diff --git a/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_memory_state_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_memory_state_internal.hpp deleted file mode 100644 index 05f96d5f4e7607..00000000000000 --- a/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_memory_state_internal.hpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -namespace InferenceEngine { - -/** - * @brief minimal interface for memory state implementation - * @ingroup ie_dev_api_mem_state_api - */ -class VariableStateInternal : public IVariableStateInternal { - std::string name; - Blob::Ptr state; - -public: -
explicit VariableStateInternal(std::string name): name(name) {} - std::string GetName() const override { - return name; - } - void SetState(Blob::Ptr newState) override { - state = newState; - } - Blob::CPtr GetState() const override { - return state; - } -}; - -/* - * @brief For compatibility reasons. - */ -using MemoryStateInternal = VariableStateInternal; -} // namespace InferenceEngine diff --git a/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_plugin_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_plugin_internal.hpp index dec31fa96a3960..5b2975741688b2 100644 --- a/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_plugin_internal.hpp +++ b/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_plugin_internal.hpp @@ -58,17 +58,15 @@ class InferencePluginInternal : public IInferencePlugin { ~InferencePluginInternal() override = default; public: - ExecutableNetwork LoadNetwork(const ICNNNetwork& network, + ExecutableNetwork LoadNetwork(const CNNNetwork& network, const std::map& config) override { return LoadNetwork(network, config, nullptr); } - ExecutableNetwork LoadNetwork(const ICNNNetwork& network, const std::map& config, + ExecutableNetwork LoadNetwork(const CNNNetwork& network, const std::map& config, RemoteContext::Ptr context) override { - InputsDataMap networkInputs, networkInputsCloned; - OutputsDataMap networkOutputs, networkOutputsCloned; - network.getInputsInfo(networkInputs); - network.getOutputsInfo(networkOutputs); + InputsDataMap networkInputs = network.getInputsInfo(), networkInputsCloned; + OutputsDataMap networkOutputs = network.getOutputsInfo(), networkOutputsCloned; copyInputOutputInfo(networkInputs, networkOutputs, networkInputsCloned, networkOutputsCloned); ExecutableNetworkInternal::Ptr impl; @@ -124,7 +122,7 @@ class InferencePluginInternal : public IInferencePlugin { THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str; } - QueryNetworkResult QueryNetwork(const ICNNNetwork& /*network*/, const std::map& /*config*/) const override { + QueryNetworkResult QueryNetwork(const CNNNetwork& /*network*/, const std::map& /*config*/) const override { THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str; } @@ -159,28 +157,27 @@ class InferencePluginInternal : public IInferencePlugin { * @brief Creates an executable network from a parsed network object, users can create as many networks as they need * and use them simultaneously (up to the limitation of the HW resources) * @note The function is used in - * InferencePluginInternal::LoadNetwork(const ICNNNetwork&, const std::map&) + * InferencePluginInternal::LoadNetwork(const CNNNetwork&, const std::map&) * which performs common steps first and calls this plugin-dependent method implementation after. 
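Because LoadExeNetworkImpl now takes a CNNNetwork rather than an ICNNNetwork, a plugin override can read the input and output maps by value, which is what the getInputsInfo()/getOutputsInfo() change above relies on. A hedged sketch with placeholder names (MyPlugin, MyExecutableNetwork are assumptions, not part of this patch):

```cpp
// Sketch only: MyPlugin derives from InferencePluginInternal and
// MyExecutableNetwork is the plugin's ExecutableNetworkInternal implementation.
InferenceEngine::ExecutableNetworkInternal::Ptr MyPlugin::LoadExeNetworkImpl(
        const InferenceEngine::CNNNetwork& network,
        const std::map<std::string, std::string>& config) {
    // No out-parameters anymore: CNNNetwork returns the maps directly.
    InferenceEngine::InputsDataMap inputs = network.getInputsInfo();
    InferenceEngine::OutputsDataMap outputs = network.getOutputsInfo();
    return std::make_shared<MyExecutableNetwork>(network, config);
}
```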
* @param network A network object * @param config string-string map of config parameters relevant only for this load operation * @return Shared pointer to the ExecutableNetwork object */ - virtual ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const ICNNNetwork& network, + virtual ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const CNNNetwork& network, const std::map& config) = 0; /** * @brief Creates an executable network using remote context from a parsed network object, * users can create as many networks as they need and use them simultaneously (up to the limitation of the HW resources) * @note The function is used in - * InferencePluginInternal::LoadNetwork(const ICNNNetwork&, const std::map&, RemoteContext::Ptr) + * InferencePluginInternal::LoadNetwork(const CNNNetwork&, const std::map&, RemoteContext::Ptr) * which performs common steps first and calls this plugin-dependent method implementation after. * @param network A network object * @param context A remote context * @param config string-string map of config parameters relevant only for this load operation * @return Shared pointer to the ExecutableNetwork object */ - virtual ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const ICNNNetwork& network, - RemoteContext::Ptr context, + virtual ExecutableNetworkInternal::Ptr LoadExeNetworkImpl(const CNNNetwork& network, RemoteContext::Ptr context, const std::map& config) { (void)network; (void)context; diff --git a/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_variable_state_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_variable_state_internal.hpp new file mode 100644 index 00000000000000..11370bf8a7c93c --- /dev/null +++ b/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_variable_state_internal.hpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +namespace InferenceEngine { + +/** + * @brief Minimal interface for variable state implementation + * @ingroup ie_dev_api_variable_state_api + */ +class VariableStateInternal : public IVariableStateInternal { + std::string name; + Blob::Ptr state; + +public: + /** + * @brief Constructs a variable state with a given name + * @param name A name of variable state + */ + explicit VariableStateInternal(std::string name) : name(name) {} + + /** + * @brief Gets a variable state name + * @return A string representing variable state name + */ + std::string GetName() const override { + return name; + } + + /** + * @brief Sets the new state for the next inference + * @param newState A new state + */ + void SetState(Blob::Ptr newState) override { + state = newState; + } + + /** + * @brief Returns the value of the variable state. + * @return The value of the variable state + */ + Blob::CPtr GetState() const override { + return state; + } +}; + +/** + * @brief For compatibility reasons. 
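Note that the new VariableStateInternal helper stores the name and state blob but does not override IVariableStateInternal::Reset(), so it remains abstract; a plugin derives from it and supplies the reset behaviour. A small hedged sketch (MyCellState is a made-up name):

```cpp
// Illustrative plugin-side state built on the VariableStateInternal helper above.
class MyCellState : public InferenceEngine::VariableStateInternal {
public:
    using VariableStateInternal::VariableStateInternal;  // inherit the name-taking constructor

    void Reset() override {
        // For the sketch we simply drop the blob; a real plugin would restore
        // the default value of the corresponding ReadValue node.
        SetState(InferenceEngine::Blob::Ptr{});
    }
};

// auto state = std::make_shared<MyCellState>("lstm_cell_state");
```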
+ */ +using MemoryStateInternal = VariableStateInternal; + +} // namespace InferenceEngine diff --git a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp index 17cc927813f07d..8583302375502e 100644 --- a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp +++ b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp @@ -4,7 +4,7 @@ #pragma once -#include +#include #include #include #include diff --git a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp index c09a15aa25bc7d..d749c4b75b2e07 100644 --- a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp +++ b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iinfer_request_internal.hpp @@ -4,7 +4,7 @@ #pragma once -#include +#include #include #include #include diff --git a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_imemory_state_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_imemory_state_internal.hpp deleted file mode 100644 index ef37d8b824131e..00000000000000 --- a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_imemory_state_internal.hpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include -#include - -namespace InferenceEngine { -/** - * @interface IVariableStateInternal - * @brief minimal interface for memory state implementation - * @ingroup ie_dev_api_mem_state_api - */ -class IVariableStateInternal { -public: - using Ptr = std::shared_ptr; - - virtual ~IVariableStateInternal() = default; - virtual std::string GetName() const = 0; - virtual void Reset() = 0; - virtual void SetState(Blob::Ptr newState) = 0; - virtual Blob::CPtr GetState() const = 0; - INFERENCE_ENGINE_DEPRECATED("Use GetState function instead") - virtual Blob::CPtr GetLastState() const {return GetState();} -}; - -/* - * @brief For compatibility reasons. 
- */ -using IMemoryStateInternal = IVariableStateInternal; -} // namespace InferenceEngine diff --git a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp index d949e2de3b25a1..935ac60dff7b97 100644 --- a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp +++ b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp @@ -163,18 +163,18 @@ class IInferencePlugin : public details::IRelease, * @param config A string-string map of config parameters relevant only for this load operation * @return Created Executable Network object */ - virtual ExecutableNetwork LoadNetwork(const ICNNNetwork& network, + virtual ExecutableNetwork LoadNetwork(const CNNNetwork& network, const std::map& config) = 0; /** * @brief Creates an executable network from network object, on specified remote context - * @param network - a network object acquired from InferenceEngine::Core::ReadNetwork + * @param network A network object acquired from InferenceEngine::Core::ReadNetwork * @param config string-string map of config parameters relevant only for this load operation - * @param context - a pointer to plugin context derived from RemoteContext class used to + * @param context A pointer to plugin context derived from RemoteContext class used to * execute the network * @return Created Executable Network object */ - virtual ExecutableNetwork LoadNetwork(const ICNNNetwork& network, + virtual ExecutableNetwork LoadNetwork(const CNNNetwork& network, const std::map& config, RemoteContext::Ptr context) = 0; /** @@ -270,7 +270,7 @@ class IInferencePlugin : public details::IRelease, * @param[in] config The map of configuration parameters * @return The result of query operator containing supported layers map */ - virtual QueryNetworkResult QueryNetwork(const ICNNNetwork& network, const std::map& config) const = 0; + virtual QueryNetworkResult QueryNetwork(const CNNNetwork& network, const std::map& config) const = 0; }; } // namespace InferenceEngine diff --git a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp new file mode 100644 index 00000000000000..a5662b99243de2 --- /dev/null +++ b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_ivariable_state_internal.hpp @@ -0,0 +1,71 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include +#include + +namespace InferenceEngine { + +/** + * @interface IVariableStateInternal + * @brief Minimal interface for variable state implementation + * @ingroup ie_dev_api_variable_state_api + */ +class IVariableStateInternal { +public: + /** + * @brief A shared pointer to a IVariableStateInternal interface + */ + using Ptr = std::shared_ptr; + + /** + * @brief A default virtual dtor + */ + virtual ~IVariableStateInternal() = default; + + /** + * @brief Gets a variable state name + * @return A string representing variable state name + */ + virtual std::string GetName() const = 0; + + /** + * @brief Reset internal variable state for relevant infer request, to a value specified as + * default for according `ReadValue` node + */ + virtual void Reset() = 0; + + /** + * @brief Sets the new state for the next inference + * @param newState A new state + */ + virtual void SetState(Blob::Ptr newState) = 0; + + /** + * @brief Returns 
the value of the variable state. + * @return The value of the variable state + */ + virtual Blob::CPtr GetState() const = 0; + + /** + * @deprecated Use IVariableStateInternal::GetState method instead + * @brief Returns the value of the variable state. + * @return The value of the variable state + */ + INFERENCE_ENGINE_DEPRECATED("Use IVariableStateInternal::GetState method instead") + virtual Blob::CPtr GetLastState() const { + return GetState(); + } +}; + +/** + * @brief For compatibility reasons. + */ +using IMemoryStateInternal = IVariableStateInternal; + +} // namespace InferenceEngine diff --git a/inference-engine/src/plugin_api/exec_graph_info.hpp b/inference-engine/src/plugin_api/exec_graph_info.hpp index becd982e03fd6a..c29e73faf7029e 100644 --- a/inference-engine/src/plugin_api/exec_graph_info.hpp +++ b/inference-engine/src/plugin_api/exec_graph_info.hpp @@ -129,6 +129,13 @@ class INFERENCE_ENGINE_API_CLASS(ExecutionNode) : public ngraph::Node { return cloned; } + /** + * @brief Visits attributes of the node + * + * @param[in] visitor An attribute visitor + * + * @return Returns `true` if an operation has completed successfully + */ bool visit_attributes(ngraph::AttributeVisitor& visitor) override { return true; } diff --git a/inference-engine/src/plugin_api/ie_icore.hpp b/inference-engine/src/plugin_api/ie_icore.hpp index 5b06d60504129a..42210ef59fe5cd 100644 --- a/inference-engine/src/plugin_api/ie_icore.hpp +++ b/inference-engine/src/plugin_api/ie_icore.hpp @@ -85,7 +85,7 @@ class ICore { * @param config Optional map of pairs: (config parameter name, config parameter value) * @return An object containing a map of pairs a layer name -> a device name supporting this layer. */ - virtual QueryNetworkResult QueryNetwork(const ICNNNetwork& network, const std::string& deviceName, + virtual QueryNetworkResult QueryNetwork(const CNNNetwork& network, const std::string& deviceName, const std::map& config) const = 0; /** diff --git a/inference-engine/src/plugin_api/ie_metric_helpers.hpp b/inference-engine/src/plugin_api/ie_metric_helpers.hpp index bd1922a936f67a..57030a856f8db0 100644 --- a/inference-engine/src/plugin_api/ie_metric_helpers.hpp +++ b/inference-engine/src/plugin_api/ie_metric_helpers.hpp @@ -54,27 +54,4 @@ struct MetricType; __VA_ARGS__; \ return _##name##_value -/** - * @def IE_SET_METRIC(name, ...) - * @ingroup ie_dev_api - * @brief Set metric with specified @p name and arguments `...`. Example: - * @code - * Parameter result = IE_SET_METRIC(SUPPORTED_METRICS, { - METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS), - METRIC_KEY(SUPPORTED_METRICS), - METRIC_KEY(NETWORK_NAME), - METRIC_KEY(SUPPORTED_CONFIG_KEYS) - }); - * @endcode - * - * @param name The metric name - * @param ... A metric value - * - * @return A metric value wrapped with Parameter. Must be used as a left-side argument to assignment operator. - */ -#define IE_SET_METRIC(name, ...) 
\ - [&] { \ - IE_SET_METRIC_RETURN(name, __VA_ARGS__); \ - }() - #include "ie_plugin_config.hpp" diff --git a/inference-engine/src/plugin_api/ie_ngraph_utils.hpp b/inference-engine/src/plugin_api/ie_ngraph_utils.hpp index 8bb7a7b4cfdede..3a05fcbcfe5efb 100644 --- a/inference-engine/src/plugin_api/ie_ngraph_utils.hpp +++ b/inference-engine/src/plugin_api/ie_ngraph_utils.hpp @@ -19,6 +19,8 @@ inline ::ngraph::element::Type convertPrecision(const Precision& precision) { return ::ngraph::element::Type(::ngraph::element::Type_t::undefined); case Precision::FP32: return ::ngraph::element::Type(::ngraph::element::Type_t::f32); + case Precision::FP64: + return ::ngraph::element::Type(::ngraph::element::Type_t::f64); case Precision::FP16: return ::ngraph::element::Type(::ngraph::element::Type_t::f16); case Precision::BF16: @@ -95,6 +97,8 @@ inline Precision convertPrecision(const ::ngraph::element::Type& precision) { return Precision(Precision::FP16); case ::ngraph::element::Type_t::f32: return Precision(Precision::FP32); + case ::ngraph::element::Type_t::f64: + return Precision(Precision::FP64); case ::ngraph::element::Type_t::bf16: return Precision(Precision::BF16); case ::ngraph::element::Type_t::i8: diff --git a/inference-engine/src/plugin_api/precision_utils.h b/inference-engine/src/plugin_api/precision_utils.h index 970c1c00ffa2be..696e1f70bdec28 100644 --- a/inference-engine/src/plugin_api/precision_utils.h +++ b/inference-engine/src/plugin_api/precision_utils.h @@ -38,8 +38,8 @@ namespace InferenceEngine { * @defgroup ie_dev_api_async_infer_request_api Asynchronous Inference Request base classes * @brief A set of base and helper classes to implement asynchronous inference request class * - * @defgroup ie_dev_api_mem_state_api Memory state base classes - * @brief A set of base and helper classes to implement memory state + * @defgroup ie_dev_api_variable_state_api Variable state base classes + * @brief A set of base and helper classes to implement variable state * * @defgroup ie_dev_api_threading Threading utilities * @brief Threading API providing task executors for asynchronous operations @@ -140,6 +140,7 @@ f32tof16Arrays(ie_fp16* dst, const float* src, size_t nelem, float scale = 1.f, * @ingroup ie_dev_api_precision * * @param value Value to be converted + * @return A saturated value */ template ::value && std::is_integral::value && @@ -166,6 +167,7 @@ inline OutT saturate_cast(const InT& value) { * @ingroup ie_dev_api_precision * * @param value Value to be converted + * @return A saturated value */ template ::value && std::is_integral::value && @@ -193,6 +195,7 @@ inline OutT saturate_cast(const InT& value) { * @ingroup ie_dev_api_precision * * @param value Value to be converted + * @return A saturated value */ template inline InT saturate_cast(const InT& value) { diff --git a/inference-engine/src/readers/ir_reader/CMakeLists.txt b/inference-engine/src/readers/ir_reader/CMakeLists.txt index 424c54c11db6ed..f46a73f891b828 100644 --- a/inference-engine/src/readers/ir_reader/CMakeLists.txt +++ b/inference-engine/src/readers/ir_reader/CMakeLists.txt @@ -47,6 +47,6 @@ add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) # install install(TARGETS ${TARGET_NAME} - RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core - ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core) diff --git 
a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp index db8ca524467189..306c9aad0740d7 100644 --- a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp +++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp @@ -397,7 +397,6 @@ std::shared_ptr V10Parser::createNode(const std::vector> creators = { std::make_shared>("AvgPool"), - std::make_shared>("Clamp"), std::make_shared>("Convert"), std::make_shared>("CTCGreedyDecoder"), std::make_shared>("DeformableConvolution"), @@ -405,38 +404,28 @@ std::shared_ptr V10Parser::createNode(const std::vector>("SpaceToDepth"), std::make_shared>("DepthToSpace"), std::make_shared>("Subtract"), - std::make_shared>("MatMul"), std::make_shared>("Broadcast"), std::make_shared>("Reshape"), std::make_shared>("StridedSlice"), - std::make_shared>("ELU"), - std::make_shared>("FakeQuantize"), std::make_shared>("Gather"), - std::make_shared>("GatherTree"), std::make_shared>("GreaterEqual"), - std::make_shared>("Convolution"), std::make_shared>("GroupConvolution"), std::make_shared>("ConvolutionBackpropData"), std::make_shared>("GroupConvolutionBackpropData"), std::make_shared>("BinaryConvolution"), - std::make_shared>("GRN"), - std::make_shared>("HardSigmoid"), std::make_shared>("SquaredDifference"), std::make_shared>("LessEqual"), std::make_shared>("Equal"), std::make_shared>("NotEqual"), std::make_shared>("FloorMod"), - std::make_shared>("LRN"), std::make_shared>("MVN"), std::make_shared>("LSTMCell"), std::make_shared>("MaxPool"), std::make_shared>("Minimum"), std::make_shared>("NonMaxSuppression"), std::make_shared>("NormalizeL2"), - std::make_shared>("OneHot"), std::make_shared>("PReLU"), std::make_shared>("ReLU"), - std::make_shared>("Pad"), std::make_shared>("Power"), std::make_shared>("ReverseSequence"), std::make_shared>("PriorBox"), @@ -836,20 +825,6 @@ std::shared_ptr V10Parser::LayerCreator::cre return std::make_shared(inputs[0], inputs[1], attr); } -// FakeQuantize layer -template <> -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights, - const GenericLayerParams& layerParsePrms) { - checkParameters(inputs, layerParsePrms, 5); - pugi::xml_node dn = node.child("data"); - if (dn.empty()) - THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name; - - return std::make_shared(inputs[0], inputs[1], inputs[2], inputs[3], inputs[4], - GetUIntAttr(dn, "levels")); -} - // ReverseSequence layer template <> std::shared_ptr V10Parser::LayerCreator::createLayer(const ngraph::OutputVector & inputs, const pugi::xml_node& node, @@ -907,43 +882,6 @@ std::shared_ptr V10Parser::LayerCreator -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights, - const GenericLayerParams& layerParsePrms) { - pugi::xml_node dn = node.child("data"); - - if (dn.empty()) - THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name; - - std::string pad_mode_str = GetStrAttr(dn, "pad_mode"); - ngraph::op::PadMode pad_mode; - - if (pad_mode_str == "constant") { - pad_mode = ngraph::op::PadMode::CONSTANT; - } else if (pad_mode_str == "edge") { - pad_mode = ngraph::op::PadMode::EDGE; - } else if (pad_mode_str == "reflect") { - pad_mode = ngraph::op::PadMode::REFLECT; - } else if (pad_mode_str == "symmetric") { - pad_mode = 
ngraph::op::PadMode::SYMMETRIC; - } else { - THROW_IE_EXCEPTION << "Pad mode: " << pad_mode_str << " is not supported"; - } - - if (pad_mode == ngraph::op::PadMode::CONSTANT) { - if (inputs.size() == 3) { - return std::make_shared(inputs[0], inputs[1], inputs[2], pad_mode); - } - checkParameters(inputs, layerParsePrms, 4); - return std::make_shared(inputs[0], inputs[1], inputs[2], inputs[3], pad_mode); - } - - checkParameters(inputs, layerParsePrms, 3); - return std::make_shared(inputs[0], inputs[1], inputs[2], pad_mode); -} - // SquaredDifference layer template <> std::shared_ptr V10Parser::LayerCreator::createLayer( @@ -1015,40 +953,6 @@ std::shared_ptr V10Parser::LayerCreator::createLa return std::make_shared(inputs[0], across, normalize_variance, eps); } -// LRN layer -template <> -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights, - const GenericLayerParams& layerParsePrms) { - checkParameters(inputs, layerParsePrms, 2); - pugi::xml_node dn = node.child("data"); - if (dn.empty()) - THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name; - - return std::make_shared(inputs[0], - inputs[1], - GetFloatAttr(dn, "alpha"), - GetFloatAttr(dn, "beta"), - GetFloatAttr(dn, "bias"), - GetUInt64Attr(dn, "size")); -} - -// Clamp layer -template <> -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights, - const GenericLayerParams& layerParsePrms) { - checkParameters(inputs, layerParsePrms, 1); - pugi::xml_node dn = node.child("data"); - - if (dn.empty()) - THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name; - - double maxVal = GetFloatAttr(dn, "max"); - double minVal = GetFloatAttr(dn, "min"); - return std::make_shared(inputs[0], minVal, maxVal); -} - // VariadicSplit layer template <> std::shared_ptr V10Parser::LayerCreator::createLayer( @@ -1073,20 +977,6 @@ std::shared_ptr V10Parser::LayerCreator::cr return std::make_shared(inputs[0], inputs[1], num_splits); } -// ELU layer -template <> -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights, - const GenericLayerParams& layerParsePrms) { - checkParameters(inputs, layerParsePrms, 1); - pugi::xml_node dn = node.child("data"); - - if (dn.empty()) - THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name; - - return std::make_shared(inputs[0], GetFloatAttr(dn, "alpha")); -} - // SpaceToDepth layer template <> std::shared_ptr V10Parser::LayerCreator::createLayer( @@ -1248,20 +1138,6 @@ std::shared_ptr V10Parser::LayerCreator::cr return std::make_shared(inputs[0], inputs[1]); } -// MatMul layer -template <> -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights, - const GenericLayerParams& layerParsePrms) { - checkParameters(inputs, layerParsePrms, 2); - pugi::xml_node dn = node.child("data"); - - auto transpose_a = GetBoolAttr(dn, "transpose_a", false); - auto transpose_b = GetBoolAttr(dn, "transpose_b", false); - - return std::make_shared(inputs[0], inputs[1], transpose_a, transpose_b); -} - // Softmax layer template <> std::shared_ptr V10Parser::LayerCreator::createLayer( @@ -1350,36 +1226,6 @@ std::shared_ptr 
V10Parser::LayerCreator -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights, - const GenericLayerParams& layerParsePrms) { - checkParameters(inputs, layerParsePrms, 2); - pugi::xml_node dn = node.child("data"); - - if (dn.empty()) - THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name; - - ngraph::op::PadType pad_type = ngraph::op::PadType::EXPLICIT; - std::string auto_pad = GetStrAttr(dn, "auto_pad", ""); - if (auto_pad == "same_lower") { - pad_type = ngraph::op::PadType::SAME_LOWER; - } else if (auto_pad == "same_upper") { - pad_type = ngraph::op::PadType::SAME_UPPER; - } else if (auto_pad == "valid") { - pad_type = ngraph::op::PadType::VALID; - } - - auto strides = ngraph::Strides(getParameters(dn, "strides")); - auto dilations = ngraph::Strides(getParameters(dn, "dilations")); - auto pads_begin = ngraph::CoordinateDiff(getParameters(dn, "pads_begin", {})); - auto pads_end = ngraph::CoordinateDiff(getParameters(dn, "pads_end", {})); - - return std::make_shared(inputs[0], inputs[1], strides, pads_begin, pads_end, - dilations, pad_type); -} - // GroupConvolution layer template <> std::shared_ptr V10Parser::LayerCreator::createLayer( @@ -1688,29 +1534,6 @@ std::shared_ptr V10Parser::LayerCreator::c return std::make_shared(inputs[0], inputs[1], inputs[2]); } -// GatherTree layer -template <> -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights, - const GenericLayerParams& layerParsePrms) { - checkParameters(inputs, layerParsePrms, 4); - return std::make_shared(inputs[0], inputs[1], inputs[2], inputs[3]); -} - -// OneHot layer -template <> -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights, - const GenericLayerParams& layerParsePrms) { - checkParameters(inputs, layerParsePrms, 4); - - pugi::xml_node dn = node.child("data"); - if (dn.empty()) - THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name; - - return std::make_shared(inputs[0], inputs[1], inputs[2], inputs[3], GetInt64Attr(dn, "axis")); -} - // NormalizeL2 layer template <> std::shared_ptr V10Parser::LayerCreator::createLayer( @@ -1736,29 +1559,6 @@ std::shared_ptr V10Parser::LayerCreator:: return std::make_shared(inputs[0], inputs[1], eps, em); } -// HardSigmoid layer -template <> -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector & inputs, const pugi::xml_node& node, const Blob::CPtr& weights, - const GenericLayerParams& layerParsePrms) { - checkParameters(inputs, layerParsePrms, 3); - return std::make_shared(inputs[0], inputs[1], inputs[2]); -} - -// GRN layer -template <> -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights, - const GenericLayerParams& layerParsePrms) { - checkParameters(inputs, layerParsePrms, 1); - pugi::xml_node dn = node.child("data"); - - if (dn.empty()) - THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name; - - return std::make_shared(inputs[0], GetFloatAttr(dn, "bias")); -} - // LogicalAnd layer template <> std::shared_ptr V10Parser::LayerCreator::createLayer( diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp 
b/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp index d2dc3807e75210..c103eadc68479c 100644 --- a/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp +++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp @@ -243,6 +243,11 @@ class V10Parser : public IParser { } else if (auto a = ngraph::as_type>(&adapter)) { if (!getStrAttribute(node.child("data"), name, val)) return; static_cast(*a) = ngraph::as_enum(val); + } else if (auto a = ngraph::as_type>(&adapter)) { + std::vector shape; + if (!getParameters(node.child("data"), name, shape)) return; + std::vector coord_diff(shape.begin(), shape.end()); + static_cast(*a) = ngraph::CoordinateDiff(coord_diff); } else { THROW_IE_EXCEPTION << "Error IR reading. Attribute adapter can not be found for " << name << " parameter"; diff --git a/inference-engine/src/readers/ir_reader_v7/CMakeLists.txt b/inference-engine/src/readers/ir_reader_v7/CMakeLists.txt index 50721bd95a3867..71c22e6a25fefb 100644 --- a/inference-engine/src/readers/ir_reader_v7/CMakeLists.txt +++ b/inference-engine/src/readers/ir_reader_v7/CMakeLists.txt @@ -38,6 +38,9 @@ ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) +if(WIN32) + set_target_properties(${TARGET_NAME} PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME}) +endif() # code style add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) diff --git a/inference-engine/src/readers/onnx_reader/CMakeLists.txt b/inference-engine/src/readers/onnx_reader/CMakeLists.txt index dbc5ec12e6aa1a..754de3a90ef7ca 100644 --- a/inference-engine/src/readers/onnx_reader/CMakeLists.txt +++ b/inference-engine/src/readers/onnx_reader/CMakeLists.txt @@ -36,7 +36,7 @@ add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) # install install(TARGETS ${TARGET_NAME} - RUNTIME DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core - ARCHIVE DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core + RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core + ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core) diff --git a/inference-engine/src/transformations/include/ngraph_ops/convolution_ie.hpp b/inference-engine/src/transformations/include/ngraph_ops/convolution_ie.hpp index 9dd5feb3164ce4..81891716b45c69 100644 --- a/inference-engine/src/transformations/include/ngraph_ops/convolution_ie.hpp +++ b/inference-engine/src/transformations/include/ngraph_ops/convolution_ie.hpp @@ -86,6 +86,8 @@ class TRANSFORMATIONS_API ConvolutionIE : public Op { void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector & new_args) const override; /// \return The strides. 
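The new on_adapter branch above exists because attributes such as pads_begin/pads_end are signed, so the raw integers read from the <data> element have to be rebuilt as an ngraph::CoordinateDiff before being handed to the AttributeAdapter; this is also what lets ConvolutionIE::visit_attributes (declared above, defined further down) go through the generic attribute machinery. A stand-alone illustration of that conversion with made-up values:

```cpp
// Mirrors the new parser branch for an attribute like pads_begin;
// the {1, 1} values are only an example.
#include <cstdint>
#include <vector>
#include <ngraph/coordinate_diff.hpp>

void ConvertPads() {
    std::vector<int64_t> shape = {1, 1};  // as parsed from <data pads_begin="1,1"/>
    std::vector<std::ptrdiff_t> coord_diff(shape.begin(), shape.end());
    ngraph::CoordinateDiff pads_begin(coord_diff);  // value the adapter receives
    (void)pads_begin;
}
```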
diff --git a/inference-engine/src/transformations/include/transformations/init_node_info.hpp b/inference-engine/src/transformations/include/transformations/init_node_info.hpp index e320f19803b633..0bf6e917a61686 100644 --- a/inference-engine/src/transformations/include/transformations/init_node_info.hpp +++ b/inference-engine/src/transformations/include/transformations/init_node_info.hpp @@ -16,25 +16,6 @@ #include -/** - * @defgroup ie_transformation_api Inference Engine Transformation API - * @brief Defines Inference Engine Transformations API which is used to transform ngraph::Function - * - * @{ - * @defgroup ie_runtime_attr_api Runtime information - * @brief A mechanism of runtime information extension - * - * @defgroup ie_transformation_common_api Common optimization passes - * @brief A set of common optimization passes - * - * @defgroup ie_transformation_to_opset2_api Conversion from opset3 to opset2 - * @brief A set of conversion downgrade passes from opset3 to opset2 - * - * @defgroup ie_transformation_to_opset1_api Conversion from opset2 to opset1 - * @brief A set of conversion downgrade passes from opset2 to opset1 - * @} - */ - /** * @brief ngraph namespace */ diff --git a/inference-engine/src/transformations/include/transformations_visibility.hpp b/inference-engine/src/transformations/include/transformations_visibility.hpp index 9aeee40b7d2a0e..4d19fcf3cae421 100644 --- a/inference-engine/src/transformations/include/transformations_visibility.hpp +++ b/inference-engine/src/transformations/include/transformations_visibility.hpp @@ -6,6 +6,30 @@ #include "ngraph/visibility.hpp" +/** + * @file transformations_visibility.hpp + * @brief Defines visibility settings for Inference Engine Transformations library + */ + +/** + * @defgroup ie_transformation_api Inference Engine Transformation API + * @brief Defines Inference Engine Transformations API which is used to transform ngraph::Function + * + * @{ + * @defgroup ie_runtime_attr_api Runtime information + * @brief A mechanism of runtime information extension + * + * @defgroup ie_transformation_common_api Common optimization passes + * @brief A set of common optimization passes + * + * @defgroup ie_transformation_to_opset2_api Conversion from opset3 to opset2 + * @brief A set of conversion downgrade passes from opset3 to opset2 + * + * @defgroup ie_transformation_to_opset1_api Conversion from opset2 to opset1 + * @brief A set of conversion downgrade passes from opset2 to opset1 + * @} + */ + #ifdef inference_engine_transformations_EXPORTS #define TRANSFORMATIONS_API NGRAPH_HELPER_DLL_EXPORT #else diff --git a/inference-engine/src/transformations/src/ngraph_ops/convolution_ie.cpp b/inference-engine/src/transformations/src/ngraph_ops/convolution_ie.cpp index 43e8db7e2d99c7..ef8507ed63ae5c 100644 --- a/inference-engine/src/transformations/src/ngraph_ops/convolution_ie.cpp +++ b/inference-engine/src/transformations/src/ngraph_ops/convolution_ie.cpp @@ -172,3 +172,13 @@ shared_ptr op::ConvolutionIE::clone_with_new_inputs(const ngraph::OutputVe throw ngraph_error("Unsupported number of arguments for ConvolutionIE operation"); } + +bool op::ConvolutionIE::visit_attributes(AttributeVisitor& visitor) { + visitor.on_attribute("strides", m_strides); + visitor.on_attribute("dilations", m_dilations); + visitor.on_attribute("pads_begin", m_pads_begin); + visitor.on_attribute("pads_end", m_pads_end); + visitor.on_attribute("auto_pad", m_auto_pad); + visitor.on_attribute("group", m_group); + return true; +} diff --git 
a/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/convert_extract_image_patches_to_reorg_yolo.hpp b/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/convert_extract_image_patches_to_reorg_yolo.hpp index aeef20650cad1a..743ae69957f817 100644 --- a/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/convert_extract_image_patches_to_reorg_yolo.hpp +++ b/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/convert_extract_image_patches_to_reorg_yolo.hpp @@ -10,6 +10,7 @@ namespace vpu { class ConvertExtractImagePatchesToReorgYolo : public ngraph::pass::MatcherPass { public: + NGRAPH_RTTI_DECLARATION; ConvertExtractImagePatchesToReorgYolo(); }; diff --git a/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/mish_decomposition.hpp b/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/mish_decomposition.hpp new file mode 100644 index 00000000000000..f9252ca416530f --- /dev/null +++ b/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/mish_decomposition.hpp @@ -0,0 +1,18 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace vpu { + +class MishDecomposition : public ngraph::pass::MatcherPass { +public: + NGRAPH_RTTI_DECLARATION; + MishDecomposition(); +}; + +} // namespace vpu + diff --git a/inference-engine/src/vpu/common/src/ngraph/transformations/convert_extract_image_patches_to_reorg_yolo.cpp b/inference-engine/src/vpu/common/src/ngraph/transformations/convert_extract_image_patches_to_reorg_yolo.cpp index 4a28a14535faf2..c1cf63e3999df7 100644 --- a/inference-engine/src/vpu/common/src/ngraph/transformations/convert_extract_image_patches_to_reorg_yolo.cpp +++ b/inference-engine/src/vpu/common/src/ngraph/transformations/convert_extract_image_patches_to_reorg_yolo.cpp @@ -10,6 +10,8 @@ #include #include +NGRAPH_RTTI_DEFINITION(vpu::ConvertExtractImagePatchesToReorgYolo, "ConvertExtractImagePatchesToReorgYolo", 0); + namespace vpu { ConvertExtractImagePatchesToReorgYolo::ConvertExtractImagePatchesToReorgYolo() { diff --git a/inference-engine/src/vpu/common/src/ngraph/transformations/mish_decomposition.cpp b/inference-engine/src/vpu/common/src/ngraph/transformations/mish_decomposition.cpp new file mode 100644 index 00000000000000..76fb4d60d3e3b2 --- /dev/null +++ b/inference-engine/src/vpu/common/src/ngraph/transformations/mish_decomposition.cpp @@ -0,0 +1,49 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "vpu/ngraph/transformations/mish_decomposition.hpp" + +#include +#include +#include + +#include +#include + +NGRAPH_RTTI_DEFINITION(vpu::MishDecomposition, "MishDecomposition", 0); + +namespace vpu { + +MishDecomposition::MishDecomposition() { + const auto mishPattern = ngraph::pattern::wrap_type(); + + ngraph::matcher_pass_callback callback = [this](ngraph::pattern::Matcher &matcher) { + const auto& mish = ngraph::as_type_ptr(matcher.get_match_root()); + + if (!mish || m_transformation_callback(mish)) { + return false; + } + + const auto inputType = mish->input_value(0).get_element_type(); + const auto addConst = ngraph::opset5::Constant::create(inputType, ngraph::Shape{}, {1.0f}); + + const auto exp = std::make_shared(mish->input_value(0)); + const auto add = std::make_shared(exp, addConst); + const auto log = std::make_shared(add); + const auto tanh = std::make_shared(log); + const auto mul = std::make_shared(mish->input_value(0), tanh); + + 
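+        // The subgraph above computes mish(x) = x * tanh(ln(1 + exp(x))), i.e. x * tanh(softplus(x)), built from opset5 Exp, Add, Log, Tanh and Multiply nodes.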
mul->set_friendly_name(mish->get_friendly_name()); + ngraph::copy_runtime_info(mish, {addConst, exp, add, log, tanh, mul}); + ngraph::replace_node(mish, mul); + + return true; + }; + + const auto matcher = std::make_shared(mishPattern, "MishDecomposition"); + register_matcher(matcher, callback); +} + +} // namespace vpu + diff --git a/inference-engine/src/vpu/graph_transformer/include/vpu/graph_transformer.hpp b/inference-engine/src/vpu/graph_transformer/include/vpu/graph_transformer.hpp index 0aaf88b9df1f1d..c26813c9728043 100644 --- a/inference-engine/src/vpu/graph_transformer/include/vpu/graph_transformer.hpp +++ b/inference-engine/src/vpu/graph_transformer/include/vpu/graph_transformer.hpp @@ -61,6 +61,7 @@ struct CompilationConfig final { bool mergeHwPoolToConv = true; bool hwDilation = false; bool forceDeprecatedCnnConversion = false; + bool enableEarlyEltwiseReLUFusion = true; std::map> ioStrides; diff --git a/inference-engine/src/vpu/graph_transformer/include/vpu/private_plugin_config.hpp b/inference-engine/src/vpu/graph_transformer/include/vpu/private_plugin_config.hpp index 5c3541811b83f1..d52ee8bba3ee6b 100644 --- a/inference-engine/src/vpu/graph_transformer/include/vpu/private_plugin_config.hpp +++ b/inference-engine/src/vpu/graph_transformer/include/vpu/private_plugin_config.hpp @@ -36,6 +36,7 @@ DECLARE_VPU_CONFIG(MYRIAD_PER_LAYER); DECLARE_VPU_CONFIG(MYRIAD_PER_STAGE); DECLARE_VPU_CONFIG(MYRIAD_ENABLE_MEMORY_TYPES_ANNOTATION); +DECLARE_VPU_CONFIG(MYRIAD_ENABLE_EARLY_ELTWISE_RELU_FUSION); /** * @brief Used to disable analyzeWeightableLayers pass in cases where diff --git a/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp b/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp index f4fa3b65df2d2d..58ac221cfa5ab3 100644 --- a/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp @@ -32,6 +32,7 @@ #include #include #include +#include #include #include "vpu/ngraph/transformations/dynamic_to_static_shape.hpp" #include "vpu/ngraph/transformations/eliminate_shapeof_after_dsr.hpp" @@ -181,6 +182,8 @@ ie::ICNNNetwork::Ptr FrontEnd::convertNetwork(ie::ICNNNetwork& network) { manager.register_pass(); manager.register_pass(); manager.register_pass(); + // WA: Mish is not accurate enough. 
Remove this decomposition when mish is improved + manager.register_pass(); manager.register_pass(); manager.register_pass(); manager.register_pass(); diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/pass_manager.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/pass_manager.cpp index 738044bc9fad75..f6a9291a9efc6c 100644 --- a/inference-engine/src/vpu/graph_transformer/src/middleend/pass_manager.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/middleend/pass_manager.cpp @@ -249,8 +249,10 @@ PassSet::Ptr PassManager::buildMiddleEnd() { ADD_PASS(mergeReLUAndBias); ADD_DUMP_PASS("mergeReLUAndBias"); - ADD_PASS(mergeEltwiseAndReLUDynamic); - ADD_DUMP_PASS("mergeEltwiseAndReLUDynamic"); + if (env.config.enableEarlyEltwiseReLUFusion) { + ADD_PASS(mergeEltwiseAndReLUDynamic); + ADD_DUMP_PASS("mergeEltwiseAndReLUDynamic"); + } // // Data layout adjustment diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/merge_eltwise_and_relu.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/merge_eltwise_and_relu.cpp index 43a8312f606783..21126f388f7b2d 100644 --- a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/merge_eltwise_and_relu.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/merge_eltwise_and_relu.cpp @@ -9,6 +9,8 @@ #include +#include + namespace vpu { namespace { @@ -28,15 +30,18 @@ class PassImpl final : public Pass { }; void PassImpl::run(const Model& model) { - if (m_mode == MergeMode::DYNAMIC_NETWORK) { - VPU_PROFILE(mergeEltwiseAndReLUDynamic); - if (model->isStatic()) { - return; - } - } else if (m_mode == MergeMode::STATIC_NETWORK) { - VPU_PROFILE(mergeEltwiseAndReLUStatic); - if (model->isDynamic()) { - return; + const bool enableEarlyEltwiseReLUFusion = CompileEnv::get().config.enableEarlyEltwiseReLUFusion; + if (enableEarlyEltwiseReLUFusion) { + if (m_mode == MergeMode::DYNAMIC_NETWORK) { + VPU_PROFILE(mergeEltwiseAndReLUDynamic); + if (model->isStatic()) { + return; + } + } else if (m_mode == MergeMode::STATIC_NETWORK) { + VPU_PROFILE(mergeEltwiseAndReLUStatic); + if (model->isDynamic()) { + return; + } } } @@ -80,7 +85,8 @@ void PassImpl::run(const Model& model) { auto reluInput = reluStage->input(0); auto reluOutput = reluStage->output(0); - if (model->isDynamic() || reluInput->strides() == reluOutput->strides() || reluOutput->checkStrides(StridesRequirement::compact())) { + const auto stridesAreSupported = reluInput->strides() == reluOutput->strides() || reluOutput->checkStrides(StridesRequirement::compact()); + if ((enableEarlyEltwiseReLUFusion && (stridesAreSupported || model->isDynamic())) || (!enableEarlyEltwiseReLUFusion && stridesAreSupported)) { auto reluStageType = reluStage->type(); auto reluStageName = reluStage->name(); diff --git a/inference-engine/src/vpu/graph_transformer/src/parsed_config.cpp b/inference-engine/src/vpu/graph_transformer/src/parsed_config.cpp index 2c97b3af45f751..1f29bbe1cc9350 100644 --- a/inference-engine/src/vpu/graph_transformer/src/parsed_config.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/parsed_config.cpp @@ -68,6 +68,7 @@ IE_SUPPRESS_DEPRECATED_START ie::MYRIAD_FORCE_PURE_TENSOR_ITERATOR, ie::MYRIAD_DISABLE_CONVERT_STAGES, ie::MYRIAD_ENABLE_WEIGHTS_ANALYSIS, + ie::MYRIAD_ENABLE_EARLY_ELTWISE_RELU_FUSION, // // Debug options @@ -183,7 +184,8 @@ void ParsedConfig::parse(const std::map& config) { setOption(_compileConfig.enableTensorIteratorUnrolling, switches, config, 
ie::MYRIAD_ENABLE_TENSOR_ITERATOR_UNROLLING); setOption(_compileConfig.forcePureTensorIterator, switches, config, ie::MYRIAD_FORCE_PURE_TENSOR_ITERATOR); setOption(_compileConfig.disableConvertStages, switches, config, ie::MYRIAD_DISABLE_CONVERT_STAGES); - setOption(_compileConfig.enableWeightsAnalysis, switches, config, ie::MYRIAD_ENABLE_WEIGHTS_ANALYSIS); + setOption(_compileConfig.enableWeightsAnalysis, switches, config, ie::MYRIAD_ENABLE_WEIGHTS_ANALYSIS); + setOption(_compileConfig.enableEarlyEltwiseReLUFusion, switches, config, ie::MYRIAD_ENABLE_EARLY_ELTWISE_RELU_FUSION); setOption(_compileConfig.irWithVpuScalesDir, config, ie::MYRIAD_IR_WITH_SCALES_DIRECTORY); setOption(_compileConfig.noneLayers, config, ie::MYRIAD_NONE_LAYERS, parseStringSet); diff --git a/inference-engine/src/vpu/myriad_plugin/CMakeLists.txt b/inference-engine/src/vpu/myriad_plugin/CMakeLists.txt index 889f2e6e6669ae..c591ff89b13486 100644 --- a/inference-engine/src/vpu/myriad_plugin/CMakeLists.txt +++ b/inference-engine/src/vpu/myriad_plugin/CMakeLists.txt @@ -48,7 +48,8 @@ ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) # install - -install(FILES ${IE_MAIN_SOURCE_DIR}/thirdparty/movidius/mvnc/src/97-myriad-usbboot.rules +if (LINUX) + install(FILES ${IE_MAIN_SOURCE_DIR}/thirdparty/movidius/mvnc/src/97-myriad-usbboot.rules DESTINATION ${IE_CPACK_IE_DIR}/external COMPONENT myriad) +endif() diff --git a/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp b/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp index c7af80a78816c5..d1d2d7a334802e 100644 --- a/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp +++ b/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp @@ -33,7 +33,9 @@ using namespace InferenceEngine::VPUConfigParams; using namespace vpu::MyriadPlugin; -ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(const ICNNNetwork& network, const std::map& config) { +ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl( + const CNNNetwork& network, + const std::map& config) { VPU_PROFILE(LoadExeNetworkImpl); auto parsedConfigCopy = _parsedConfig; @@ -66,7 +68,7 @@ Parameter Engine::GetConfig(const std::string& name, const std::map& config) const { VPU_PROFILE(QueryNetwork); QueryNetworkResult res; diff --git a/inference-engine/src/vpu/myriad_plugin/myriad_plugin.h b/inference-engine/src/vpu/myriad_plugin/myriad_plugin.h index 0a8a9501721bc8..7e7cb7f96b6e77 100644 --- a/inference-engine/src/vpu/myriad_plugin/myriad_plugin.h +++ b/inference-engine/src/vpu/myriad_plugin/myriad_plugin.h @@ -28,11 +28,11 @@ class Engine : public ie::InferencePluginInternal { void SetConfig(const std::map& config) override; ie::ExecutableNetworkInternal::Ptr LoadExeNetworkImpl( - const ie::ICNNNetwork& network, + const ie::CNNNetwork& network, const std::map& config) override; ie::QueryNetworkResult QueryNetwork( - const ie::ICNNNetwork& network, + const ie::CNNNetwork& network, const std::map& config) const override; using ie::InferencePluginInternal::ImportNetwork; diff --git a/inference-engine/tests/CMakeLists.txt b/inference-engine/tests/CMakeLists.txt index 69942e3db2f01e..6b63763c2f45d7 100644 --- a/inference-engine/tests/CMakeLists.txt +++ b/inference-engine/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) 2019 Intel Corporation +# Copyright (C) 2019-2020 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -6,7 +6,7 @@ set(IE_TESTS_ROOT ${CMAKE_CURRENT_SOURCE_DIR}) 
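For context on the new MYRIAD_ENABLE_EARLY_ELTWISE_RELU_FUSION switch registered in parsed_config.cpp above: it is parsed through the YES/NO switches set and defaults to enabled (enableEarlyEltwiseReLUFusion = true in CompilationConfig). A hedged sketch of turning it off for a single LoadNetwork call; the key constant lives in the private vpu/private_plugin_config.hpp header, so including it outside the plugin tree is an assumption of this example:

```cpp
// Sketch only: disables the early Eltwise+ReLU fusion pass for one compilation.
#include <inference_engine.hpp>
#include <vpu/private_plugin_config.hpp>  // private header, assumed reachable here

void LoadWithoutEarlyFusion(InferenceEngine::Core& core,
                            const InferenceEngine::CNNNetwork& network) {
    core.LoadNetwork(network, "MYRIAD",
                     {{InferenceEngine::MYRIAD_ENABLE_EARLY_ELTWISE_RELU_FUSION,
                       InferenceEngine::PluginConfigParams::NO}});
}
```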
enable_testing() -add_subdirectory(ngraph_functions) +add_subdirectory(ngraph_helpers) add_subdirectory(unit) if(ENABLE_FUNCTIONAL_TESTS) diff --git a/inference-engine/tests/functional/inference_engine/CMakeLists.txt b/inference-engine/tests/functional/inference_engine/CMakeLists.txt index 553992bd1bedff..6d61c3948db9a1 100644 --- a/inference-engine/tests/functional/inference_engine/CMakeLists.txt +++ b/inference-engine/tests/functional/inference_engine/CMakeLists.txt @@ -10,6 +10,7 @@ set(LINK_LIBRARIES gmock funcTestUtils ngraphFunctions + lptNgraphFunctions inference_engine_transformations openvino::itt openvino::conditional_compilation @@ -19,6 +20,7 @@ set(DEPENDENCIES inference_engine_ir_reader inference_engine_ir_v7_reader template_extension + lptNgraphFunctions ) if (NGRAPH_ONNX_IMPORT_ENABLE) @@ -148,6 +150,13 @@ function(ie_headers_compilation_with_custom_flags) # To include TBB headers as system set_ie_threading_interface_for(${target_name}) + # To avoid further TBB find_package action in next call of this function. Some version of TBB + # has an issue with cmake config which lead to fail in case of multiple call of find_package + # from one cmake script file. + set("TBB_FOUND" ${TBB_FOUND} PARENT_SCOPE) + set("TBB_IMPORTED_TARGETS" ${TBB_IMPORTED_TARGETS} PARENT_SCOPE) + set("TBB_VERSION" ${TBB_VERSION} PARENT_SCOPE) + set_target_properties(${target_name} PROPERTIES CXX_STANDARD ${IE_TEST_CXX_STANDARD} CXX_STANDARD_REQUIRED OFF) @@ -161,6 +170,10 @@ function(ie_headers_compilation_with_custom_flags) target_compile_definitions(${target_name} PRIVATE ${IE_TEST_DEFINITIONS}) endif() + if(WIN32) + set_target_properties(${target_name} PROPERTIES COMPILE_PDB_NAME ${target_name}) + endif() + add_dependencies(${TARGET_NAME} ${target_name}) endfunction() diff --git a/inference-engine/tests/functional/inference_engine/blob_copy_test.cpp b/inference-engine/tests/functional/inference_engine/blob_copy_test.cpp index 91c946cd97713b..7505324bb76d8b 100644 --- a/inference-engine/tests/functional/inference_engine/blob_copy_test.cpp +++ b/inference-engine/tests/functional/inference_engine/blob_copy_test.cpp @@ -44,8 +44,10 @@ SizeVector SetDimVector(BatchNum batchNum, ChannelNum channelNum, Dims dims) { InferenceEngine::Blob::Ptr createBlob(InferenceEngine::Precision precision, SizeVector dimsVector, InferenceEngine::Layout layout) { InferenceEngine::TensorDesc tensorDesc(precision, dimsVector, layout); switch (precision) { - case InferenceEngine::Precision::FP32 : + case InferenceEngine::Precision::FP32: return make_shared_blob(tensorDesc); + case InferenceEngine::Precision::FP64: + return make_shared_blob(tensorDesc); case InferenceEngine::Precision::FP16: case InferenceEngine::Precision::I16: case InferenceEngine::Precision::Q78: @@ -115,8 +117,10 @@ void FillBlobRandom(Blob::Ptr& inputBlob) { void FillBlob(Blob::Ptr& inputBlob) { auto precision = inputBlob->getTensorDesc().getPrecision(); switch (precision) { - case InferenceEngine::Precision::FP32 : + case InferenceEngine::Precision::FP32: return FillBlobRandom(inputBlob); + case InferenceEngine::Precision::FP64: + return FillBlobRandom(inputBlob); case InferenceEngine::Precision::FP16: case InferenceEngine::Precision::I16: case InferenceEngine::Precision::Q78: @@ -203,8 +207,10 @@ bool IsCorrectBlobCopy_Impl(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) { bool IsCorrectBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) { switch (srcBlob->getTensorDesc().getPrecision()) { - case InferenceEngine::Precision::FP32 : + case 
InferenceEngine::Precision::FP32: return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); + case InferenceEngine::Precision::FP64: + return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); case InferenceEngine::Precision::FP16: case InferenceEngine::Precision::I16: case InferenceEngine::Precision::Q78: @@ -264,7 +270,7 @@ TEST_P(BlobCopyTest, BlobCopy) { std::cout << "Blob_copy execution time : " << std::chrono::duration_cast(finish - start).count() << " micros" << std::endl; - ASSERT_TRUE(IsCorrectBlobCopy(srcBlob, dstBlob)) << "'blob_copy' function is't correct"; + ASSERT_TRUE(IsCorrectBlobCopy(srcBlob, dstBlob)) << "'blob_copy' function is not correct"; } namespace { @@ -332,6 +338,8 @@ bool IsEqualBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) { switch (srcBlob->getTensorDesc().getPrecision()) { case InferenceEngine::Precision::FP32: return IsEqualBlobCopy_Impl(srcBlob, dstBlob); + case InferenceEngine::Precision::FP64: + return IsEqualBlobCopy_Impl(srcBlob, dstBlob); case InferenceEngine::Precision::FP16: case InferenceEngine::Precision::I16: case InferenceEngine::Precision::Q78: @@ -381,6 +389,8 @@ void copy3DBlobsAllBytesWithReLayoutWrapper(const Blob::Ptr& srcLayoutBlob, Blob switch (precision) { case InferenceEngine::Precision::FP32: return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); + case InferenceEngine::Precision::FP64: + return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); case InferenceEngine::Precision::FP16: case InferenceEngine::Precision::I16: case InferenceEngine::Precision::Q78: diff --git a/inference-engine/tests/functional/inference_engine/ie_precision_test.cpp b/inference-engine/tests/functional/inference_engine/ie_precision_test.cpp index 5046067fbc329e..64f10135d6a530 100644 --- a/inference-engine/tests/functional/inference_engine/ie_precision_test.cpp +++ b/inference-engine/tests/functional/inference_engine/ie_precision_test.cpp @@ -17,6 +17,7 @@ TEST_F(PrecisionTests, ShowsCorrectPrecisionNames) { ASSERT_STREQ(Precision(Precision::U64).name(), "U64"); ASSERT_STREQ(Precision(Precision::FP16).name(), "FP16"); ASSERT_STREQ(Precision(Precision::FP32).name(), "FP32"); + ASSERT_STREQ(Precision(Precision::FP64).name(), "FP64"); ASSERT_STREQ(Precision(Precision::I16).name(), "I16"); ASSERT_STREQ(Precision(Precision::I32).name(), "I32"); ASSERT_STREQ(Precision(Precision::U32).name(), "U32"); @@ -35,6 +36,7 @@ TEST_F(PrecisionTests, sizeIsCorrect) { ASSERT_EQ(Precision(Precision::U64).size(), 8); ASSERT_EQ(Precision(Precision::FP16).size(), 2); ASSERT_EQ(Precision(Precision::FP32).size(), 4); + ASSERT_EQ(Precision(Precision::FP64).size(), 8); ASSERT_EQ(Precision(Precision::I32).size(), 4); ASSERT_EQ(Precision(Precision::U32).size(), 4); ASSERT_EQ(Precision(Precision::I16).size(), 2); @@ -50,6 +52,7 @@ TEST_F(PrecisionTests, sizeIsCorrect) { TEST_F(PrecisionTests, is_float) { ASSERT_TRUE(Precision(Precision::FP16).is_float()); ASSERT_TRUE(Precision(Precision::FP32).is_float()); + ASSERT_TRUE(Precision(Precision::FP64).is_float()); ASSERT_FALSE(Precision(Precision::I64).is_float()); ASSERT_FALSE(Precision(Precision::U64).is_float()); ASSERT_FALSE(Precision(Precision::I32).is_float()); @@ -70,6 +73,7 @@ TEST_F(PrecisionTests, constructFromSTR) { ASSERT_EQ(Precision(Precision::U64), Precision::FromStr("U64")); ASSERT_EQ(Precision(Precision::FP16), Precision::FromStr("FP16")); ASSERT_EQ(Precision(Precision::FP32), Precision::FromStr("FP32")); + ASSERT_EQ(Precision(Precision::FP64), Precision::FromStr("FP64")); ASSERT_EQ(Precision(Precision::I32), 
Precision::FromStr("I32")); ASSERT_EQ(Precision(Precision::U32), Precision::FromStr("U32")); ASSERT_EQ(Precision(Precision::I16), Precision::FromStr("I16")); diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/add_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/add_transformation.cpp index 571bbd1535a35f..de7acf9f799ac8 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/add_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/add_transformation.cpp @@ -18,8 +18,8 @@ #include "simple_low_precision_transformer.hpp" #include -#include "ngraph_functions/low_precision_transformations/add_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/add_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/avg_pool_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/avg_pool_transformation.cpp index 06df64aea07e78..faf931bb9ac168 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/avg_pool_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/avg_pool_transformation.cpp @@ -17,7 +17,7 @@ #include "common_test_utils/ngraph_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/avg_pool_function.hpp" +#include "lpt_ngraph_functions/avg_pool_function.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/clamp_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/clamp_transformation.cpp index 9410b5df03fcb9..0530d526faf322 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/clamp_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/clamp_transformation.cpp @@ -12,8 +12,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/clamp_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/clamp_function.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_selection_with_intermediate_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_selection_with_intermediate_transformation.cpp index ce059281b224f7..6d8d75195ee2a0 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_selection_with_intermediate_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_selection_with_intermediate_transformation.cpp @@ -18,8 +18,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" +#include 
"lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_transformation.cpp index e9b15c9b8c0262..d30ff9bc603597 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_transformation.cpp @@ -17,8 +17,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_different_precision_on_childs.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_different_precision_on_childs.cpp index 0d5c137a0a6e9c..f7060401b70dfa 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_different_precision_on_childs.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_different_precision_on_childs.cpp @@ -19,8 +19,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_intermediate_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_intermediate_transformation.cpp index d149c1ee14ecec..01d518c58f5fbb 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_intermediate_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_intermediate_transformation.cpp @@ -18,8 +18,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_intermediate_with_constant_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_intermediate_with_constant_transformation.cpp index 197567fdeb4fed..2189be7ff716d3 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_intermediate_with_constant_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_intermediate_with_constant_transformation.cpp @@ -19,8 +19,8 @@ #include 
#include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_neighbors_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_neighbors_transformation.cpp index bf3e8ce76c249f..40424cefd8bc56 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_neighbors_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_neighbors_transformation.cpp @@ -17,8 +17,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_reshape_at_the_end_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_reshape_at_the_end_transformation.cpp index 9104e800af0220..80710523d70546 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_reshape_at_the_end_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_reshape_at_the_end_transformation.cpp @@ -19,8 +19,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_split_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_split_transformation.cpp index 4cdb0545aa11b6..324e59a0e886d4 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_split_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_split_transformation.cpp @@ -18,8 +18,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/convert_mul_or_add_finally_transformation_with_dequantization.cpp 
b/inference-engine/tests/functional/inference_engine/lp_transformations/convert_mul_or_add_finally_transformation_with_dequantization.cpp index 799e943fcaadb4..da40cf60e9aadd 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/convert_mul_or_add_finally_transformation_with_dequantization.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/convert_mul_or_add_finally_transformation_with_dequantization.cpp @@ -17,7 +17,7 @@ #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/convert_mul_or_add_finally_with_dequantization_function.hpp" +#include "lpt_ngraph_functions/convert_mul_or_add_finally_with_dequantization_function.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_transformation.cpp index cd72f0e46035f1..ef8be7655b3ccd 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_transformation.cpp @@ -16,7 +16,7 @@ #include "common_test_utils/ngraph_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/convolution_function.hpp" +#include "lpt_ngraph_functions/convolution_function.hpp" using namespace testing; using namespace ngraph; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_with_incorrect_weights.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_with_incorrect_weights.cpp index 8101bf5b464f13..15e58bf3120266 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_with_incorrect_weights.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_with_incorrect_weights.cpp @@ -13,10 +13,10 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/convolution_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/convolution_function.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/depth_to_space_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/depth_to_space_transformation.cpp index f9b44c45edf8c3..464cae1db93cd7 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/depth_to_space_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/depth_to_space_transformation.cpp @@ -16,7 +16,7 @@ #include "common_test_utils/ngraph_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/depth_to_space_function.hpp" +#include 
"lpt_ngraph_functions/depth_to_space_function.hpp" using namespace ngraph::pass; using namespace ngraph::builder::subgraph; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/elementwise_with_multi_parent_dequantization_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/elementwise_with_multi_parent_dequantization_transformation.cpp index 4f6e394fb3106e..5d893fab6d2d7b 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/elementwise_with_multi_parent_dequantization_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/elementwise_with_multi_parent_dequantization_transformation.cpp @@ -18,8 +18,8 @@ #include "simple_low_precision_transformer.hpp" #include -#include "ngraph_functions/low_precision_transformations/elementwise_with_multi_parent_dequantization_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/elementwise_with_multi_parent_dequantization_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp index 030f6e3245b049..32c5aa348dcfba 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp @@ -16,9 +16,9 @@ #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" -#include "ngraph_functions/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/fake_quantize_and_two_output_branches_with_convolution_function.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_precision_selection_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_precision_selection_transformation.cpp index 81a99049efc63c..05ffb0cc7ff5ff 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_precision_selection_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_precision_selection_transformation.cpp @@ -18,7 +18,7 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/fake_quantize_precision_selection_function.hpp" +#include "lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_transformation.cpp 
b/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_transformation.cpp index b09fe27c0eabb6..0373723f1ff3cc 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_transformation.cpp @@ -15,7 +15,7 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fake_quantize_function.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/fold_fake_quantize_in_transformations.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/fold_fake_quantize_in_transformations.cpp index 67e5ea5d1b3c3b..4afe614a2f783d 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/fold_fake_quantize_in_transformations.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/fold_fake_quantize_in_transformations.cpp @@ -15,11 +15,11 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/fold_fake_quantize_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/fold_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" using namespace testing; using namespace ngraph; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_convert_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_convert_transformation.cpp index 50050b00b1ceed..b09b623ed776e9 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_convert_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_convert_transformation.cpp @@ -14,9 +14,9 @@ #include "low_precision/fuse_convert.hpp" #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/fuse_convert_function.hpp" +#include "lpt_ngraph_functions/fuse_convert_function.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_fake_quantize_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_fake_quantize_transformation.cpp index 65d4967a16f8e3..1b0fe309f8cef5 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_fake_quantize_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_fake_quantize_transformation.cpp @@ -14,12 +14,12 @@ #include #include #include -#include "ngraph_functions/low_precision_transformations/common/add.hpp" -#include 
"ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/add.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/fuse_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fuse_fake_quantize_function.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_fake_quantize_with_multi_inputs_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_fake_quantize_with_multi_inputs_transformation.cpp index b24340223c717a..c6e086f710d208 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_fake_quantize_with_multi_inputs_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_fake_quantize_with_multi_inputs_transformation.cpp @@ -14,11 +14,11 @@ #include #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/fuse_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fuse_fake_quantize_function.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_multiply_to_fake_quantize_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_multiply_to_fake_quantize_transformation.cpp index c91444d38ffc07..6809d5bfbb4dd3 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_multiply_to_fake_quantize_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_multiply_to_fake_quantize_transformation.cpp @@ -10,11 +10,11 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/fuse_multiply_to_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fuse_multiply_to_fake_quantize_function.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_subtract_to_fake_quantize_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_subtract_to_fake_quantize_transformation.cpp index 606f9f0fe4ea25..19daeeea858ec9 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_subtract_to_fake_quantize_transformation.cpp +++ 
b/inference-engine/tests/functional/inference_engine/lp_transformations/fuse_subtract_to_fake_quantize_transformation.cpp @@ -10,11 +10,11 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/fuse_subtract_to_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fuse_subtract_to_fake_quantize_function.hpp" #include "simple_low_precision_transformer.hpp" diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/get_dequantization_test.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/get_dequantization_test.cpp index f0b8a6716b96d3..d4d4eafced6f21 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/get_dequantization_test.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/get_dequantization_test.cpp @@ -9,10 +9,10 @@ #include #include -#include "ngraph_functions/low_precision_transformations/get_dequantization_function.hpp" +#include "lpt_ngraph_functions/get_dequantization_function.hpp" #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "low_precision/network_helper.hpp" diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/group_convolution_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/group_convolution_transformation.cpp index c7b69bb11abb84..eaba214a633634 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/group_convolution_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/group_convolution_transformation.cpp @@ -16,10 +16,10 @@ #include "common_test_utils/ngraph_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/group_convolution_function.hpp" +#include "lpt_ngraph_functions/group_convolution_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" using namespace testing; using namespace ngraph; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/interpolate_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/interpolate_transformation.cpp index fb14e833a90d73..9461ad58fc265c 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/interpolate_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/interpolate_transformation.cpp @@ -15,9 +15,9 @@ #include "low_precision/interpolate.hpp" #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include 
"lpt_ngraph_functions/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/interpolate_function.hpp" +#include "lpt_ngraph_functions/interpolate_function.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/mat_mul_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/mat_mul_transformation.cpp index 2272402e9c29aa..fa6cca4283f7b0 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/mat_mul_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/mat_mul_transformation.cpp @@ -16,10 +16,10 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/mat_mul_function.hpp" +#include "lpt_ngraph_functions/mat_mul_function.hpp" #include "ngraph_functions/subgraph_builders.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace { diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/mat_mul_with_constant_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/mat_mul_with_constant_transformation.cpp index 88984670a74c6d..f8d4a6a6c59843 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/mat_mul_with_constant_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/mat_mul_with_constant_transformation.cpp @@ -15,10 +15,10 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/mat_mul_function.hpp" +#include "lpt_ngraph_functions/mat_mul_function.hpp" #include "ngraph_functions/subgraph_builders.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace { diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/max_pool_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/max_pool_transformation.cpp index 7461644c00d256..6bba63856b33dc 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/max_pool_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/max_pool_transformation.cpp @@ -16,8 +16,8 @@ #include "common_test_utils/ngraph_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/max_pool_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/max_pool_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" using namespace testing; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/move_dequantization_after_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/move_dequantization_after_transformation.cpp index ef84fe37e72503..aa5c2956a6ce0c 100644 --- 
a/inference-engine/tests/functional/inference_engine/lp_transformations/move_dequantization_after_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/move_dequantization_after_transformation.cpp @@ -16,8 +16,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/move_dequantization_after_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/move_dequantization_after_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/move_dequantization_after_with_int_constant_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/move_dequantization_after_with_int_constant_transformation.cpp index eab0935b4d32c6..d7cdca9904e009 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/move_dequantization_after_with_int_constant_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/move_dequantization_after_with_int_constant_transformation.cpp @@ -16,8 +16,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/move_dequantization_after_with_int_constant_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/move_dequantization_after_with_int_constant_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/mul_add_to_scaleshift_or_power_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/mul_add_to_scaleshift_or_power_transformation.cpp index 5009a71c6e8344..59ea678921ed44 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/mul_add_to_scaleshift_or_power_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/mul_add_to_scaleshift_or_power_transformation.cpp @@ -16,9 +16,9 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/common/add.hpp" -#include "ngraph_functions/low_precision_transformations/mul_add_to_scaleshift_or_power_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/add.hpp" +#include "lpt_ngraph_functions/mul_add_to_scaleshift_or_power_function.hpp" using namespace testing; using namespace ngraph; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/multiply_to_group_convolution_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/multiply_to_group_convolution_transformation.cpp index 4edeb4acdfc105..6cd142685478ff 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/multiply_to_group_convolution_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/multiply_to_group_convolution_transformation.cpp @@ -15,9 +15,9 @@ #include 
"low_precision/multiply_to_group_convolution.hpp" #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/multiply_to_group_convolution_function.hpp" +#include "lpt_ngraph_functions/multiply_to_group_convolution_function.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/multiply_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/multiply_transformation.cpp index 994b3fa4588413..095233dbcabb81 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/multiply_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/multiply_transformation.cpp @@ -14,11 +14,11 @@ #include #include #include "low_precision/multiply.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "common_test_utils/ngraph_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/multiply_function.hpp" +#include "lpt_ngraph_functions/multiply_function.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/mvn_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/mvn_transformation.cpp index 5e2e73d0a1bee4..65c74418a190b5 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/mvn_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/mvn_transformation.cpp @@ -15,9 +15,9 @@ #include "low_precision/mvn.hpp" #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/mvn_function.hpp" +#include "lpt_ngraph_functions/mvn_function.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/normalize_l2_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/normalize_l2_transformation.cpp index d1c6f0d2e4afcd..4648aa6e85d00b 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/normalize_l2_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/normalize_l2_transformation.cpp @@ -15,7 +15,7 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/normalize_l2_function.hpp" +#include "lpt_ngraph_functions/normalize_l2_function.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/prelu_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/prelu_transformation.cpp index ad24e183dde7be..2eb028233ae771 100644 --- 
a/inference-engine/tests/functional/inference_engine/lp_transformations/prelu_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/prelu_transformation.cpp @@ -15,8 +15,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/prelu_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/prelu_function.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/relu_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/relu_transformation.cpp index d7795bcf6a3462..a93f94d5f24151 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/relu_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/relu_transformation.cpp @@ -15,8 +15,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/relu_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/relu_function.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_fully_connected_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_fully_connected_transformation.cpp index dff66360e080a5..74b955f1c0d7e9 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_fully_connected_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_fully_connected_transformation.cpp @@ -14,7 +14,7 @@ #include #include "layer_transformation.hpp" #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/reshape_fully_connected_function.hpp" +#include "lpt_ngraph_functions/reshape_fully_connected_function.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_transformation.cpp index b50aa2d2c598e2..9fe9c84d0c5d8a 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/reshape_transformation.cpp @@ -15,8 +15,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/reshape_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/reshape_function.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/round_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/round_transformation.cpp index eafa072942cad6..b451c9e98e0c56 100644 --- 
a/inference-engine/tests/functional/inference_engine/lp_transformations/round_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/round_transformation.cpp @@ -9,9 +9,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/round_function.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/round_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "low_precision/network_helper.hpp" diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/split_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/split_transformation.cpp index 77f4b38f39e6d8..e25e7ee506ffa1 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/split_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/split_transformation.cpp @@ -12,8 +12,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/split_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/split_function.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/squeeze_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/squeeze_transformation.cpp index 1d73ceb23adc40..9b9aacf1081d25 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/squeeze_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/squeeze_transformation.cpp @@ -16,7 +16,7 @@ #include "common_test_utils/ngraph_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/squeeze_function.hpp" +#include "lpt_ngraph_functions/squeeze_function.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/subtract_multiply_to_multiply_add_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/subtract_multiply_to_multiply_add_transformation.cpp index 92a27a97cf12aa..8dbe6fc4975176 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/subtract_multiply_to_multiply_add_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/subtract_multiply_to_multiply_add_transformation.cpp @@ -16,7 +16,7 @@ #include "common_test_utils/ngraph_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/subtract_multiply_to_multiply_add_function.hpp" +#include "lpt_ngraph_functions/subtract_multiply_to_multiply_add_function.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/transformer_is_function_quantized.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/transformer_is_function_quantized.cpp index 163f892dae5de4..768ed9eb7f1753 100644 --- 
a/inference-engine/tests/functional/inference_engine/lp_transformations/transformer_is_function_quantized.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/transformer_is_function_quantized.cpp @@ -16,9 +16,9 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" -#include "ngraph_functions/low_precision_transformations/convolution_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/convolution_function.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/transpose_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/transpose_transformation.cpp index 0a4dd72846e87f..0ee715d736012e 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/transpose_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/transpose_transformation.cpp @@ -15,8 +15,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/transpose_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/transpose_function.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/unsqueeze_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/unsqueeze_transformation.cpp index ec11d0e8cf8fe9..70126f8e0f1ff6 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/unsqueeze_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/unsqueeze_transformation.cpp @@ -16,7 +16,7 @@ #include "common_test_utils/ngraph_test_utils.hpp" #include "simple_low_precision_transformer.hpp" -#include "ngraph_functions/low_precision_transformations/unsqueeze_function.hpp" +#include "lpt_ngraph_functions/unsqueeze_function.hpp" using namespace testing; using namespace ngraph::pass; diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/variadic_split_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/variadic_split_transformation.cpp index a29252e30c3c69..cc686f5320b8bc 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/variadic_split_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/variadic_split_transformation.cpp @@ -12,8 +12,8 @@ #include #include "common_test_utils/ngraph_test_utils.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/variadic_split_function.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/variadic_split_function.hpp" #include "simple_low_precision_transformer.hpp" namespace { diff --git 
a/inference-engine/tests/functional/inference_engine/ngraph_reader/convert_like_tests.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reader/convert_like_tests.cpp new file mode 100644 index 00000000000000..f9fbee13e13f6e --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/ngraph_reader/convert_like_tests.cpp @@ -0,0 +1,109 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "ngraph_reader_tests.hpp" + +TEST_F(NGraphReaderTests, ReadConvertLikeNetwork) { + std::string model = R"V0G0N( + + + + + + + 256 + 56 + + + + + + + + 256 + 56 + + + + + + + 256 + 56 + + + 256 + 56 + + + + + 256 + 56 + + + + + + + 256 + 56 + + + + + + + + + + +)V0G0N"; + std::string modelV7 = R"V0G0N( + + + + + + 256 + 56 + + + + + + + 256 + 56 + + + + + + + 256 + 56 + + + 256 + 56 + + + + + 256 + 56 + + + + + + + + + +)V0G0N"; + compareIRs(model, modelV7); +} diff --git a/inference-engine/tests/functional/inference_engine/ngraph_reader/elu_tests.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reader/elu_tests.cpp index d4bf29826b31a1..5187d7d4f590d8 100644 --- a/inference-engine/tests/functional/inference_engine/ngraph_reader/elu_tests.cpp +++ b/inference-engine/tests/functional/inference_engine/ngraph_reader/elu_tests.cpp @@ -19,7 +19,7 @@ TEST_F(NGraphReaderTests, ReadELUNetwork) { - + diff --git a/inference-engine/tests/functional/inference_engine/ngraph_reader/matmul_tests.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reader/matmul_tests.cpp index cb7388d951d768..8b396a4b5c421c 100644 --- a/inference-engine/tests/functional/inference_engine/ngraph_reader/matmul_tests.cpp +++ b/inference-engine/tests/functional/inference_engine/ngraph_reader/matmul_tests.cpp @@ -381,7 +381,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork4) { - + 2048 @@ -529,7 +529,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork5) { - + 1 diff --git a/inference-engine/tests/functional/inference_engine/ngraph_reader/mod_tests.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reader/mod_tests.cpp new file mode 100644 index 00000000000000..52d090a7b5da9c --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/ngraph_reader/mod_tests.cpp @@ -0,0 +1,81 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "ngraph_reader_tests.hpp" +#include +#include + + +TEST_F(NGraphReaderTests, ReadModNetwork) { + std::string modelV10 = R"V0G0N( + + + + + + + 256 + 56 + + + + + + + + 256 + 56 + + + + + + + 256 + 56 + + + 256 + 56 + + + + + 256 + 56 + + + + + + + 256 + 56 + + + + + + + + + + +)V0G0N"; + Core ie; + Blob::Ptr weights; + std::shared_ptr f_ref{nullptr}; + + auto data_A = std::make_shared(ngraph::element::f32, ngraph::Shape{256, 56}); + auto data_B = std::make_shared(ngraph::element::f32, ngraph::Shape{256, 56}); + auto mod = std::make_shared(data_A, data_B); + f_ref = std::make_shared(ngraph::NodeVector{mod}, ngraph::ParameterVector{data_A, data_B}); + + auto network = ie.ReadNetwork(modelV10, weights); + auto f = network.getFunction(); + + auto res = compare_functions(f, f_ref); + ASSERT_TRUE(res.first) << res.second; +} diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/bf16_network_restoring.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/bf16_network_restoring.cpp index 5d1defd2932aae..8cc114c4594676 100644 --- a/inference-engine/tests/functional/plugin/cpu/bfloat16/bf16_network_restoring.cpp +++ 
b/inference-engine/tests/functional/plugin/cpu/bfloat16/bf16_network_restoring.cpp @@ -30,7 +30,7 @@ class BF16NetworkRestore1 : public BasicBF16Test { std::shared_ptr createGraph(InferenceEngine::Precision netPrecision) override { // + Power1(FP32) // | - // + AvgPooling1(FP32) + // + AvgPooling1(BF16) // | // + Convolution1(BF16) // | @@ -45,7 +45,7 @@ class BF16NetworkRestore1 : public BasicBF16Test { // | / // ReLU3 (Fused to Conv2) / // | / - // MaxPooling1 (FP32) / + // MaxPooling1 (BF16) / // \ / // Eltwise // | @@ -180,7 +180,7 @@ class BF16NetworkRestore1 : public BasicBF16Test { // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters expectedPrecisions["Power1"] = "FP32"; - expectedPrecisions["AvgPooling1"] = "FP32"; + expectedPrecisions["AvgPooling1"] = "BF16"; expectedPrecisions["Convolution1"] = "BF16"; expectedPrecisions["ReLU1"] = "ndef"; expectedPrecisions["Convolution2"] = "BF16"; @@ -189,7 +189,7 @@ class BF16NetworkRestore1 : public BasicBF16Test { expectedPrecisions["Norm1"] = "FP32"; expectedPrecisions["Eltwise1"] = "ndef"; expectedPrecisions["ReLU3"] = "ndef"; - expectedPrecisions["maxPooling1"] = "FP32"; + expectedPrecisions["maxPooling1"] = "BF16"; expectedPrecisions["Eltwise2"] = "FP32"; } }; diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/concat_in_place.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/concat_in_place.cpp index d77fb09f044efc..cc74eb684edb3b 100644 --- a/inference-engine/tests/functional/plugin/cpu/bfloat16/concat_in_place.cpp +++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/concat_in_place.cpp @@ -131,7 +131,7 @@ class Concat_in_place : public BasicBF16Test { expectedPrecisions["ADD_1"] = "FP32"; expectedPrecisions["CONV_1"] = "BF16"; expectedPrecisions["CONV_2"] = "BF16"; - expectedPrecisions["CONC_1_TEST"] = "FP32"; + expectedPrecisions["CONC_1_TEST"] = "BF16"; expectedPrecisions["RELU_1"] = "FP32"; } }; diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/conv_relu_pool_conv_relu_pool.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/conv_relu_pool_conv_relu_pool.cpp index 4dde0eaf889756..20131cb1720f4f 100644 --- a/inference-engine/tests/functional/plugin/cpu/bfloat16/conv_relu_pool_conv_relu_pool.cpp +++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/conv_relu_pool_conv_relu_pool.cpp @@ -32,7 +32,7 @@ class ConvReLUPoolConvReLUPool : public BasicBF16Test { // | // ReLU1 (Fused) // | - // Pooling1 (FP32) + // Pooling1 (BF16) // | // Convolution2 (BF16) // | @@ -164,7 +164,7 @@ class ConvReLUPoolConvReLUPool : public BasicBF16Test { // performance counters expectedPrecisions["Convolution_1"] = "FP32"; expectedPrecisions["ReLU_1"] = "ndef"; - expectedPrecisions["AvgPool_1"] = "FP32"; + expectedPrecisions["AvgPool_1"] = "BF16"; expectedPrecisions["Convolution_2"] = "BF16"; expectedPrecisions["ReLU_2"] = "ndef"; expectedPrecisions["MaxPool_2"] = "BF16"; diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/gather_x2_add_mul_relu_concat_matmul.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/gather_x2_add_mul_relu_concat_matmul.cpp index 03185914a47577..2f29cb0a6c1ea3 100644 --- a/inference-engine/tests/functional/plugin/cpu/bfloat16/gather_x2_add_mul_relu_concat_matmul.cpp +++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/gather_x2_add_mul_relu_concat_matmul.cpp @@ -37,7 +37,7 @@ class Gather_x2_add_mul_relu_concat_matmul : public BasicBF16Test { // \ 
/ / // Mul(FP32) ReLU(FP32) // \ / -// Concat(FP32) Const +// Concat(BF16) Const // \ / // Matmul(BF16) @@ -116,7 +116,7 @@ class Gather_x2_add_mul_relu_concat_matmul : public BasicBF16Test { fnPtr = createGraph(netPrecision); // STAGE2: set up safe threshold <= 5% from maximum value of output tensor - threshold = 170.02f; // Max in fp32 network by output: 3887.11 + threshold = 177.f; // Max in fp32 network by output: 3887.11 // STAGE3: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in @@ -125,7 +125,7 @@ class Gather_x2_add_mul_relu_concat_matmul : public BasicBF16Test { expectedPrecisions["Mul_1"] = "FP32"; expectedPrecisions["Add_1"] = "FP32"; expectedPrecisions["Relu_1"] = "FP32"; - expectedPrecisions["Conc_1"] = "FP32"; + expectedPrecisions["Conc_1"] = "BF16"; expectedPrecisions["Matmul_1"] = "BF16"; } }; diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/mobilenet_ssd_with_branching.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/mobilenet_ssd_with_branching.cpp index 4855ca390151d2..aca7bd6eec27c4 100644 --- a/inference-engine/tests/functional/plugin/cpu/bfloat16/mobilenet_ssd_with_branching.cpp +++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/mobilenet_ssd_with_branching.cpp @@ -24,7 +24,7 @@ class MobileNet_ssd_with_branching : public BasicBF16Test { // | // Conv1 (FP32) // | \ - // Conv2 (FP32 so far while we have not greedy mode. This must be fixed. Such pattern shouild have Conv2 in BF16) + // Conv2 (BF16) \ // | | // relu(fused) | // | Normalize (not LRN) @@ -145,18 +145,18 @@ class MobileNet_ssd_with_branching : public BasicBF16Test { fnPtr = createGraph(netPrecision); // STAGE1: - threshold = 0.8f; // max value in latest tensor is 87.67 + threshold = 0.85f; // max value in latest tensor is 87.67 // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters expectedPrecisions["ADD_1"] = "FP32"; expectedPrecisions["CONV_1"] = "BF16"; - expectedPrecisions["CONV_2"] = "FP32"; + expectedPrecisions["CONV_2"] = "BF16"; expectedPrecisions["RELU_2"] = "ndef"; expectedPrecisions["DW_CONV"] = "BF16"; expectedPrecisions["RELU_DW"] = "ndef"; expectedPrecisions["NORM_1"] = "FP32"; - expectedPrecisions["CONC_1"] = "FP32"; + expectedPrecisions["CONC_1"] = "BF16"; } }; diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_relu.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_relu.cpp index cff8ce820f8d4f..d1bfeb0de6f999 100644 --- a/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_relu.cpp +++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_relu.cpp @@ -93,7 +93,7 @@ class ScaleshiftConvRelu : public BasicBF16Test { fnPtr = createGraph(netPrecision); // STAGE1: - threshold = 5e-2; + threshold = 7e-2; // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in // performance counters diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_x2_concat_relu.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_x2_concat_relu.cpp index f8e5ae16c5da63..b94f24111d2abc 100644 --- a/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_x2_concat_relu.cpp +++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_x2_concat_relu.cpp @@ -117,7 
+117,7 @@ class ScaleshiftConv_x2_ConcatRelu : public BasicBF16Test { expectedPrecisions["ADD_1"] = "FP32"; expectedPrecisions["CONV_1"] = "BF16"; expectedPrecisions["CONV_2"] = "BF16"; - expectedPrecisions["CONC_1"] = "FP32"; + expectedPrecisions["CONC_1"] = "BF16"; expectedPrecisions["RELU_1"] = "FP32"; } }; diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_x3_conv_eltwise_relu.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_x3_conv_eltwise_relu.cpp index 35cace53067492..a3a45a3e09c6d6 100644 --- a/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_x3_conv_eltwise_relu.cpp +++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_x3_conv_eltwise_relu.cpp @@ -142,7 +142,7 @@ class Scaleshift_x3_ConvEltwiseRelu : public BasicBF16Test { fnPtr = createGraph(netPrecision); // STAGE1: - threshold = 2e-1; + threshold = 5e-1; // STAGE2: // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp index 00443cdb4aed3f..c567163d5d20c4 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp @@ -5,7 +5,7 @@ #include #include "low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp index a45618d0c9c999..627eba3b30f896 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp @@ -5,7 +5,7 @@ #include #include "low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp index 55dc16804456e9..314391f2bd3f50 100644 --- 
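The BF16 cases above pin an expected execution precision per layer and verify it against the CPU plugin's performance counters. A minimal sketch of that kind of check, assuming a hypothetical `req` (an already-created InferRequest) and an `expectedPrecisions` map filled the same way as in these tests; fused layers marked "ndef" are simply skipped here:

#include <map>
#include <string>
#include <inference_engine.hpp>

// Sketch only: compare the exec_type reported in the performance counters with the
// precision each layer is expected to run in ("BF16", "FP32", ...).
bool CheckExecPrecisions(InferenceEngine::InferRequest& req,
                         const std::map<std::string, std::string>& expectedPrecisions) {
    const auto perfCounts = req.GetPerformanceCounts();  // layer name -> InferenceEngineProfileInfo
    for (const auto& expected : expectedPrecisions) {
        if (expected.second == "ndef")
            continue;  // layer is fused into another primitive, nothing to check for it here
        const auto it = perfCounts.find(expected.first);
        if (it == perfCounts.end())
            return false;
        // exec_type looks like "jit_avx512_BF16"; match the expected precision as a substring.
        if (std::string(it->second.exec_type).find(expected.second) == std::string::npos)
            return false;
    }
    return true;
}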
a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp @@ -6,7 +6,7 @@ #include "low_precision_transformations/fake_quantize_precision_selection_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "ngraph_functions/low_precision_transformations/fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fake_quantize_function.hpp" using namespace LayerTestsDefinitions; using namespace ngraph::pass::low_precision; diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp index 1ef54f8a23e77c..6cbc68d5dd538c 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp @@ -6,7 +6,7 @@ #include "low_precision_transformations/fake_quantize_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "ngraph_functions/low_precision_transformations/fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fake_quantize_function.hpp" using namespace LayerTestsDefinitions; using namespace ngraph::pass::low_precision; diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp index d610ba6b30e91f..0e0e430248bf16 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp @@ -6,7 +6,7 @@ #include "low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "ngraph_functions/low_precision_transformations/fuse_fake_quantize_and_scale_shift_function.hpp" +#include "lpt_ngraph_functions/fuse_fake_quantize_and_scale_shift_function.hpp" using namespace LayerTestsDefinitions; using namespace ngraph::pass::low_precision; diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/plugin_config.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/plugin_config.cpp index 53e2dd7baa34de..4ad085c318fa70 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/plugin_config.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/plugin_config.cpp @@ -5,4 +5,9 @@ #include "functional_test_utils/plugin_config.hpp" void PreparePluginConfiguration(LayerTestsUtils::LayerTestsCommon* test) { + // Within the test scope we don't need any implicit bf16 optimisations, so let's run the network as is. 
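+    // Note: KEY_ENFORCE_BF16 is only defaulted to NO when a test has not set it explicitly (see the count() guard below),
+    // so bf16-specific tests can still opt in to enforcement.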
+ auto& configuration = test->GetConfiguration(); + if (!configuration.count(InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16)) { + configuration.insert({InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}); + } } diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/pooling.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/pooling.cpp index 87a0f1f45287a0..98bb97e8487089 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/pooling.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/pooling.cpp @@ -25,12 +25,18 @@ const std::vector netPrecisions = { const std::vector> kernels = {{3, 3}, {3, 5}}; +const std::vector> kernel3D = {{2, 2, 2}}; + const std::vector> strides = {{1, 1}, {1, 2}}; +const std::vector> strides3D = {{1, 1, 1}, + {2, 2, 2}}; const std::vector> padBegins = {{0, 0}, {0, 2}}; +const std::vector> padBegins3D = {{0, 0, 0}}; const std::vector> padEnds = {{0, 0}, {0, 2}}; +const std::vector> padEnds3D = {{0, 0, 0}}; const std::vector roundingTypes = {ngraph::op::RoundingType::CEIL, ngraph::op::RoundingType::FLOOR}; ////* ========== Max Polling ========== */ @@ -46,7 +52,7 @@ const auto maxPool_ExplicitPad_FloorRounding_Params = ::testing::Combine( ::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling ); -INSTANTIATE_TEST_CASE_P(smoke_MaxPool_ExplicitPad_FloorRpunding, PoolingLayerTest, +INSTANTIATE_TEST_CASE_P(smoke_MaxPool_ExplicitPad_FloorRounding, PoolingLayerTest, ::testing::Combine( maxPool_ExplicitPad_FloorRounding_Params, ::testing::ValuesIn(netPrecisions), @@ -58,6 +64,126 @@ INSTANTIATE_TEST_CASE_P(smoke_MaxPool_ExplicitPad_FloorRpunding, PoolingLayerTes ::testing::Values(CommonTestUtils::DEVICE_CPU)), PoolingLayerTest::getTestCaseName); +/* +========== Same Upper Pad Floor Rounding ========== */ +const auto maxPool_SameUpperPad_FloorRounding_Params = ::testing::Combine( + ::testing::Values(ngraph::helpers::PoolingTypes::MAX), + ::testing::ValuesIn(kernels), + ::testing::ValuesIn(strides), + ::testing::ValuesIn(padBegins), + ::testing::ValuesIn(padEnds), + ::testing::Values(ngraph::op::RoundingType::FLOOR), + ::testing::Values(ngraph::op::PadType::SAME_UPPER), + ::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling +); + +INSTANTIATE_TEST_CASE_P(smoke_MaxPool_SameUpperPad_FloorRounding, PoolingLayerTest, + ::testing::Combine( + maxPool_SameUpperPad_FloorRounding_Params, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(std::vector({1, 3, 30, 30})), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + PoolingLayerTest::getTestCaseName); + +/* +========== Same Lower Pad Floor Rounding ========== */ +const auto maxPool_SameLowerPad_FloorRounding_Params = ::testing::Combine( + ::testing::Values(ngraph::helpers::PoolingTypes::MAX), + ::testing::ValuesIn(kernels), + ::testing::ValuesIn(strides), + ::testing::ValuesIn(padBegins), + ::testing::ValuesIn(padEnds), + ::testing::Values(ngraph::op::RoundingType::FLOOR), + ::testing::Values(ngraph::op::PadType::SAME_LOWER), + ::testing::Values(false) // placeholder value - exclude pad not applicable for 
max pooling +); + +INSTANTIATE_TEST_CASE_P(smoke_MaxPool_SameLowerPad_FloorRounding, PoolingLayerTest, + ::testing::Combine( + maxPool_SameLowerPad_FloorRounding_Params, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(std::vector({1, 3, 30, 30})), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + PoolingLayerTest::getTestCaseName); + +/* ========== Explicit Pad Floor Rounding 5D input========== */ +const auto maxPool_ExplicitPad_FloorRounding_5Dinput_Params = ::testing::Combine( + ::testing::Values(ngraph::helpers::PoolingTypes::MAX), + ::testing::ValuesIn(kernel3D), + ::testing::ValuesIn(strides3D), + ::testing::ValuesIn(padBegins3D), + ::testing::ValuesIn(padEnds3D), + ::testing::Values(ngraph::op::RoundingType::FLOOR), + ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling +); + +INSTANTIATE_TEST_CASE_P(smoke_MaxPool_ExplicitPad_FloorRounding_5Dinput, PoolingLayerTest, + ::testing::Combine( + maxPool_ExplicitPad_FloorRounding_5Dinput_Params, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(std::vector({32, 32, 2, 2, 2})), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + PoolingLayerTest::getTestCaseName); + +/* ========== Same Upper Pad Floor Rounding 5D input========== */ +const auto maxPool_SameUpperPad_FloorRounding_5Dinput_Params = ::testing::Combine( + ::testing::Values(ngraph::helpers::PoolingTypes::MAX), + ::testing::ValuesIn(kernel3D), + ::testing::ValuesIn(strides3D), + ::testing::ValuesIn(padBegins3D), + ::testing::ValuesIn(padEnds3D), + ::testing::Values(ngraph::op::RoundingType::FLOOR), + ::testing::Values(ngraph::op::PadType::SAME_UPPER), + ::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling +); + +INSTANTIATE_TEST_CASE_P(smoke_MaxPool_SameUpperPad_FloorRounding_5Dinput, PoolingLayerTest, + ::testing::Combine( + maxPool_SameUpperPad_FloorRounding_5Dinput_Params, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(std::vector({32, 32, 2, 2, 2})), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + PoolingLayerTest::getTestCaseName); + +/* ========== Same Lower Pad Ceil Rounding 5D input========== */ +const auto maxPool_SameLowerPad_CeilRounding_5Dinput_Params = ::testing::Combine( + ::testing::Values(ngraph::helpers::PoolingTypes::MAX), + ::testing::ValuesIn(kernel3D), + ::testing::ValuesIn(strides3D), + ::testing::ValuesIn(padBegins3D), + ::testing::ValuesIn(padEnds3D), + ::testing::Values(ngraph::op::RoundingType::CEIL), + ::testing::Values(ngraph::op::PadType::SAME_LOWER), + ::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling +); + +INSTANTIATE_TEST_CASE_P(smoke_MaxPool_SameLowerPad_CeilRounding_5Dinput, PoolingLayerTest, + ::testing::Combine( + maxPool_SameLowerPad_CeilRounding_5Dinput_Params, +
::testing::ValuesIn(netPrecisions), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(std::vector({32, 32, 2, 2, 2})), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + PoolingLayerTest::getTestCaseName); + /* ========== Explicit Pad Ceil Rounding ========== */ const auto maxPool_ExplicitPad_CeilRounding_Params = ::testing::Combine( ::testing::Values(ngraph::helpers::PoolingTypes::MAX), @@ -70,7 +196,7 @@ const auto maxPool_ExplicitPad_CeilRounding_Params = ::testing::Combine( ::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling ); -INSTANTIATE_TEST_CASE_P(smoke_MaxPool_ExplicitPad_CeilRpunding, PoolingLayerTest, +INSTANTIATE_TEST_CASE_P(smoke_MaxPool_ExplicitPad_CeilRounding, PoolingLayerTest, ::testing::Combine( maxPool_ExplicitPad_CeilRounding_Params, ::testing::ValuesIn(netPrecisions), diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/roi_pooling.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/roi_pooling.cpp new file mode 100644 index 00000000000000..bc244cf5b571ca --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/roi_pooling.cpp @@ -0,0 +1,64 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "single_layer_tests/roi_pooling.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace LayerTestsDefinitions; + +const std::vector> inShapes = { + {1, 3, 8, 8}, + {3, 4, 50, 50} +}; + +const std::vector> pooledShapes_max = { + {1, 1}, + {2, 2}, + {3, 3}, + {6, 6} +}; + +const std::vector> pooledShapes_bilinear = { + {2, 2}, + {3, 3}, + {6, 6} +}; + +const std::vector> coordShapes = { + {1, 5}, + {3, 5}, + {5, 5} +}; + +const std::vector netPRCs = { + InferenceEngine::Precision::FP16, + InferenceEngine::Precision::FP32 +}; + +const std::vector spatial_scales = {0.625f, 1.f}; + +const auto test_ROIPooling_max = ::testing::Combine( + ::testing::ValuesIn(inShapes), + ::testing::ValuesIn(coordShapes), + ::testing::ValuesIn(pooledShapes_max), + ::testing::ValuesIn(spatial_scales), + ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_MAX), + ::testing::ValuesIn(netPRCs), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +const auto test_ROIPooling_bilinear = ::testing::Combine( + ::testing::ValuesIn(inShapes), + ::testing::ValuesIn(coordShapes), + ::testing::ValuesIn(pooledShapes_bilinear), + ::testing::Values(spatial_scales[1]), + ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR), + ::testing::ValuesIn(netPRCs), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +INSTANTIATE_TEST_CASE_P(smoke_TestsROIPooling_max, ROIPoolingLayerTest, test_ROIPooling_max, ROIPoolingLayerTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_TestsROIPooling_bilinear, ROIPoolingLayerTest, test_ROIPooling_bilinear, ROIPoolingLayerTest::getTestCaseName); diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp index cb79f7c7f1555c..2ba662994afc30 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp +++ 
b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp @@ -5,10 +5,11 @@ #include #include +#include #include "functional_test_utils/skip_tests_config.hpp" std::vector disabledTestPatterns() { - return { + std::vector retVector{ // TODO: Issue 26264 R"(.*(MaxPool|AvgPool).*S\(1\.2\).*Rounding=ceil.*)", // TODO: Issue 31841 @@ -58,4 +59,12 @@ std::vector disabledTestPatterns() { // TODO: Issue 43417 sporadic issue, looks like an issue in test, reproducible only on Windows platform R"(.*decomposition1_batch=5_hidden_size=10_input_size=30_.*tanh.relu.*_clip=0_linear_before_reset=1.*_targetDevice=CPU_.*)", }; + + if (!InferenceEngine::with_cpu_x86_bfloat16()) { + // on platforms which do not support bfloat16, we are disabling bf16 tests since there are no bf16 primitives, + // tests are useless on such platforms + retVector.emplace_back(R"(.*BF16.*)"); + } + + return retVector; } diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/activation.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/activation.cpp new file mode 100644 index 00000000000000..975f790a4fa2db --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/activation.cpp @@ -0,0 +1,143 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "test_utils/cpu_test_utils.hpp" + +using namespace InferenceEngine; +using namespace CPUTestUtils; +using namespace ngraph::helpers; + +namespace CPULayerTestsDefinitions { + +typedef std::tuple< + LayerTestsDefinitions::activationParams, + CPUSpecificParams> + ActivationLayerCPUTestParamSet; + +class ActivationLayerCPUTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase { +public: + ActivationTypes activationType; + static std::string getTestCaseName(const testing::TestParamInfo &obj) { + LayerTestsDefinitions::activationParams basicParamsSet; + CPUSpecificParams cpuParams; + std::tie(basicParamsSet, cpuParams) = obj.param; + + std::ostringstream result; + result << LayerTestsDefinitions::ActivationLayerTest::getTestCaseName(testing::TestParamInfo( + basicParamsSet, 0)); + + result << CPUTestsBase::getTestCaseName(cpuParams); + + return result.str(); + } + InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override { + return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 15, 0, 32768); + } + +protected: + void SetUp() override { + LayerTestsDefinitions::activationParams basicParamsSet; + CPUSpecificParams cpuParams; + std::tie(basicParamsSet, cpuParams) = this->GetParam(); + + std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; + + InferenceEngine::Precision netPrecision; + std::pair, std::vector> shapes; + std::pair> activationDecl; + std::tie(activationDecl, netPrecision, inPrc, outPrc, inLayout, outLayout, shapes, targetDevice) = basicParamsSet; + selectedType = getPrimitiveType() + "_" + inPrc.name(); + + activationType = activationDecl.first; + auto constantsValue = activationDecl.second; + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto params = ngraph::builder::makeParams(ngPrc, {shapes.first}); + auto activation = ngraph::builder::makeActivation(params[0], ngPrc, activationType, shapes.second, constantsValue); + activation->get_rt_info() = getCPUInfo(); + function = std::make_shared(ngraph::NodeVector{activation}, params, "Activation"); + } +}; + 
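+// The test below first runs the single-activation subgraph against the reference implementation (Run()),
+// then CheckCPUImpl verifies that the activation was executed by the expected "Eltwise" CPU node
+// with the selected primitive type and memory formats taken from the CPU-specific test parameters.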
+TEST_P(ActivationLayerCPUTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + Run(); + CheckCPUImpl(executableNetwork, "Eltwise"); +} + + +namespace { +// list only types supported by eltwise +const std::map>> activationTypes = { + {Sqrt, {{}}}, + {Sigmoid, {{}}}, + {Tanh, {{}}}, + {Relu, {{}}}, + {Gelu, {{}}}, + {Exp, {{}}}, + {Clamp, {{-2.0f, 2.0f}}}, + {Elu, {{0.1f}}}, + {Swish, {{0.1f}}}, + {HSwish, {{}}}, + {Mish, {{}}}, + {PReLu, {{-0.01f}}} +}; + +std::vector cpuParams_4D = { + CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}), + CPUSpecificParams({nhwc}, {nhwc}, {}, {}), + CPUSpecificParams({nchw}, {nchw}, {}, {}) +}; + +std::map, std::vector>> basic4D = { + {{2, 4, 4, 1}, {{}}}, + {{2, 17, 5, 4}, {{}}}, +}; + +std::vector bf16InpOutPrc = {Precision::BF16, Precision::FP32}; + +const auto basicCases4D = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(CommonTestUtils::combineParams(activationTypes)), + ::testing::Values(Precision::BF16), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::ValuesIn(CommonTestUtils::combineParams(basic4D)), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)) +); + +INSTANTIATE_TEST_CASE_P(smoke_Activation4D_Eltwise_CPU_BF16, ActivationLayerCPUTest, basicCases4D, ActivationLayerCPUTest::getTestCaseName); + +std::vector cpuParams_5D = { + CPUSpecificParams({nCdhw16c}, {nCdhw16c}, {}, {}), + CPUSpecificParams({ndhwc}, {ndhwc}, {}, {}), + CPUSpecificParams({ncdhw}, {ncdhw}, {}, {}) +}; + +std::map, std::vector>> basic5D = { + {{2, 4, 3, 4, 1}, {{}}}, + {{2, 17, 7, 5, 4}, {{}}}, +}; + +const auto basicCases5D = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(CommonTestUtils::combineParams(activationTypes)), + ::testing::Values(Precision::BF16), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::ValuesIn(CommonTestUtils::combineParams(basic5D)), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)) +); + +INSTANTIATE_TEST_CASE_P(smoke_Activation5D_Eltwise_CPU_BF16, ActivationLayerCPUTest, basicCases5D, ActivationLayerCPUTest::getTestCaseName); +} // namespace +} // namespace CPULayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/convert.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/convert.cpp new file mode 100644 index 00000000000000..89159652129173 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/convert.cpp @@ -0,0 +1,57 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +using namespace LayerTestsDefinitions; +using namespace InferenceEngine; + +namespace CPULayerTestsDefinitions { + +class ConvertCPULayerTest : public ConvertLayerTest {}; + +TEST_P(ConvertCPULayerTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + ConvertParamsTuple params = GetParam(); + inPrc = std::get<1>(params); + outPrc = std::get<2>(params); + + Run(); +} + +namespace { +const std::vector> inShape = {{1, 2, 3, 4}}; + +// List of precisions natively supported by mkldnn. 
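+// Each precision below is used both as the destination type (From_BF16 instantiation) and as the
+// source type (To_BF16 instantiation) of the Convert node in the test cases that follow.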
+const std::vector precisions = { + Precision::U8, + Precision::I8, + Precision::I16, + Precision::I32, + Precision::FP32, + Precision::BF16 +}; + +INSTANTIATE_TEST_CASE_P(smoke_ConvertLayerTest_From_BF16, ConvertCPULayerTest, + ::testing::Combine( + ::testing::Values(inShape), + ::testing::Values(Precision::BF16), + ::testing::ValuesIn(precisions), + ::testing::Values(Layout::ANY), + ::testing::Values(Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + ConvertLayerTest::getTestCaseName); + +INSTANTIATE_TEST_CASE_P(smoke_ConvertLayerTest_To_BF16, ConvertCPULayerTest, + ::testing::Combine( + ::testing::Values(inShape), + ::testing::ValuesIn(precisions), + ::testing::Values(Precision::BF16), + ::testing::Values(Layout::ANY), + ::testing::Values(Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + ConvertLayerTest::getTestCaseName); +} // namespace +} // namespace CPULayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/crop.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/crop.cpp new file mode 100644 index 00000000000000..920ca6fba150c8 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/crop.cpp @@ -0,0 +1,176 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "ngraph_functions/builders.hpp" +#include "test_utils/cpu_test_utils.hpp" + +// Given that the ngraph opset does not contain crop operation, we use the StridedSlice operation instead, since it is mapped to the Crop node if certain +// conditions are met. + +using namespace InferenceEngine; +using namespace CPUTestUtils; +using namespace LayerTestsDefinitions; + +namespace CPULayerTestsDefinitions { + +typedef std::tuple< + StridedSliceSpecificParams, + InferenceEngine::Precision, // Net precision + std::string, // Device name + std::map, // Additional network configuration + CPUSpecificParams> CropLayerCPUTestParamSet; + +class CropLayerCPUTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + StridedSliceSpecificParams params; + InferenceEngine::Precision netPrc; + std::string targetName; + std::map additionalConfig; + CPUSpecificParams cpuParams; + std::tie(params, netPrc, targetName, additionalConfig, cpuParams) = obj.param; + + std::ostringstream result; + result << "inShape=" << CommonTestUtils::vec2str(params.inputShape) << "_"; + result << "netPRC=" << netPrc.name() << "_"; + result << "begin=" << CommonTestUtils::vec2str(params.begin) << "_"; + result << "end=" << CommonTestUtils::vec2str(params.end) << "_"; + result << "stride=" << CommonTestUtils::vec2str(params.strides) << "_"; + result << "begin_m=" << CommonTestUtils::vec2str(params.beginMask) << "_"; + result << "end_m=" << CommonTestUtils::vec2str(params.endMask) << "_"; + if (!params.newAxisMask.empty()) { + result << "new_axis_m=" << (params.newAxisMask.empty() ? "def" : CommonTestUtils::vec2str(params.newAxisMask)) << "_"; + } + if (!params.shrinkAxisMask.empty()) { + result << "shrink_m=" << (params.shrinkAxisMask.empty() ? "def" : CommonTestUtils::vec2str(params.shrinkAxisMask)) << "_"; + } + if (!params.ellipsisAxisMask.empty()) { + result << "ellipsis_m=" << (params.ellipsisAxisMask.empty() ? 
"def" : CommonTestUtils::vec2str(params.ellipsisAxisMask)) << "_"; + } + result << "trgDev=" << targetName; + result << CPUTestsBase::getTestCaseName(cpuParams); + + return result.str(); + } +protected: + void SetUp() override { + StridedSliceSpecificParams ssParams; + InferenceEngine::Precision netPrecision; + std::map additionalConfig; + CPUSpecificParams cpuParams; + std::tie(ssParams, netPrecision, targetDevice, additionalConfig, cpuParams) = this->GetParam(); + inPrc = outPrc = netPrecision; // because crop does not convert Precisions, but only moves the data + std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; + configuration.insert(additionalConfig.begin(), additionalConfig.end()); + + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto params = ngraph::builder::makeParams(ngPrc, {ssParams.inputShape}); + auto paramOuts = ngraph::helpers::convert2OutputVector( + ngraph::helpers::castOps2Nodes(params)); + auto ss = ngraph::builder::makeStridedSlice(paramOuts[0], ssParams.begin, ssParams.end, ssParams.strides, ngPrc, ssParams.beginMask, + ssParams.endMask, ssParams.newAxisMask, ssParams.shrinkAxisMask, ssParams.ellipsisAxisMask); + + selectedType = std::string("unknown_") + inPrc.name(); + + ss->get_rt_info() = getCPUInfo(); + + ngraph::ResultVector results{std::make_shared(ss)}; + function = std::make_shared(results, params, "StridedSlice"); + } +}; + +TEST_P(CropLayerCPUTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + Run(); + CheckCPUImpl(executableNetwork, "Crop"); +} + +namespace { +const std::map additional_config; + +const std::vector netPrc = {Precision::BF16, Precision::FP32}; + +const std::vector testCasesPlain2D = {StridedSliceSpecificParams{ { 32, 32 }, { 0, 20 }, { 32, 30 }, { 1, 1 }, + { 0, 0 }, { 0, 0 }, { }, { }, { } }, + StridedSliceSpecificParams{ { 32, 20 }, { 2, 10 }, { 32, 20 }, { 1, 1 }, + { 0, 0 }, { 0, 0 }, { }, { }, { } } }; + +const auto CropParamsPlain2D = ::testing::Combine( + ::testing::ValuesIn(testCasesPlain2D), + ::testing::ValuesIn(netPrc), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config), + ::testing::Values(emptyCPUSpec)); + +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_Plain_2D, CropLayerCPUTest, CropParamsPlain2D, CropLayerCPUTest::getTestCaseName); + +const std::vector testCasesPlain4D = { + StridedSliceSpecificParams{ { 1, 5, 32, 32 }, { 0, 2, 5, 4 }, { 1, 4, 28, 27 }, { 1, 1, 1, 1 }, + { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, + StridedSliceSpecificParams{ { 1, 5, 32, 32 }, { 0, 0, 20, 20 }, { 1, 5, 25, 25 }, { 1, 1, 1, 1 }, + { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, + StridedSliceSpecificParams{ { 1, 5, 32, 32 }, { 0, 0, 0, 20 }, { 1, 5, 32, 30 }, { 1, 1, 1, 1 }, + { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, + StridedSliceSpecificParams{ { 1, 5, 32, 20 }, { 0, 0, 2, 10 }, { 1, 5, 32, 20 }, { 1, 1, 1, 1 }, + { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } } +}; + +std::vector cpuParams_4D = { + CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}), + CPUSpecificParams({nchw}, {nchw}, {}, {}) +}; + +const auto CropParamsPlain4D = ::testing::Combine( + ::testing::ValuesIn(testCasesPlain4D), + ::testing::ValuesIn(netPrc), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config), + ::testing::Values(cpuParams_4D.at(1))); + +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_Plain_4D, CropLayerCPUTest, CropParamsPlain4D, CropLayerCPUTest::getTestCaseName); + +const std::vector testCasesBlocked4D = { + 
StridedSliceSpecificParams{ { 1, 16, 32, 32 }, { 0, 0, 20, 20 }, { 1, 16, 25, 25 }, { 1, 1, 1, 1 }, + { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, + StridedSliceSpecificParams{ { 1, 32, 32, 32 }, { 0, 0, 0, 20 }, { 1, 16, 32, 30 }, { 1, 1, 1, 1 }, + { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, +}; + +const auto CropParamsBlocked4D = ::testing::Combine( + ::testing::ValuesIn(testCasesBlocked4D), + ::testing::ValuesIn(netPrc), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config), + ::testing::Values(filterCPUSpecificParams(cpuParams_4D).front())); + +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_Blocked_4D, CropLayerCPUTest, CropParamsBlocked4D, CropLayerCPUTest::getTestCaseName); + +const std::vector testCasesPlain4DynBatch = { + StridedSliceSpecificParams{ { 10, 5, 32, 32 }, { 0, 2, 5, 4 }, { 1, 4, 28, 27 }, { 1, 1, 1, 1 }, + { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, + StridedSliceSpecificParams{ { 10, 5, 32, 32 }, { 0, 0, 20, 20 }, { 1, 5, 25, 25 }, { 1, 1, 1, 1 }, + { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, + StridedSliceSpecificParams{ { 10, 5, 32, 32 }, { 0, 0, 0, 20 }, { 1, 5, 32, 30 }, { 1, 1, 1, 1 }, + { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, + StridedSliceSpecificParams{ { 10, 5, 32, 20 }, { 0, 0, 2, 10 }, { 1, 5, 32, 20 }, { 1, 1, 1, 1 }, + { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } } +}; + +std::map additional_config_dyn_batch = {{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::NO}, + {PluginConfigParams::KEY_DYN_BATCH_ENABLED, PluginConfigParams::YES}}; + +const auto CropParamsPlain4DynBatch = ::testing::Combine( + ::testing::ValuesIn(testCasesPlain4DynBatch), + ::testing::ValuesIn(netPrc), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config_dyn_batch), + ::testing::Values(cpuParams_4D.at(1))); + +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_Blocked_4DynBatch, CropLayerCPUTest, CropParamsPlain4DynBatch, CropLayerCPUTest::getTestCaseName); +} // namespace +} // namespace CPULayerTestsDefinitions + diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/eltwise.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/eltwise.cpp index 7d371b4b1a15ef..b968545b7dfb25 100644 --- a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/eltwise.cpp +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/eltwise.cpp @@ -47,17 +47,7 @@ class EltwiseLayerCPUTest : public testing::WithParamInterface inputShape1, inputShape2; if (inputShapes.size() == 1) { @@ -90,12 +80,7 @@ class EltwiseLayerCPUTest : public testing::WithParamInterface data(ngraph::shape_size(shape_input_secondary)); - data = NGraphFunctions::Utils::generateVector(ngraph::shape_size(shape_input_secondary)); - for (float &i : data) { - if (i == 0) { - i = 1; - } - } + data = NGraphFunctions::Utils::generateVector(ngraph::shape_size(shape_input_secondary), 10, 2); secondaryInput = ngraph::builder::makeConstant(ngPrc, shape_input_secondary, data); } else { secondaryInput = ngraph::builder::makeInputLayer(ngPrc, secondaryInputType, shape_input_secondary); @@ -105,7 +90,7 @@ class EltwiseLayerCPUTest : public testing::WithParamInterfaceget_rt_info() = CPUTestsBase::setCPUInfo(inFmts, outFmts, priority); + eltwise->get_rt_info() = getCPUInfo(); function = std::make_shared(eltwise, input, "Eltwise"); } }; @@ -114,7 +99,7 @@ TEST_P(EltwiseLayerCPUTest, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED() Run(); - CheckCPUImpl(executableNetwork, "Eltwise", 
inFmts, outFmts, selectedType); + CheckCPUImpl(executableNetwork, "Eltwise"); } namespace { @@ -128,7 +113,7 @@ std::vector opTypes = { CommonTestUtils::OpType::VECTOR, }; -std::vector eltwiseOpTypes = { +std::vector eltwiseOpTypesBinInp = { ngraph::helpers::EltwiseTypes::ADD, ngraph::helpers::EltwiseTypes::MULTIPLY, // TODO: Disabled because memory formats filter is not propogated through ngraph transformations @@ -138,27 +123,15 @@ std::vector eltwiseOpTypes = { ngraph::helpers::EltwiseTypes::SQUARED_DIFF, }; -std::map additional_config = {}; +std::vector eltwiseOpTypesDiffInp = { // Different number of input nodes depending on optimizations + ngraph::helpers::EltwiseTypes::POWER, + // ngraph::helpers::EltwiseTypes::MOD // Does not execute because of transformations +}; -std::vector filterCPUSpecificParams(std::vector& paramsVector) { - auto adjustBlockedFormatByIsa = [](std::vector& formats) { - for (int i = 0; i < formats.size(); i++) { - if (formats[i] == nChw16c) - formats[i] = nChw8c; - if (formats[i] == nCdhw16c) - formats[i] = nCdhw8c; - } - }; +std::map additional_config; - if (!with_cpu_x86_avx512f()) { - for (auto& param : paramsVector) { - adjustBlockedFormatByIsa(std::get<0>(param)); - adjustBlockedFormatByIsa(std::get<1>(param)); - } - } +std::vector bf16InpOutPrc = {Precision::BF16, Precision::FP32}; - return paramsVector; -} std::vector>> inShapes_4D = { {{2, 4, 4, 1}}, @@ -176,19 +149,50 @@ std::vector cpuParams_4D = { const auto params_4D_FP32 = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(inShapes_4D), - ::testing::ValuesIn(eltwiseOpTypes), + ::testing::ValuesIn(eltwiseOpTypesBinInp), ::testing::ValuesIn(secondaryInputTypes), ::testing::ValuesIn(opTypes), ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D))); -INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_4D_FP32, EltwiseLayerCPUTest, params_4D_FP32, EltwiseLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_4D_FP32_MemOrder, EltwiseLayerCPUTest, params_4D_FP32, EltwiseLayerCPUTest::getTestCaseName); + +const auto params_4D_BF16 = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(inShapes_4D), + ::testing::ValuesIn(eltwiseOpTypesBinInp), + ::testing::ValuesIn(secondaryInputTypes), + ::testing::ValuesIn(opTypes), + ::testing::Values(InferenceEngine::Precision::BF16), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config)), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D))); + +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_4D_BF16_MemOrder, EltwiseLayerCPUTest, params_4D_BF16, EltwiseLayerCPUTest::getTestCaseName); + +const auto params_4D_BF16_emptyCPUSpec = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(inShapes_4D), + ::testing::ValuesIn(eltwiseOpTypesDiffInp), + ::testing::ValuesIn(secondaryInputTypes), + ::testing::ValuesIn(opTypes), + ::testing::Values(InferenceEngine::Precision::BF16), + ::testing::ValuesIn(bf16InpOutPrc), + 
::testing::ValuesIn(bf16InpOutPrc), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config)), + ::testing::Values(emptyCPUSpec)); +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_4D_BF16, EltwiseLayerCPUTest, params_4D_BF16_emptyCPUSpec, EltwiseLayerCPUTest::getTestCaseName); std::vector>> inShapes_5D = { {{2, 4, 3, 4, 1}}, @@ -206,19 +210,50 @@ std::vector cpuParams_5D = { const auto params_5D_FP32 = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(inShapes_5D), - ::testing::ValuesIn(eltwiseOpTypes), + ::testing::ValuesIn(eltwiseOpTypesBinInp), ::testing::ValuesIn(secondaryInputTypes), ::testing::ValuesIn(opTypes), ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D))); -INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_5D_FP32, EltwiseLayerCPUTest, params_5D_FP32, EltwiseLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_5D_FP32_MemOrder, EltwiseLayerCPUTest, params_5D_FP32, EltwiseLayerCPUTest::getTestCaseName); + +const auto params_5D_BF16 = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(inShapes_5D), + ::testing::ValuesIn(eltwiseOpTypesBinInp), + ::testing::ValuesIn(secondaryInputTypes), + ::testing::ValuesIn(opTypes), + ::testing::Values(InferenceEngine::Precision::BF16), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config)), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D))); + +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_5D_BF16_MemOrder, EltwiseLayerCPUTest, params_5D_BF16, EltwiseLayerCPUTest::getTestCaseName); + +const auto params_5D_BF16_emptyCPUSpec = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(inShapes_5D), + ::testing::ValuesIn(eltwiseOpTypesDiffInp), + ::testing::ValuesIn(secondaryInputTypes), + ::testing::ValuesIn(opTypes), + ::testing::Values(InferenceEngine::Precision::BF16), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config)), + ::testing::Values(emptyCPUSpec)); +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_5D_BF16, EltwiseLayerCPUTest, params_5D_BF16_emptyCPUSpec, EltwiseLayerCPUTest::getTestCaseName); std::vector>> inShapes_4D_Blocked_Planar = { {{2, 17, 31, 3}, {2, 1, 31, 3}}, @@ -232,12 +267,12 @@ std::vector cpuParams_4D_Blocked_Planar = { const auto params_4D_FP32_Blocked_Planar = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(inShapes_4D_Blocked_Planar), - ::testing::ValuesIn(eltwiseOpTypes), + ::testing::ValuesIn(eltwiseOpTypesBinInp), ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes), ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + 
::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), @@ -258,12 +293,12 @@ std::vector cpuParams_4D_Planar_Blocked = { const auto params_4D_FP32_Planar_Blocked = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(inShapes_4D_Planar_Blocked), - ::testing::ValuesIn(eltwiseOpTypes), + ::testing::ValuesIn(eltwiseOpTypesBinInp), ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes), ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), @@ -284,12 +319,12 @@ std::vector cpuParams_5D_Blocked_Planar = { const auto params_5D_FP32_Blocked_Planar = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(inShapes_5D_Blocked_Planar), - ::testing::ValuesIn(eltwiseOpTypes), + ::testing::ValuesIn(eltwiseOpTypesBinInp), ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes), ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), @@ -310,12 +345,12 @@ std::vector cpuParams_5D_Planar_Blocked = { const auto params_5D_FP32_Planar_Blocked = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(inShapes_5D_Planar_Blocked), - ::testing::ValuesIn(eltwiseOpTypes), + ::testing::ValuesIn(eltwiseOpTypesBinInp), ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes), ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(InferenceEngine::Precision::FP32), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::Values(additional_config)), diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/group_convolution.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/group_convolution.cpp index a5b9f09fbc8851..784ced4c22649b 100644 --- a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/group_convolution.cpp +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/group_convolution.cpp @@ -57,7 +57,7 @@ class GroupConvolutionLayerCPUTest : public testing::WithParamInterface( ngraph::builder::makeGroupConvolution(paramOuts[0], ngPrc, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels, numGroups)); - groupConv->get_rt_info() = setCPUInfo(inFmts, outFmts, priority); + groupConv->get_rt_info() = getCPUInfo(); ngraph::ResultVector results{std::make_shared(groupConv)}; function = std::make_shared(results, params, "groupConvolution"); } @@ -67,7 +67,7 @@ 
TEST_P(GroupConvolutionLayerCPUTest, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED() Run(); - CheckCPUImpl(executableNetwork, "Convolution", inFmts, outFmts, selectedType); + CheckCPUImpl(executableNetwork, "Convolution"); } namespace { diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/interpolate.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/interpolate.cpp index 3c3891c899bc4a..9d153429994aba 100644 --- a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/interpolate.cpp +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/interpolate.cpp @@ -78,21 +78,17 @@ class InterpolateLayerCPUTest : public testing::WithParamInterfaceget_rt_info() = CPUTestsBase::setCPUInfo(inFmts, outFmts, priority); + interpolate->get_rt_info() = getCPUInfo(); const ngraph::ResultVector results{std::make_shared(interpolate)}; function = std::make_shared(results, params, "interpolate"); } - - std::vector inFmts, outFmts; - std::vector priority; - std::string selectedType; }; TEST_P(InterpolateLayerCPUTest, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED() Run(); - CheckCPUImpl(executableNetwork, "Interpolate", inFmts, outFmts, selectedType); + CheckCPUImpl(executableNetwork, "Interpolate"); } namespace { diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/logical.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/logical.cpp new file mode 100644 index 00000000000000..a968df20a184ef --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/logical.cpp @@ -0,0 +1,158 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "ngraph_functions/builders.hpp" +#include "test_utils/cpu_test_utils.hpp" + +using namespace InferenceEngine; +using namespace CPUTestUtils; +using namespace ngraph::helpers; + +namespace CPULayerTestsDefinitions { + +typedef std::tuple< + LayerTestsDefinitions::LogicalTestParams, + CPUSpecificParams> +LogicalLayerCPUTestParamSet; + +class LogicalLayerCPUTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + LayerTestsDefinitions::LogicalTestParams basicParamsSet; + CPUSpecificParams cpuParams; + std::tie(basicParamsSet, cpuParams) = obj.param; + + std::ostringstream result; + result << LayerTestsDefinitions::LogicalLayerTest::getTestCaseName(testing::TestParamInfo( + basicParamsSet, 0)); + + result << CPUTestsBase::getTestCaseName(cpuParams); + + return result.str(); + } + +protected: + void SetUp() override { + LayerTestsDefinitions::LogicalTestParams basicParamsSet; + CPUSpecificParams cpuParams; + std::tie(basicParamsSet, cpuParams) = this->GetParam(); + + std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; + + LayerTestsDefinitions::LogicalParams::InputShapesTuple inputShapes; + ngraph::helpers::LogicalTypes logicalOpType; + ngraph::helpers::InputLayerType secondInputType; + InferenceEngine::Precision netPrecision; + std::string targetName; + std::map additional_config; + std::tie(inputShapes, logicalOpType, secondInputType, netPrecision, inPrc, outPrc, + inLayout, outLayout, targetDevice, additional_config) = basicParamsSet; + + selectedType = getPrimitiveType() + "_" + inPrc.name(); + + auto ngInputsPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(Precision::BOOL); // Because ngraph supports only boolean input for logical 
ops + configuration.insert(additional_config.begin(), additional_config.end()); + + auto inputs = ngraph::builder::makeParams(ngInputsPrc, {inputShapes.first}); + + std::shared_ptr logicalNode; + if (logicalOpType != ngraph::helpers::LogicalTypes::LOGICAL_NOT) { + auto secondInput = ngraph::builder::makeInputLayer(ngInputsPrc, secondInputType, inputShapes.second); + if (secondInputType == ngraph::helpers::InputLayerType::PARAMETER) { + inputs.push_back(std::dynamic_pointer_cast(secondInput)); + } + logicalNode = ngraph::builder::makeLogical(inputs[0], secondInput, logicalOpType); + } else { + logicalNode = ngraph::builder::makeLogical(inputs[0], ngraph::Output(), logicalOpType); + } + + logicalNode->get_rt_info() = getCPUInfo(); + + function = std::make_shared(logicalNode, inputs, "Logical"); + } +}; + +TEST_P(LogicalLayerCPUTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + Run(); + CheckCPUImpl(executableNetwork, "Eltwise"); +} + +namespace { + +std::map, std::vector>> inputShapes = { + {{1}, {{1}, {17}, {1, 1}, {2, 18}, {1, 1, 2}, {2, 2, 3}, {1, 1, 2, 3}}}, + {{5}, {{1}, {1, 1}, {2, 5}, {1, 1, 1}, {2, 2, 5}}}, + {{2, 200}, {{1}, {200}, {1, 200}, {2, 200}, {2, 2, 200}}}, + {{1, 3, 20}, {{20}, {2, 1, 1}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {2, 1, 3, 4}}}, + {{2, 1, 1, 3, 1}, {{1}, {1, 3, 4}, {2, 1, 3, 4}, {1, 1, 1, 1, 1}}}, +}; + +std::map, std::vector>> inputShapesNot = { + {{1}, {}}, + {{5}, {}}, + {{2, 200}, {}}, + {{1, 3, 20}, {}}, + {{2, 17, 3, 4}, {}}, + {{2, 1, 1, 3, 1}, {}}, +}; + +std::vector inputsPrecisions = { + InferenceEngine::Precision::BOOL, +}; + +std::vector logicalOpTypes = { + ngraph::helpers::LogicalTypes::LOGICAL_AND, + ngraph::helpers::LogicalTypes::LOGICAL_OR, + ngraph::helpers::LogicalTypes::LOGICAL_XOR, +}; + +std::vector secondInputTypes = { + ngraph::helpers::InputLayerType::CONSTANT, + ngraph::helpers::InputLayerType::PARAMETER, +}; + +std::map additional_config; + +std::vector bf16InpOutPrc = {Precision::BF16, Precision::FP32}; + +const auto LogicalTestParams = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(LayerTestsDefinitions::LogicalLayerTest::combineShapes(inputShapes)), + ::testing::ValuesIn(logicalOpTypes), + ::testing::ValuesIn(secondInputTypes), + ::testing::Values(Precision::BF16), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::Values(Layout::ANY), + ::testing::Values(Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config)), + ::testing::Values(emptyCPUSpec)); + +const auto LogicalTestParamsNot = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(LayerTestsDefinitions::LogicalLayerTest::combineShapes(inputShapesNot)), + ::testing::Values(ngraph::helpers::LogicalTypes::LOGICAL_NOT), + ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(Precision::BF16), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::ValuesIn(bf16InpOutPrc), + ::testing::Values(Layout::ANY), + ::testing::Values(Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config)), + ::testing::Values(emptyCPUSpec)); + + +INSTANTIATE_TEST_CASE_P(smoke_Logical_Eltwise_CPU_BF16, LogicalLayerCPUTest, LogicalTestParams, LogicalLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_CASE_P(smoke_Logical_Not_Eltwise_CPU_BF16, LogicalLayerCPUTest, LogicalTestParamsNot, LogicalLayerCPUTest::getTestCaseName); + +} // namespace +} // namespace CPULayerTestsDefinitions \ No newline at end of file diff 
--git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/mvn.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/mvn.cpp new file mode 100644 index 00000000000000..ad120a1b94051a --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/mvn.cpp @@ -0,0 +1,200 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "ngraph_functions/builders.hpp" +#include "test_utils/cpu_test_utils.hpp" + +using namespace InferenceEngine; +using namespace CPUTestUtils; + +namespace CPULayerTestsDefinitions { + +typedef std::tuple< + LayerTestsDefinitions::mvnParams, + CPUSpecificParams, + Precision, // CNNNetwork input precision + Precision> // CNNNetwork output precision +MvnLayerCPUTestParamSet; + +class MvnLayerCPUTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + LayerTestsDefinitions::mvnParams basicParamsSet; + CPUSpecificParams cpuParams; + Precision inputPrecision, outputPrecision; + std::tie(basicParamsSet, cpuParams, inputPrecision, outputPrecision) = obj.param; + + std::ostringstream result; + result << LayerTestsDefinitions::MvnLayerTest::getTestCaseName(testing::TestParamInfo( + basicParamsSet, 0)); + + result << "_" << "CNNInpPrc=" << inputPrecision.name(); + result << "_" << "CNNOutPrc=" << outputPrecision.name(); + + result << CPUTestsBase::getTestCaseName(cpuParams); + + return result.str(); + } +protected: + void SetUp() override { + LayerTestsDefinitions::mvnParams basicParamsSet; + CPUSpecificParams cpuParams; + std::tie(basicParamsSet, cpuParams, inPrc, outPrc) = this->GetParam(); + + std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; + + InferenceEngine::SizeVector inputShapes; + InferenceEngine::Precision netPrecision; + bool acrossChanels, normalizeVariance; + double eps; + std::tie(inputShapes, netPrecision, acrossChanels, normalizeVariance, eps, targetDevice) = basicParamsSet; + auto netPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto param = ngraph::builder::makeParams(netPrc, {inputShapes}); + auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(param)); + auto mvn = ngraph::builder::makeMVN(paramOuts[0], acrossChanels, normalizeVariance, eps); + ngraph::ResultVector results{std::make_shared(mvn)}; + + selectedType = getPrimitiveType() + "_" + inPrc.name(); + + threshold = 0.015f; + + mvn->get_rt_info() = getCPUInfo(); + + function = std::make_shared(results, param, "mvn"); + } +}; + +TEST_P(MvnLayerCPUTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + Run(); + CheckCPUImpl(executableNetwork, "MVN"); +} + +namespace { +const std::vector> inputShapes_3D = { + {1, 32, 17}, + {1, 37, 9}, +}; + +const std::vector> inputShapes_4D = { + {1, 16, 5, 8}, + {2, 19, 5, 10}, + {7, 32, 2, 8}, + {5, 8, 3, 5}, + {4, 41, 6, 9} +}; + +const std::vector> inputShapes_5D = { + {1, 32, 8, 1, 6}, + {1, 9, 1, 15, 9}, + {6, 64, 6, 1, 18}, + {2, 31, 2, 9, 1}, + {10, 16, 5, 10, 6} +}; + +const std::vector acrossChannels = { + true, + false +}; + +const std::vector normalizeVariance = { + true, + false +}; + +const std::vector epsilon = { + 0.000000001 +}; + +std::vector inpOutPrc = {Precision::BF16, Precision::FP32}; + +std::vector cpuParams_4D = { + CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}), + CPUSpecificParams({nchw}, {nchw}, {}, {}) +}; + +std::vector 
cpuParams_5D = { + CPUSpecificParams({nCdhw16c}, {nCdhw16c}, {}, {}), + CPUSpecificParams({ncdhw}, {ncdhw}, {}, {}) +}; + +const auto Mvn3D = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(inputShapes_3D), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::ValuesIn(acrossChannels), + ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(epsilon), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + ::testing::Values(emptyCPUSpec), + ::testing::ValuesIn(inpOutPrc), + ::testing::ValuesIn(inpOutPrc)); + +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_3D, MvnLayerCPUTest, Mvn3D, MvnLayerCPUTest::getTestCaseName); + +const auto Mvn4D = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(inputShapes_4D), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::ValuesIn(acrossChannels), + ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(epsilon), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), + ::testing::ValuesIn(inpOutPrc), + ::testing::ValuesIn(inpOutPrc)); + +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_4D, MvnLayerCPUTest, Mvn4D, MvnLayerCPUTest::getTestCaseName); + + +const auto MvnNHWC = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(inputShapes_4D), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(false), + ::testing::Values(true), + ::testing::ValuesIn(epsilon), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams({nhwc}, {nhwc}, {}, {})), + ::testing::ValuesIn(inpOutPrc), + ::testing::ValuesIn(inpOutPrc)); + +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_4D_NHWC, MvnLayerCPUTest, MvnNHWC, MvnLayerCPUTest::getTestCaseName); + +const auto MvnNDHWC = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(inputShapes_5D), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(false), + ::testing::Values(true), + ::testing::ValuesIn(epsilon), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams({ndhwc}, {ndhwc}, {}, {})), + ::testing::ValuesIn(inpOutPrc), + ::testing::ValuesIn(inpOutPrc)); + +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_5D_NDHWC, MvnLayerCPUTest, MvnNDHWC, MvnLayerCPUTest::getTestCaseName); + + +const auto Mvn5D = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(inputShapes_5D), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::ValuesIn(acrossChannels), + ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(epsilon), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), + ::testing::ValuesIn(inpOutPrc), + ::testing::ValuesIn(inpOutPrc)); + + +INSTANTIATE_TEST_CASE_P(smoke_CompareWithRefs_5D, MvnLayerCPUTest, Mvn5D, MvnLayerCPUTest::getTestCaseName); + + +} // namespace +} // namespace CPULayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/normalize.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/normalize.cpp new file mode 100755 index 00000000000000..9b182a1b1e90f1 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/normalize.cpp @@ -0,0 +1,132 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "ngraph_functions/builders.hpp" +#include "test_utils/cpu_test_utils.hpp" + +using namespace InferenceEngine; 
+using namespace CPUTestUtils; + +namespace CPULayerTestsDefinitions { + +typedef std::tuple< + LayerTestsDefinitions::NormalizeL2LayerTestParams, + CPUSpecificParams> +NormalizeL2LayerCPUTestParamSet; + +class NormalizeL2LayerCPUTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + LayerTestsDefinitions::NormalizeL2LayerTestParams basicParamsSet; + CPUSpecificParams cpuParams; + Precision inputPrecision, outputPrecision; + std::tie(basicParamsSet, cpuParams) = obj.param; + + std::ostringstream result; + result << LayerTestsDefinitions::NormalizeL2LayerTest::getTestCaseName(testing::TestParamInfo( + basicParamsSet, 0)); + + result << CPUTestsBase::getTestCaseName(cpuParams); + + return result.str(); + } +protected: + void SetUp() override { + LayerTestsDefinitions::NormalizeL2LayerTestParams basicParamsSet; + CPUSpecificParams cpuParams; + std::tie(basicParamsSet, cpuParams) = this->GetParam(); + + std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; + + std::vector axes; + float eps; + ngraph::op::EpsMode eps_mode; + InferenceEngine::SizeVector inputShapes; + InferenceEngine::Precision netPrecision; + std::tie(axes, eps, eps_mode, inputShapes, netPrecision, targetDevice) = basicParamsSet; + inPrc = outPrc = netPrecision; + auto netPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto param = ngraph::builder::makeParams(netPrc, {inputShapes}); + auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(param)); + auto normalize_l2 = ngraph::builder::makeNormalizeL2(paramOuts[0], axes, eps, eps_mode); + + ngraph::ResultVector results{std::make_shared(normalize_l2)}; + + if (Precision::BF16 == netPrecision) { + selectedType = "unknown_BF16"; + } else if (Precision::FP32 == netPrecision) { + selectedType = "unknown_FP32"; + } + + threshold = 0.015f; + + normalize_l2->get_rt_info() = getCPUInfo(); + + function = std::make_shared(results, param, "Normalize"); + } +}; + +TEST_P(NormalizeL2LayerCPUTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + Run(); + CheckCPUImpl(executableNetwork, "Normalize"); +} + +namespace { + +const std::vector> axes = { + {}, + {1}, +}; +const std::vector eps = { 1e-4f }; + +const std::vector epsMode = { + ngraph::op::EpsMode::ADD, + ngraph::op::EpsMode::MAX, +}; + +std::vector inpOutPrc = {Precision::BF16}; + +std::vector cpuParams_4D = { + CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}), + CPUSpecificParams({nhwc}, {nhwc}, {}, {}), + CPUSpecificParams({nchw}, {nchw}, {}, {}) +}; + + +const std::vector netPrecisions = { + Precision::FP32, + Precision::BF16 +}; + +const auto NormalizeL23D = testing::Combine( + testing::Combine( + testing::ValuesIn(axes), + testing::ValuesIn(eps), + testing::ValuesIn(epsMode), + testing::Values(std::vector{1, 32, 17}), + testing::ValuesIn(netPrecisions), + testing::Values(CommonTestUtils::DEVICE_CPU)), + testing::Values(emptyCPUSpec)); + +INSTANTIATE_TEST_CASE_P(smoke_NormalizeL2CompareWithRefs_3D, NormalizeL2LayerCPUTest, NormalizeL23D, NormalizeL2LayerCPUTest::getTestCaseName); + +const auto NormalizeL24D = testing::Combine( + testing::Combine( + testing::ValuesIn(axes), + testing::ValuesIn(eps), + testing::ValuesIn(epsMode), + testing::Values(std::vector{1, 3, 10, 5}), + testing::ValuesIn(netPrecisions), + testing::Values(CommonTestUtils::DEVICE_CPU)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D))); + 
+INSTANTIATE_TEST_CASE_P(smoke_NormalizeL2CompareWithRefs_4D, NormalizeL2LayerCPUTest, NormalizeL24D, NormalizeL2LayerCPUTest::getTestCaseName); + + +} // namespace +} // namespace CPULayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/permute.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/permute.cpp new file mode 100644 index 00000000000000..a0bf55781539b6 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/permute.cpp @@ -0,0 +1,148 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "ngraph_functions/builders.hpp" +#include "test_utils/cpu_test_utils.hpp" + +// Since the Transpose ngraph operation is converted to the permute node, we will use it in the permute test + +using namespace InferenceEngine; +using namespace CPUTestUtils; + +namespace CPULayerTestsDefinitions { + +typedef std::tuple< + std::vector, // Input order + InferenceEngine::Precision, // Net precision + std::vector, // Input shapes + std::string, // Target device name + std::map, // Additional network configuration + CPUSpecificParams> PermuteLayerCPUTestParamSet; + +class PermuteLayerCPUTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + Precision netPrecision; + std::vector inputShape, inputOrder; + std::string targetDevice; + CPUSpecificParams cpuParams; + std::map additionalConfig; + std::tie(inputOrder, netPrecision, inputShape, targetDevice, additionalConfig, cpuParams) = obj.param; + + std::ostringstream result; + result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_"; + result << "inputOrder=" << CommonTestUtils::vec2str(inputOrder) << "_"; + result << "netPRC=" << netPrecision.name() << "_"; + result << "trgDev=" << targetDevice; + result << CPUTestsBase::getTestCaseName(cpuParams); + return result.str(); + } +protected: + void SetUp() override { + SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING); + + Precision netPrecision; + std::vector inputShape, inputOrder; + CPUSpecificParams cpuParams; + std::map additionalConfig; + std::tie(inputOrder, netPrecision, inputShape, targetDevice, additionalConfig, cpuParams) = this->GetParam(); + configuration.insert(additionalConfig.begin(), additionalConfig.end()); + inPrc = outPrc = netPrecision; // since the layer does not convert precisions + + std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; + + selectedType = std::string("unknown_") + inPrc.name(); + + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); + auto paramOuts = ngraph::helpers::convert2OutputVector( + ngraph::helpers::castOps2Nodes(params)); + + const auto inOrderShape = inputOrder.empty() ? 
ngraph::Shape({0}) : ngraph::Shape({inputShape.size()}); + const auto inputOrderOp = std::make_shared(ngraph::element::i64, + inOrderShape, + inputOrder); + const auto transpose = std::make_shared(paramOuts.at(0), inputOrderOp); + transpose->get_rt_info() = getCPUInfo(); + const ngraph::ResultVector results{std::make_shared(transpose)}; + function = std::make_shared(results, params, "Transpose"); + } +}; + +TEST_P(PermuteLayerCPUTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + Run(); + CheckCPUImpl(executableNetwork, "Permute"); +} + +namespace { +std::map additional_config; + +const std::vector netPrecisions = { + Precision::BF16, + Precision::FP32 +}; + +const std::vector> inputShapes4D = { + {2, 32, 10, 20} +}; + +const std::vector> inputOrder4D = { + std::vector{0, 1, 2, 3}, + std::vector{0, 2, 3, 1}, + std::vector{0, 2, 1, 3}, + std::vector{1, 0, 2, 3}, + std::vector{}, +}; + +std::vector cpuParams_4D = { + CPUSpecificParams({nChw16c}, {}, {}, {}), + CPUSpecificParams({nchw}, {}, {}, {}), +}; + +const auto params4D = ::testing::Combine( + ::testing::ValuesIn(inputOrder4D), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes4D), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D))); + +INSTANTIATE_TEST_CASE_P(smoke_Permute4D_CPU, PermuteLayerCPUTest, params4D, PermuteLayerCPUTest::getTestCaseName); + +const std::vector> inputShapes5D = { + {2, 32, 5, 10, 20} +}; + +const std::vector> inputOrder5D = { + std::vector{0, 1, 2, 3, 4}, + std::vector{0, 4, 2, 3, 1}, + std::vector{0, 4, 2, 1, 3}, + std::vector{0, 2, 4, 3, 1}, + std::vector{0, 3, 2, 4, 1}, + std::vector{0, 3, 1, 4, 2}, + std::vector{1, 0, 2, 3, 4}, + std::vector{}, +}; + +std::vector cpuParams_5D = { + CPUSpecificParams({nCdhw16c}, {}, {}, {}), + CPUSpecificParams({ncdhw}, {}, {}, {}), +}; + +const auto params5D = ::testing::Combine( + ::testing::ValuesIn(inputOrder5D), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes5D), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::Values(additional_config), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D))); + +INSTANTIATE_TEST_CASE_P(smoke_Permute5D_CPU, PermuteLayerCPUTest, params5D, PermuteLayerCPUTest::getTestCaseName); + +} // namespace +} // namespace CPULayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/reduce_ops.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/reduce_ops.cpp new file mode 100644 index 00000000000000..becf723a81fc9d --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/reduce_ops.cpp @@ -0,0 +1,352 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "ngraph_functions/builders.hpp" +#include "test_utils/cpu_test_utils.hpp" + +using namespace InferenceEngine; +using namespace CPUTestUtils; +using namespace LayerTestsDefinitions; + +namespace CPULayerTestsDefinitions { + +typedef std::tuple ReduceLayerCPUTestParamSet; + +class ReduceCPULayerTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + reduceMeanParams basicParamsSet; + CPUSpecificParams cpuParams; + std::tie(basicParamsSet, cpuParams) = obj.param; + + std::ostringstream result; + result << 
LayerTestsDefinitions::ReduceOpsLayerTest::getTestCaseName(testing::TestParamInfo( + basicParamsSet, 0)); + result << CPUTestsBase::getTestCaseName(cpuParams); + + return result.str(); + } +protected: + void SetUp() override { + reduceMeanParams basicParamsSet; + CPUSpecificParams cpuParams; + std::tie(basicParamsSet, cpuParams) = this->GetParam(); + + std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; + + InferenceEngine::Precision netPrecision; + bool keepDims; + std::vector inputShape; + std::vector axes; + CommonTestUtils::OpType opType; + std::tie(axes, opType, keepDims, reductionType, netPrecision, inPrc, outPrc, inLayout, inputShape, targetDevice) = basicParamsSet; + + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); + auto paramOuts = ngraph::helpers::convert2OutputVector( + ngraph::helpers::castOps2Nodes(params)); + + std::vector shapeAxes; + switch (opType) { + case CommonTestUtils::OpType::SCALAR: { + if (axes.size() > 1) + FAIL() << "In reduce op if op type is scalar, 'axis' input's must contain 1 element"; + break; + } + case CommonTestUtils::OpType::VECTOR: { + shapeAxes.push_back(axes.size()); + break; + } + default: + FAIL() << "Reduce op doesn't support operation type: " << opType; + } + auto reductionAxesNode = std::dynamic_pointer_cast( + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); + + const auto reduce = ngraph::builder::makeReduce(paramOuts[0], reductionAxesNode, keepDims, reductionType); + + selectedType = getPrimitiveType() + "_" + inPrc.name(); + + reduce->get_rt_info() = getCPUInfo(); + + const ngraph::ResultVector results{std::make_shared(reduce)}; + function = std::make_shared(results, params, "Reduce"); + } + InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override { + if (ngraph::helpers::ReductionType::Prod == reductionType) { + // We change the range of random values to avoid possible floating point overflow + auto blob = FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 10, 5); + if (Precision::FP32 == info.getTensorDesc().getPrecision()) { + auto *rawBlobDataPtr = blob->buffer().as(); + for (size_t i = 0; i < blob->size(); ++i) { + rawBlobDataPtr[i] /= 10.f; + } + } else if (Precision::BF16 == info.getTensorDesc().getPrecision()) { + auto *rawBlobDataPtr = blob->buffer().as(); + for (size_t i = 0; i < blob->size(); ++i) { + rawBlobDataPtr[i] /= 10.f; + } + } + return blob; + } else { + return LayerTestsCommon::GenerateInput(info); + } + } + +private: + ngraph::helpers::ReductionType reductionType; +}; + +TEST_P(ReduceCPULayerTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + auto ops = function->get_ordered_ops(); + std::string name = (*(++ops.rbegin()))->get_type_name(); + + if ("ReduceLogicalAnd" == name) { + name = "ReduceAnd"; + } + if ("ReduceLogicalOr" == name) { + name = "ReduceOr"; + } + + Run(); + CheckCPUImpl(executableNetwork, name); +} +namespace { +std::vector inpOutPrc = {Precision::BF16, Precision::FP32}; + +const std::vector keepDims = { + true, + false, +}; + +const std::vector> axes = { + {0}, + {1}, + {2}, + {3} +}; + +const std::vector> axesND = { + {0, 1}, + {0, 2}, + {0, 3}, + {1, 2}, + {1, 3}, + {2, 3}, + {0, 1, 2}, + {0, 1, 3}, + {0, 2, 3}, + {1, 2, 3}, + {0, 1, 2, 3} +}; + +std::vector opTypes = { + CommonTestUtils::OpType::SCALAR, + CommonTestUtils::OpType::VECTOR, +}; + +const std::vector reductionTypes = { +// 
ngraph::helpers::ReductionType::Mean, //optimized out during the graph transformations +// ngraph::helpers::ReductionType::Max, //optimized out during the graph transformations +// ngraph::helpers::ReductionType::Sum, //optimized out during the graph transformations + ngraph::helpers::ReductionType::Min, + ngraph::helpers::ReductionType::Prod, + ngraph::helpers::ReductionType::L1, + ngraph::helpers::ReductionType::L2, +}; + +const std::vector reductionLogicalTypes = { + ngraph::helpers::ReductionType::LogicalOr, + ngraph::helpers::ReductionType::LogicalAnd +}; + +const std::vector> inputShapes = { + std::vector{10, 5, 15, 12}, + std::vector{3, 5, 7, 9}, +}; + +std::vector cpuParams_4D = { + CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}), + CPUSpecificParams({nchw}, {nchw}, {}, {}) +}; + +std::vector cpuParams_5D = { + CPUSpecificParams({nCdhw16c}, {nCdhw16c}, {}, {}), + CPUSpecificParams({ncdhw}, {ncdhw}, {}, {}) +}; + +const auto paramsOneAxis = ::testing::Combine( + testing::Combine( + testing::ValuesIn(axes), + testing::ValuesIn(opTypes), + testing::ValuesIn(keepDims), + testing::ValuesIn(reductionTypes), + testing::Values(InferenceEngine::Precision::FP32), + testing::ValuesIn(inpOutPrc), + testing::ValuesIn(inpOutPrc), + testing::Values(InferenceEngine::Layout::ANY), + testing::ValuesIn(inputShapes), + testing::Values(CommonTestUtils::DEVICE_CPU)), + testing::Values(emptyCPUSpec)); + +const auto paramsOneAxisLogical = testing::Combine( + testing::Combine( + testing::ValuesIn(axes), + testing::ValuesIn(opTypes), + testing::ValuesIn(keepDims), + testing::ValuesIn(reductionLogicalTypes), + testing::Values(InferenceEngine::Precision::BOOL), + testing::ValuesIn(inpOutPrc), + testing::ValuesIn(inpOutPrc), + testing::Values(InferenceEngine::Layout::ANY), + testing::ValuesIn(inputShapes), + testing::Values(CommonTestUtils::DEVICE_CPU)), + testing::Values(emptyCPUSpec)); + +const auto params_MultiAxis = testing::Combine( + testing::Combine( + testing::ValuesIn(axesND), + testing::Values(opTypes[1]), + testing::Values(false), + testing::ValuesIn(reductionTypes), + testing::Values(InferenceEngine::Precision::FP32), + testing::ValuesIn(inpOutPrc), + testing::ValuesIn(inpOutPrc), + testing::Values(InferenceEngine::Layout::ANY), + testing::Values(std::vector{2, 9, 2, 9}), + testing::Values(CommonTestUtils::DEVICE_CPU)), + testing::Values(emptyCPUSpec)); + +const auto params_MultiAxis_4D = testing::Combine( + testing::Combine( + testing::ValuesIn(axesND), + testing::Values(opTypes[1]), + testing::Values(true), + testing::ValuesIn(reductionTypes), + testing::Values(InferenceEngine::Precision::FP32), + testing::ValuesIn(inpOutPrc), + testing::ValuesIn(inpOutPrc), + testing::Values(InferenceEngine::Layout::ANY), + testing::Values(std::vector{2, 19, 2, 9}), + testing::Values(CommonTestUtils::DEVICE_CPU)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D))); + +const auto params_MultiAxis_5D = testing::Combine( + testing::Combine( + testing::ValuesIn(axesND), + testing::Values(opTypes[1]), + testing::Values(true), + testing::ValuesIn(reductionTypes), + testing::Values(InferenceEngine::Precision::FP32), + testing::ValuesIn(inpOutPrc), + testing::ValuesIn(inpOutPrc), + testing::Values(InferenceEngine::Layout::ANY), + testing::Values(std::vector{2, 19, 7, 2, 9}), + testing::Values(CommonTestUtils::DEVICE_CPU)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D))); + +const auto params_MultiAxisLogical = testing::Combine( + testing::Combine( + testing::ValuesIn(axesND), + 
testing::Values(opTypes[1]), + testing::Values(false), + testing::ValuesIn(reductionLogicalTypes), + testing::Values(InferenceEngine::Precision::BOOL), + testing::ValuesIn(inpOutPrc), + testing::ValuesIn(inpOutPrc), + testing::Values(InferenceEngine::Layout::ANY), + testing::Values(std::vector{2, 9, 2, 9}), + testing::Values(CommonTestUtils::DEVICE_CPU)), + testing::Values(emptyCPUSpec)); + +const auto params_MultiAxisLogical4D = testing::Combine( + testing::Combine( + testing::ValuesIn(axesND), + testing::Values(opTypes[1]), + testing::Values(true), + testing::ValuesIn(reductionLogicalTypes), + testing::Values(InferenceEngine::Precision::BOOL), + testing::ValuesIn(inpOutPrc), + testing::ValuesIn(inpOutPrc), + testing::Values(InferenceEngine::Layout::ANY), + testing::Values(std::vector{2, 19, 2, 9}), + testing::Values(CommonTestUtils::DEVICE_CPU)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D))); + +const auto params_MultiAxisLogical5D = testing::Combine( + testing::Combine( + testing::ValuesIn(axesND), + testing::Values(opTypes[1]), + testing::Values(true), + testing::ValuesIn(reductionLogicalTypes), + testing::Values(InferenceEngine::Precision::BOOL), + testing::ValuesIn(inpOutPrc), + testing::ValuesIn(inpOutPrc), + testing::Values(InferenceEngine::Layout::ANY), + testing::Values(std::vector{2, 19, 7, 2, 9}), + testing::Values(CommonTestUtils::DEVICE_CPU)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D))); + +INSTANTIATE_TEST_CASE_P( + smoke_ReduceOneAxis_CPU, + ReduceCPULayerTest, + paramsOneAxis, + ReduceCPULayerTest::getTestCaseName +); + +INSTANTIATE_TEST_CASE_P( + smoke_ReduceLogicalOneAxis_CPU, + ReduceCPULayerTest, + paramsOneAxisLogical, + ReduceCPULayerTest::getTestCaseName +); + +INSTANTIATE_TEST_CASE_P( + smoke_Reduce_ReductionTypes_CPU, + ReduceCPULayerTest, + params_MultiAxis, + ReduceCPULayerTest::getTestCaseName +); + +INSTANTIATE_TEST_CASE_P( + smoke_Reduce_ReductionTypes4D_CPU, + ReduceCPULayerTest, + params_MultiAxis_4D, + ReduceCPULayerTest::getTestCaseName +); + +INSTANTIATE_TEST_CASE_P( + smoke_Reduce_ReductionTypes5D_CPU, + ReduceCPULayerTest, + params_MultiAxis_5D, + ReduceCPULayerTest::getTestCaseName +); + +INSTANTIATE_TEST_CASE_P( + smoke_ReduceLogical_ReductionTypes_CPU, + ReduceCPULayerTest, + params_MultiAxisLogical, + ReduceCPULayerTest::getTestCaseName +); + +INSTANTIATE_TEST_CASE_P( + smoke_ReduceLogical4D_ReductionTypes_CPU, + ReduceCPULayerTest, + params_MultiAxisLogical4D, + ReduceCPULayerTest::getTestCaseName +); + +INSTANTIATE_TEST_CASE_P( + smoke_ReduceLogical5D_ReductionTypes_CPU, + ReduceCPULayerTest, + params_MultiAxisLogical5D, + ReduceCPULayerTest::getTestCaseName +); +} // namespace +} // namespace CPULayerTestsDefinitions + diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/region_yolo.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/region_yolo.cpp new file mode 100644 index 00000000000000..2fedfd2b2804f4 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/region_yolo.cpp @@ -0,0 +1,165 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "ngraph_functions/builders.hpp" +#include "test_utils/cpu_test_utils.hpp" + +using namespace InferenceEngine; +using namespace CPUTestUtils; + +namespace CPULayerTestsDefinitions { + +struct regionYoloAttributes { + size_t classes; + size_t coordinates; + size_t num_regions; + bool do_softmax; + int start_axis; + int end_axis; +}; + +using 
regionYoloParamsTuple = std::tuple< + ngraph::Shape, // Input Shape + regionYoloAttributes, // Params + std::vector, // mask + InferenceEngine::Precision, // Network input precision + InferenceEngine::Precision, // Network output precision + std::map, // Additional network configuration + std::string>; // Device name + + +class RegionYoloCPULayerTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + ngraph::Shape inputShape; + regionYoloAttributes attributes; + std::vector mask; + InferenceEngine::Precision inpPrecision; + InferenceEngine::Precision outPrecision; + std::string targetName; + std::map additionalConfig; + + std::tie(inputShape, attributes, mask, inpPrecision, outPrecision, additionalConfig, targetName) = obj.param; + + std::ostringstream result; + result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_"; + result << "classes=" << attributes.classes << "_"; + result << "coords=" << attributes.coordinates << "_"; + result << "num=" << attributes.num_regions << "_"; + result << "doSoftmax=" << attributes.do_softmax << "_"; + result << "axis=" << attributes.start_axis << "_"; + result << "endAxis=" << attributes.end_axis << "_"; + result << "inpPRC=" << inpPrecision.name() << "_"; + result << "outPRC=" << outPrecision.name() << "_"; + result << "targetDevice=" << targetName << "_"; + return result.str(); + } +protected: + void SetUp() override { + ngraph::Shape inputShape; + regionYoloAttributes attributes; + std::vector mask; + std::map additionalConfig; + + std::tie(inputShape, attributes, mask, inPrc, outPrc, additionalConfig, targetDevice) = this->GetParam(); + + configuration.insert(additionalConfig.begin(), additionalConfig.end()); + + selectedType = std::string("unknown_") + inPrc.name(); + + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrc); + auto param = std::make_shared(ngPrc, inputShape); + auto region_yolo = std::make_shared(param, attributes.coordinates, attributes.classes, attributes.num_regions, + attributes.do_softmax, mask, attributes.start_axis, attributes.end_axis); + function = std::make_shared(std::make_shared(region_yolo), ngraph::ParameterVector{param}, "RegionYolo"); + } +}; + +TEST_P(RegionYoloCPULayerTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + Run(); + CheckCPUImpl(executableNetwork, "RegionYolo"); +} + +namespace { +const std::vector inpOutPrc = {Precision::BF16, Precision::FP32}; + +const std::map additional_config; + +const std::vector inShapes_caffe = { + {1, 125, 13, 13} +}; + +const std::vector inShapes_mxnet = { + {1, 75, 52, 52}, + {1, 75, 32, 32}, + {1, 75, 26, 26}, + {1, 75, 16, 16}, + {1, 75, 13, 13}, + {1, 75, 8, 8} +}; + +const std::vector inShapes_v3 = { + {1, 255, 52, 52}, + {1, 255, 26, 26}, + {1, 255, 13, 13} +}; + +const std::vector> masks = { + {0, 1, 2}, + {3, 4, 5}, + {6, 7, 8} +}; + +const std::vector do_softmax = {true, false}; +const std::vector classes = {80, 20}; +const std::vector num_regions = {5, 9}; +const size_t coords = 4; +const int start_axis = 1; +const int end_axis = 3; + +const regionYoloAttributes yoloV3attr = {80, 4, 9, false, 1, 3}; + +const auto testCase_yolov3 = ::testing::Combine( + ::testing::ValuesIn(inShapes_v3), + ::testing::Values(yoloV3attr), + ::testing::Values(masks[2]), + ::testing::ValuesIn(inpOutPrc), + ::testing::ValuesIn(inpOutPrc), + ::testing::Values(additional_config), + 
::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +const regionYoloAttributes yoloV3mxnetAttr = {20, 4, 9, false, 1, 3}; + +const auto testCase_yolov3_mxnet = ::testing::Combine( + ::testing::ValuesIn(inShapes_mxnet), + ::testing::Values(yoloV3mxnetAttr), + ::testing::Values(masks[1]), + ::testing::ValuesIn(inpOutPrc), + ::testing::ValuesIn(inpOutPrc), + ::testing::Values(additional_config), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +const regionYoloAttributes yoloV2caffeAttr = {20, 4, 5, true, 1, 3}; + +const auto testCase_yolov2_caffe = ::testing::Combine( + ::testing::ValuesIn(inShapes_caffe), + ::testing::Values(yoloV2caffeAttr), + ::testing::Values(masks[0]), + ::testing::ValuesIn(inpOutPrc), + ::testing::ValuesIn(inpOutPrc), + ::testing::Values(additional_config), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +INSTANTIATE_TEST_CASE_P(smoke_TestsRegionYolov3CPU, RegionYoloCPULayerTest, testCase_yolov3, RegionYoloCPULayerTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_TestsRegionYoloMxnetCPU, RegionYoloCPULayerTest, testCase_yolov3_mxnet, RegionYoloCPULayerTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_TestsRegionYoloCaffeCPU, RegionYoloCPULayerTest, testCase_yolov2_caffe, RegionYoloCPULayerTest::getTestCaseName); +} // namespace +} // namespace CPULayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/cpu/subgraph_tests/src/conv_concat.cpp b/inference-engine/tests/functional/plugin/cpu/subgraph_tests/src/conv_concat.cpp index 076f6560e38de7..0df806f7f86da0 100644 --- a/inference-engine/tests/functional/plugin/cpu/subgraph_tests/src/conv_concat.cpp +++ b/inference-engine/tests/functional/plugin/cpu/subgraph_tests/src/conv_concat.cpp @@ -99,7 +99,7 @@ void ConvConcatSubgraphTest::SetUp() { } } for (size_t conv = 0; conv < convolutionNodes.size(); conv++) { - convolutionNodes[conv]->get_rt_info() = setCPUInfo(inFmts, outFmts, priority); + convolutionNodes[conv]->get_rt_info() = getCPUInfo(); } auto concat = ngraph::builder::makeConcat(ngraph::OutputVector{convolutionNodes[0], convolutionNodes[1]}, axis); @@ -112,7 +112,7 @@ TEST_P(ConvConcatSubgraphTest, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED() Run(); - CheckCPUImpl(executableNetwork, pluginTypeNode, inFmts, outFmts, selectedType); + CheckCPUImpl(executableNetwork, pluginTypeNode); }; /* ============= Common Convolution Params ============= */ diff --git a/inference-engine/tests/functional/plugin/cpu/subgraph_tests/src/fuse_permute_reorder.cpp b/inference-engine/tests/functional/plugin/cpu/subgraph_tests/src/fuse_permute_reorder.cpp index e5b734bae53c9a..44c2d81847344e 100644 --- a/inference-engine/tests/functional/plugin/cpu/subgraph_tests/src/fuse_permute_reorder.cpp +++ b/inference-engine/tests/functional/plugin/cpu/subgraph_tests/src/fuse_permute_reorder.cpp @@ -84,7 +84,7 @@ void FusePermuteAndReorderTest::CreateGraph() { auto constOrder = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, order); auto permute = std::make_shared(params[0], constOrder); - permute->get_rt_info() = setCPUInfo({memFmt}, {memFmt}, {}); + permute->get_rt_info() = makeCPUInfo({memFmt}, {memFmt}, {}); ngraph::ResultVector results{std::make_shared(permute)}; function = std::make_shared(results, params, "PermuteReorder"); @@ -145,17 +145,17 @@ void FusePermuteAndReorderTest1::CreateGraph() { auto constOrder1 = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, order); auto permute1 = std::make_shared(params[0], 
constOrder1); auto memFmt1 = inputShape.size() == 5 ? ndhwc : nhwc; - permute1->get_rt_info() = setCPUInfo({memFmt1}, {memFmt1}, {}); + permute1->get_rt_info() = makeCPUInfo({memFmt1}, {memFmt1}, {}); auto constOrder2 = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, order); auto permute2 = std::make_shared(permute1, constOrder2); auto memFmt2 = inputShape.size() == 5 ? ndhwc : nhwc; - permute2->get_rt_info() = setCPUInfo({memFmt2}, {memFmt2}, {}); + permute2->get_rt_info() = makeCPUInfo({memFmt2}, {memFmt2}, {}); auto constOrder3 = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, order); auto permute3 = std::make_shared(permute2, constOrder3); auto memFmt3 = inputShape.size() == 5 ? ncdhw : nchw; - permute3->get_rt_info() = setCPUInfo({memFmt3}, {memFmt3}, {}); + permute3->get_rt_info() = makeCPUInfo({memFmt3}, {memFmt3}, {}); auto shape = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, permute3->get_output_shape(0)); auto reshape = std::make_shared(permute1, shape, false); @@ -214,12 +214,12 @@ void FusePermuteAndReorderTest2::CreateGraph() { auto constOrder1 = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, order); auto permute1 = std::make_shared(params[0], constOrder1); auto memFmt1 = inputShape.size() == 5 ? ndhwc : nhwc; - permute1->get_rt_info() = setCPUInfo({memFmt1}, {memFmt1}, {}); + permute1->get_rt_info() = makeCPUInfo({memFmt1}, {memFmt1}, {}); auto constOrder2 = ngraph::builder::makeConstant(ngraph::element::i64, {inputShape.size()}, order); auto permute2 = std::make_shared(params[1], constOrder2); auto memFmt2 = inputShape.size() == 5 ? ncdhw : nchw; - permute2->get_rt_info() = setCPUInfo({memFmt2}, {memFmt2}, {}); + permute2->get_rt_info() = makeCPUInfo({memFmt2}, {memFmt2}, {}); auto concat = ngraph::builder::makeConcat({permute1, permute2}, 1); diff --git a/inference-engine/tests/functional/plugin/cpu/test_utils/cpu_test_utils.cpp b/inference-engine/tests/functional/plugin/cpu/test_utils/cpu_test_utils.cpp index 94fdbd34b377c1..5dff97729e5d48 100644 --- a/inference-engine/tests/functional/plugin/cpu/test_utils/cpu_test_utils.cpp +++ b/inference-engine/tests/functional/plugin/cpu/test_utils/cpu_test_utils.cpp @@ -15,6 +15,7 @@ const char *CPUTestsBase::cpu_fmt2str(cpu_memory_format_t v) { if (v == nCdhw8c) return "nCdhw8c"; if (v == nCdhw16c) return "nCdhw16c"; if (v == ndhwc) return "ndhwc"; + if (v == nc) return "nc"; if (v == x) return "x"; assert(!"unknown fmt"); return "undef"; @@ -34,6 +35,7 @@ cpu_memory_format_t CPUTestsBase::cpu_str2fmt(const char *str) { CASE(nCdhw8c); CASE(nCdhw16c); CASE(ndhwc); + CASE(nc); CASE(x); #undef CASE assert(!"unknown memory format"); @@ -45,7 +47,9 @@ std::string CPUTestsBase::fmts2str(const std::vector &fmts) for (auto &fmt : fmts) { ((str += "cpu:") += cpu_fmt2str(fmt)) += ","; } - str.erase(str.end() - 1); + if (!str.empty()) { + str.pop_back(); + } return str; } @@ -54,14 +58,16 @@ std::string CPUTestsBase::impls2str(const std::vector &priority) { for (auto &impl : priority) { ((str += "cpu:") += impl) += ","; } - str.erase(str.end() - 1); + if (!str.empty()) { + str.pop_back(); + } return str; } -void CPUTestsBase::CheckCPUImpl(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType, - std::vector inputMemoryFormats, - std::vector outputMemoryFormats, std::string selectedType) { +void CPUTestsBase::CheckCPUImpl(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType) const { IE_SUPPRESS_DEPRECATED_START + 
ASSERT_TRUE(!selectedType.empty()) << "Node type is not defined."; + bool isNodeFound = false; InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo(); auto function = execGraphInfo.getFunction(); ASSERT_NE(nullptr, function); @@ -84,25 +90,27 @@ void CPUTestsBase::CheckCPUImpl(InferenceEngine::ExecutableNetwork &execNet, std }; if (getExecValue(ExecGraphInfoSerialization::LAYER_TYPE) == nodeType) { - ASSERT_LE(inputMemoryFormats.size(), node->get_input_size()); - ASSERT_LE(outputMemoryFormats.size(), node->get_output_size()); - for (int i = 0; i < inputMemoryFormats.size(); i++) { + isNodeFound = true; + ASSERT_LE(inFmts.size(), node->get_input_size()); + ASSERT_LE(outFmts.size(), node->get_output_size()); + for (int i = 0; i < inFmts.size(); i++) { const auto parentPort = node->input_values()[i]; const auto port = node->inputs()[i]; if ((parentPort.get_tensor_ptr() == port.get_tensor_ptr())) { auto parentNode = parentPort.get_node_shared_ptr(); auto actualInputMemoryFormat = getExecValueOutputsLayout(parentNode); - ASSERT_EQ(inputMemoryFormats[i], cpu_str2fmt(actualInputMemoryFormat.c_str())); + ASSERT_EQ(inFmts[i], cpu_str2fmt(actualInputMemoryFormat.c_str())); } } - for (int i = 0; i < outputMemoryFormats.size(); i++) { + for (int i = 0; i < outFmts.size(); i++) { auto actualOutputMemoryFormat = getExecValue(ExecGraphInfoSerialization::OUTPUT_LAYOUTS); - ASSERT_EQ(outputMemoryFormats[i], cpu_str2fmt(actualOutputMemoryFormat.c_str())); + ASSERT_EQ(outFmts[i], cpu_str2fmt(actualOutputMemoryFormat.c_str())); } auto primType = getExecValue(ExecGraphInfoSerialization::IMPL_TYPE); ASSERT_EQ(selectedType, primType); } } + ASSERT_TRUE(isNodeFound) << "Node type name: \"" << nodeType << "\" has not been found."; IE_SUPPRESS_DEPRECATED_END } @@ -112,16 +120,39 @@ std::string CPUTestsBase::getTestCaseName(CPUSpecificParams params) { std::vector priority; std::string selectedType; std::tie(inFmts, outFmts, priority, selectedType) = params; - result << "_inFmts=" << fmts2str(inFmts); - result << "_outFmts=" << fmts2str(outFmts); - result << "_primitive=" << selectedType; + if (!inFmts.empty()) { + result << "_inFmts=" << fmts2str(inFmts); + } + if (!outFmts.empty()) { + result << "_outFmts=" << fmts2str(outFmts); + } + if (!selectedType.empty()) { + result << "_primitive=" << selectedType; + } return result.str(); } -std::map> CPUTestsBase::setCPUInfo(std::vector inFmts, - std::vector outFmts, - std::vector priority) { - std::map> cpuInfo; +CPUTestsBase::CPUInfo CPUTestsBase::getCPUInfo() const { + return makeCPUInfo(inFmts, outFmts, priority); +} + +std::string CPUTestsBase::getPrimitiveType() const { + std::string isaType; + if (InferenceEngine::with_cpu_x86_avx512f()) { + isaType = "jit_avx512"; + } else if (InferenceEngine::with_cpu_x86_avx2()) { + isaType = "jit_avx2"; + } else if (InferenceEngine::with_cpu_x86_sse42()) { + isaType = "jit_sse42"; + } else { + isaType = "ref"; + } + return isaType; +} + +CPUTestsBase::CPUInfo +CPUTestsBase::makeCPUInfo(std::vector inFmts, std::vector outFmts, std::vector priority) { + CPUInfo cpuInfo; if (!inFmts.empty()) { cpuInfo.insert({"InputMemoryFormats", std::make_shared>(fmts2str(inFmts))}); @@ -136,4 +167,24 @@ std::map> CPUTestsBase::setCPUInfo return cpuInfo; } +std::vector filterCPUSpecificParams(std::vector ¶msVector) { + auto adjustBlockedFormatByIsa = [](std::vector& formats) { + for (int i = 0; i < formats.size(); i++) { + if (formats[i] == nChw16c) + formats[i] = nChw8c; + if (formats[i] == nCdhw16c) + formats[i] = nCdhw8c; + } + 
}; + + if (!InferenceEngine::with_cpu_x86_avx512f()) { + for (auto& param : paramsVector) { + adjustBlockedFormatByIsa(std::get<0>(param)); + adjustBlockedFormatByIsa(std::get<1>(param)); + } + } + + return paramsVector; +} + } // namespace CPUTestUtils diff --git a/inference-engine/tests/functional/plugin/cpu/test_utils/cpu_test_utils.hpp b/inference-engine/tests/functional/plugin/cpu/test_utils/cpu_test_utils.hpp index 4a259af706eb5e..70e3d1c91839f5 100644 --- a/inference-engine/tests/functional/plugin/cpu/test_utils/cpu_test_utils.hpp +++ b/inference-engine/tests/functional/plugin/cpu/test_utils/cpu_test_utils.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2020 Intel Corporation +// Copyright (C) 2020 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -23,38 +23,44 @@ namespace CPUTestUtils { nCdhw8c, nCdhw16c, ndhwc, + nc, x, undef } cpu_memory_format_t; using CPUSpecificParams = std::tuple< - std::vector, - std::vector, - std::vector, - std::string + std::vector, // input memory format + std::vector, // output memory format + std::vector, // priority + std::string // selected primitive type >; class CPUTestsBase { public: - void CheckCPUImpl(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType, std::vector inputMemoryFormats, - std::vector outputMemoryFormats, std::string selectedType); - - std::map> setCPUInfo(std::vector inFmts, std::vector outFmts, - std::vector priority); + typedef std::map> CPUInfo; +public: static std::string getTestCaseName(CPUSpecificParams params); + static const char *cpu_fmt2str(cpu_memory_format_t v); + static cpu_memory_format_t cpu_str2fmt(const char *str); + static std::string fmts2str(const std::vector &fmts); + static std::string impls2str(const std::vector &priority); + static CPUInfo makeCPUInfo(std::vector inFmts, + std::vector outFmts, + std::vector priority); + + CPUInfo getCPUInfo() const; + void CheckCPUImpl(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType) const; +protected: + std::string getPrimitiveType() const; std::vector inFmts, outFmts; std::vector priority; std::string selectedType; - -private: - static const char *cpu_fmt2str(cpu_memory_format_t v); - cpu_memory_format_t cpu_str2fmt(const char *str); - static std::string fmts2str(const std::vector &fmts); - std::string impls2str(const std::vector &priority); }; +const auto emptyCPUSpec = CPUSpecificParams{{}, {}, {}, {}}; + const auto conv_ref_2D = CPUSpecificParams{{nchw}, {nchw}, {"ref_any"}, "ref_any_FP32"}; const auto conv_ref_3D = CPUSpecificParams{{ncdhw}, {ncdhw}, {"ref_any"}, "ref_any_FP32"}; @@ -80,4 +86,7 @@ const auto conv_sse42_2D_1x1 = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_sse42 const auto conv_avx2_2D_1x1 = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_avx2_1x1"}, "jit_avx2_1x1_FP32"}; const auto conv_avx512_2D_1x1 = CPUSpecificParams{{nChw16c}, {nChw16c}, {"jit_avx512_1x1"}, "jit_avx512_1x1_FP32"}; +// utility functions +std::vector filterCPUSpecificParams(std::vector& paramsVector); + } // namespace CPUTestUtils diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/activation.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/activation.cpp index c77802de5c4294..c05ce551c52a5b 100644 --- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/activation.cpp +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/activation.cpp @@ -38,7 +38,17 @@ const std::map>> activationTypes 
std::map, std::vector>> basic = { {{1, 50}, {{}}}, {{1, 128}, {{}}}, - {{1, 10 * 1024}, {{}}} + {{1, 10 * 1024}, {{}}}, + {{64, 1}, {{}}}, + {{8, 128}, {{}}}, + {{16, 128}, {{}}}, + {{18, 128}, {{}}}, + {{32, 512}, {{}}}, + {{1, 4, 2, 256}, {{}}}, + {{4, 4, 4, 4}, {{}}}, + {{1, 16, 1, 128}, {{}}}, + {{1, 8, 15, 128}, {{}}}, + {{1, 4, 4, 128}, {{}}} }; const auto basicCases = ::testing::Combine( diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/fake_quantize.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/fake_quantize.cpp index 77fb8156782e7a..34f7d7a7c5cb0d 100644 --- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/fake_quantize.cpp +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/fake_quantize.cpp @@ -54,10 +54,10 @@ const std::vector levels = {16, 255, 256}; const std::vector> fqArgs = {{}}; const std::vector> inputParams = {{-10, 10, 0.1}, {}}; -const std::vector fqInputMin = {0, 1, 2, 3, 4, 5}; -const std::vector fqInputMax = {10, 9, 8, 7, 6}; -const std::vector fqOutputMin = {1, 2, 3, 4}; -const std::vector fqOutputMax = {8, 7, 6, 5}; +const std::vector fqInputMin = {0, 3}; +const std::vector fqInputMax = {10, 7}; +const std::vector fqOutputMin = {1, 3}; +const std::vector fqOutputMax = {7, 6}; std::vector> getInputOutputShapes(const std::vector inputsMin, const std::vector inputsMax, diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp index f2804f915d61a0..0205fe3802cc4f 100644 --- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/skip_tests_config.cpp @@ -43,7 +43,13 @@ std::vector disabledTestPatterns() { // TODO: Issue 39358 R"(.*unaligned.*MultipleConcatTest.*)", R"(.*ActivationConcatsEltwise.*CS=35.*)", + // TODO: Issue 38974 + R"(.*ConcatMultiInput.CompareWithRefConstOnly.*IS=\(1.8\).*)", + R"(.*ConcatMultiInput.CompareWithRefConstOnly.*IS=\(1.16\).*)", + R"(.*ConcatMultiInput.CompareWithRefConstOnly.*IS=\(1.32\).*)", // TODO: Issue: 40960 R"(.*(ConstantResultSubgraphTest).*)", + // TODO: Issue: 29577 + R"(.*CoreThreadingTests.smoke_QueryNetwork.*)" }; } diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/concat_4D.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/trivial_concat.cpp similarity index 79% rename from inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/concat_4D.cpp rename to inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/trivial_concat.cpp index bdabe6c7669e58..0438d852b229b4 100644 --- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/concat_4D.cpp +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/trivial_concat.cpp @@ -4,7 +4,7 @@ #include -#include "single_layer_tests/concat_4D.hpp" +#include "subgraph_tests/trivial_concat.hpp" #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; @@ -13,6 +13,9 @@ namespace { std::vector> inShapes = { {1, 1, 33, 16}, {1, 1, 65, 16}, + {10, 16}, + {10, 64}, + {15, 15}, }; std::vector netPrecisions = 
{InferenceEngine::Precision::FP32, @@ -24,11 +27,11 @@ std::map additional_config = { {"GNA_SCALE_FACTOR_0", "2000.0"}, }; -INSTANTIATE_TEST_CASE_P(smoke_Concat4D_Basic, Concat4DLayerTest, +INSTANTIATE_TEST_CASE_P(smoke_trivial_concat_Basic, TrivialConcatLayerTest, ::testing::Combine( ::testing::ValuesIn(inShapes), ::testing::ValuesIn(netPrecisions), ::testing::Values(CommonTestUtils::DEVICE_GNA), ::testing::Values(additional_config)), - Concat4DLayerTest::getTestCaseName); + TrivialConcatLayerTest::getTestCaseName); } // namespace diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/two_fake_quantize_to_fullyconnected.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/two_fake_quantize_to_fullyconnected.cpp index 1347bcd0bdbf57..35c82543f91abe 100644 --- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/two_fake_quantize_to_fullyconnected.cpp +++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/two_fake_quantize_to_fullyconnected.cpp @@ -42,7 +42,7 @@ const std::vector> inputShapes = { {1, 440} }; const std::vector>> constShapes = { - {{1}, {2048, 1}} + {{1}, {1024, 1}} }; const std::vector>> constShapes_int16 = { diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp index d94d586630dfff..daaf6f66bd55f5 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp @@ -5,7 +5,7 @@ #include #include "low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp index 455ba87eee3425..7860d820a46b08 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp @@ -5,7 +5,7 @@ #include #include "low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp 
b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp index 8d4fc7d40b7a8c..8a902be41c5088 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp @@ -6,7 +6,7 @@ #include "low_precision_transformations/fake_quantize_precision_selection_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "ngraph_functions/low_precision_transformations/fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fake_quantize_function.hpp" using namespace LayerTestsDefinitions; using namespace ngraph::pass::low_precision; diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp index 7aa3f6704a6c1f..381bcf0ea52399 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp @@ -6,7 +6,7 @@ #include "low_precision_transformations/fake_quantize_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "ngraph_functions/low_precision_transformations/fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fake_quantize_function.hpp" using namespace LayerTestsDefinitions; using namespace ngraph::pass::low_precision; diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp index 2e92bddd0a8bc0..9cdc2bb960b80c 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp @@ -6,7 +6,7 @@ #include "low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp" #include "common_test_utils/test_constants.hpp" -#include "ngraph_functions/low_precision_transformations/fuse_fake_quantize_and_scale_shift_function.hpp" +#include "lpt_ngraph_functions/fuse_fake_quantize_and_scale_shift_function.hpp" using namespace LayerTestsDefinitions; using namespace ngraph::pass::low_precision; diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp index dbec7dadf1706b..dcd331a0241161 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp @@ -36,5 +36,9 @@ std::vector disabledTestPatterns() { // TODO: Issue: 41461 R"(.*TopKLayerTest.*k=10.*mode=min.*sort=index.*)", 
R"(.*TopKLayerTest.*k=5.*sort=(none|index).*)", + // TODO: Issue: 43511 + R"(.*EltwiseLayerTest.*IS=\(1.4.3.2.1.3\).*OpType=(Prod|Sub).*secondaryInputType=CONSTANT_opType=VECTOR_netPRC=(FP16|FP32).*)", + R"(.*EltwiseLayerTest.*IS=\(1.4.3.2.1.3\).*OpType=Sum.*secondaryInputType=CONSTANT_opType=VECTOR_netPRC=(FP16|FP32).*)", + R"(.*EltwiseLayerTest.*IS=\(1.4.3.2.1.3\).*OpType=Sub.*secondaryInputType=CONSTANT_opType=VECTOR_netPRC=I64.*)", }; } diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/roi_pooling.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/roi_pooling.cpp new file mode 100644 index 00000000000000..ffdae0ca7b20d6 --- /dev/null +++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/roi_pooling.cpp @@ -0,0 +1,58 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "single_layer_tests/roi_pooling.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace LayerTestsDefinitions; + +namespace { + +const std::vector> inShapes = { + {3, 4, 50, 50} +}; + +const std::vector> pooledShapes_max = { + {1, 1}, + {3, 3}, +}; + +const std::vector> pooledShapes_bilinear = { + {2, 2}, + {6, 6} +}; + +const std::vector> coordShapes = { + {1, 5}, + {3, 5}, +}; + +const std::vector spatial_scales = {0.625f, 1.f}; + +const auto test_ROIPooling_max = ::testing::Combine( + ::testing::ValuesIn(inShapes), + ::testing::ValuesIn(coordShapes), + ::testing::ValuesIn(pooledShapes_max), + ::testing::ValuesIn(spatial_scales), + ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_MAX), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(CommonTestUtils::DEVICE_MYRIAD) +); + +const auto test_ROIPooling_bilinear = ::testing::Combine( + ::testing::ValuesIn(inShapes), + ::testing::ValuesIn(coordShapes), + ::testing::ValuesIn(pooledShapes_bilinear), + ::testing::ValuesIn(spatial_scales), + ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR), + ::testing::Values(InferenceEngine::Precision::FP32), + ::testing::Values(CommonTestUtils::DEVICE_MYRIAD) +); + +INSTANTIATE_TEST_CASE_P(smoke_TestsROIPooling_max, ROIPoolingLayerTest, test_ROIPooling_max, ROIPoolingLayerTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_TestsROIPooling_bilinear, ROIPoolingLayerTest, test_ROIPooling_bilinear, ROIPoolingLayerTest::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/skip_tests_config.cpp index d4e280b0d3c2ea..8d11db060a4685 100644 --- a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/skip_tests_config.cpp +++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/skip_tests_config.cpp @@ -34,5 +34,7 @@ std::vector disabledTestPatterns() { R"(.*DSR_NonMaxSuppression.*NBoxes=(5|20|200).*)", // TODO: Issue: 42721 R"(.*(DSR_GatherND).*)", + // TODO: Issue 43781 + ".*ROIPoolingLayerTest.*" }; } diff --git a/inference-engine/tests/functional/plugin/shared/CMakeLists.txt b/inference-engine/tests/functional/plugin/shared/CMakeLists.txt index c08f97b09e0f50..2b50e312a7d1f8 100644 --- a/inference-engine/tests/functional/plugin/shared/CMakeLists.txt +++ b/inference-engine/tests/functional/plugin/shared/CMakeLists.txt @@ -8,6 +8,7 @@ set(TARGET_NAME funcSharedTests) list(APPEND 
EXPORT_DEPENDENCIES funcTestUtils ngraphFunctions + lptNgraphFunctions ) set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp index 4bbeb9460e68ca..4aff2bb283efa5 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp @@ -1350,63 +1350,6 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkMULTIwithHETERONoThrow) { // QueryNetwork with HETERO on MULTI combinations particular device // -TEST_P(IEClassLoadNetworkTest, QueryNetworkHETEROwithMULTINoThrow_v7) { - SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; - - if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) { - std::string devices; - auto availableDevices = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as>(); - for (auto &&device : availableDevices) { - devices += deviceName + '.' + device; - if (&device != &(availableDevices.back())) { - devices += ','; - } - } - - auto convertedActualNetwork = std::make_shared(actualNetwork); - QueryNetworkResult result; - std::string targetFallback(std::string(CommonTestUtils::DEVICE_MULTI) + "," + CommonTestUtils::DEVICE_CPU); - ASSERT_NO_THROW(result = ie.QueryNetwork(InferenceEngine::CNNNetwork{convertedActualNetwork}, CommonTestUtils::DEVICE_HETERO, { - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices}, - {"TARGET_FALLBACK", targetFallback}})); - - for (auto &&layer : result.supportedLayersMap) { - EXPECT_NO_THROW(CommonTestUtils::getLayerByName(convertedActualNetwork.get(), layer.first)); - } - } else { - GTEST_SKIP(); - } -} - -TEST_P(IEClassLoadNetworkTest, QueryNetworkMULTIwithHETERONoThrowv7) { - SKIP_IF_CURRENT_TEST_IS_DISABLED(); - Core ie; - - if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) { - std::string devices; - auto availableDevices = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as>(); - for (auto &&device : availableDevices) { - devices += CommonTestUtils::DEVICE_HETERO + std::string(".") + device; - if (&device != &(availableDevices.back())) { - devices += ','; - } - } - - QueryNetworkResult result; - auto convertedActualNetwork = std::make_shared(actualNetwork); - ASSERT_NO_THROW(result = ie.QueryNetwork(InferenceEngine::CNNNetwork{convertedActualNetwork}, CommonTestUtils::DEVICE_MULTI, { - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices}, - {"TARGET_FALLBACK", deviceName + "," + CommonTestUtils::DEVICE_CPU}})); - - for (auto &&layer : result.supportedLayersMap) { - EXPECT_NO_THROW(CommonTestUtils::getLayerByName(convertedActualNetwork.get(), layer.first)); - } - } else { - GTEST_SKIP(); - } -} - TEST_P(IEClassLoadNetworkTest, QueryNetworkHETEROWithMULTINoThrow_V10) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); Core ie; diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/core_threading_tests.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/core_threading_tests.hpp index b2f68f644a8e71..b56b93324e7646 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/core_threading_tests.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/core_threading_tests.hpp @@ -157,8 +157,7 @@ TEST_P(CoreThreadingTests, smoke_QueryNetwork) { SKIP_IF_CURRENT_TEST_IS_DISABLED() InferenceEngine::Core ie; - auto model = 
FuncTestUtils::TestModel::convReluNormPoolFcModelFP32; - auto network = ie.ReadNetwork(model.model_xml_str, model.weights_blob); + InferenceEngine::CNNNetwork network(ngraph::builder::subgraph::make2InputSubtract()); ie.SetConfig(config, deviceName); InferenceEngine::QueryNetworkResult refResult = ie.QueryNetwork(network, deviceName); @@ -224,15 +223,7 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetwork) { InferenceEngine::Core ie; std::atomic counter{0u}; - const FuncTestUtils::TestModel::TestModel models[] = { - FuncTestUtils::TestModel::convReluNormPoolFcModelFP32, - FuncTestUtils::TestModel::convReluNormPoolFcModelFP16 - }; std::vector networks; - for (auto & model : models) { - networks.emplace_back(ie.ReadNetwork(model.model_xml_str, model.weights_blob)); - } - networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract())); networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv())); networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv())); @@ -253,15 +244,7 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetworkAccuracy) { InferenceEngine::Core ie; std::atomic counter{0u}; - const FuncTestUtils::TestModel::TestModel models[] = { - FuncTestUtils::TestModel::convReluNormPoolFcModelFP32, - FuncTestUtils::TestModel::convReluNormPoolFcModelFP16 - }; std::vector networks; - for (auto & model : models) { - networks.emplace_back(ie.ReadNetwork(model.model_xml_str, model.weights_blob)); - } - networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract())); networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv())); networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv())); @@ -314,18 +297,17 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetwork_MultipleIECores) { std::atomic counter{0u}; - // TODO: replace with subgraph builders after fixing *-31414 - const std::vector models = { - FuncTestUtils::TestModel::convReluNormPoolFcModelFP32, - FuncTestUtils::TestModel::convReluNormPoolFcModelFP16 - }; + std::vector networks; + networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract())); + networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv())); + networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv())); + networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitConvConcat())); + networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitMultiConvConcat())); runParallel([&] () { auto value = counter++; InferenceEngine::Core ie; ie.SetConfig(config, deviceName); - auto model = models[value % models.size()]; - auto network = ie.ReadNetwork(model.model_xml_str, model.weights_blob); - (void)ie.LoadNetwork(network, deviceName); + (void)ie.LoadNetwork(networks[value % networks.size()], deviceName); }, numIterations, numThreads); } diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp index f97eec8eed7ed8..5e2c62b2abdf66 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp @@ -634,8 +634,10 @@ 
TEST_P(InferRequestTestsResultNotReady, ReturnResultNotReadyFromWaitInAsyncModeF // Skip test according to plugin specific disabledTestPatterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() // Create CNNNetwork from ngraph::Function - // return function which computes around 20ms on GNA SW - function = LayerTestsDefinitions::Basic_LSTM_S::GetNetwork(3000, 380); + // return ngraph::Function + // GetNetwork(3000, 380) makes inference take around 20ms on GNA SW, + // which increases the chances of getting RESULT_NOT_READY + function = LayerTestsDefinitions::Basic_LSTM_S::GetNetwork(300, 38); InferenceEngine::CNNNetwork cnnNet(function); // Load CNNNetwork to target plugins auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/add_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/add_transformation.hpp index 5f921f2a6d3159..3df460c1c293ab 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/add_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/add_transformation.hpp @@ -7,7 +7,7 @@ #include #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/clamp_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/clamp_transformation.hpp index afb7099a7ba508..aed62adabeb51a 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/clamp_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/clamp_transformation.hpp @@ -5,8 +5,8 @@ #pragma once #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace LayerTestsDefinitions { class ClampTransformationParam { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp index 9e3048b4431d45..edb659d66af684 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp @@ -8,7 +8,7 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_childs.hpp 
b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_childs.hpp index d57da55665edb4..2993c9224f7331 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_childs.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_different_precision_on_childs.hpp @@ -8,7 +8,7 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { class ConcatWithDifferentChildsTransformationParam { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_split_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_split_transformation.hpp index f60a25125554d0..3a3810daf07e50 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_split_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/concat_with_split_transformation.hpp @@ -8,7 +8,7 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { class ConcatWithSplitTransformationParam { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/convolution_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/convolution_transformation.hpp index 9799ebf615b6d1..fa008401cdf102 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/convolution_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/convolution_transformation.hpp @@ -8,8 +8,8 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/convolution_with_incorrect_weights.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/convolution_with_incorrect_weights.hpp index d2aba75e0ab0eb..0e0c297b4180b8 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/convolution_with_incorrect_weights.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/convolution_with_incorrect_weights.hpp @@ -8,8 +8,8 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include 
"ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp index 4665c0c712c0c8..dff852f74317a1 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_avg_pool_transformation.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp index a8336c737d31d9..ecd09bd3fe1826 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_max_pool_transformation.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp index 2be09f64dc6e91..12584f01c5e09a 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp @@ -8,9 +8,9 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" -#include "ngraph_functions/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/fake_quantize_and_two_output_branches_with_convolution_function.hpp" namespace LayerTestsDefinitions { diff --git 
a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp index ed6788de3dead3..6631331577a33b 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp @@ -6,11 +6,11 @@ #include #include -#include "ngraph_functions/low_precision_transformations/fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fake_quantize_function.hpp" #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp index a80950a3ef14a5..3d4a68122ecdd1 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp @@ -6,7 +6,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fake_quantize_function.hpp" #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_convert_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_convert_transformation.hpp index 350794b75dfebe..e6367c6e418684 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_convert_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_convert_transformation.hpp @@ -8,8 +8,8 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" using namespace ngraph; diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp index 51ac8bfcaaa232..5a7144ddc6a4d4 100644 --- 
a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp @@ -6,7 +6,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/fuse_fake_quantize_and_scale_shift_function.hpp" +#include "lpt_ngraph_functions/fuse_fake_quantize_and_scale_shift_function.hpp" #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_transformation.hpp index 1b36bf4acadacb..290b6f11a284a4 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_transformation.hpp @@ -7,9 +7,9 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/add.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/add.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.hpp index 59d403258a391d..fbdbebfeb38099 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.hpp @@ -7,9 +7,9 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/add.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/add.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.hpp index e0b694f55c1607..11744d8161001c 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.hpp +++ 
b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.hpp @@ -7,9 +7,9 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/add.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/add.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/group_convolution_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/group_convolution_transformation.hpp index 99e13eedf97167..6f7f1d6af96851 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/group_convolution_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/group_convolution_transformation.hpp @@ -8,8 +8,8 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_transformation.hpp index 7a7ff6a6b372e3..43ecdec5a7f427 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_transformation.hpp @@ -7,8 +7,8 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/mat_mul_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/mat_mul_function.hpp" #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_constant_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_constant_transformation.hpp index 67819a7fc07528..b3d7c668db015a 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_constant_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_constant_transformation.hpp @@ -7,9 +7,9 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" -#include 
"ngraph_functions/low_precision_transformations/mat_mul_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/mat_mul_function.hpp" #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_transformation.hpp index 15d5e671b8c534..76e14249b53316 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_transformation.hpp @@ -8,7 +8,7 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/multiply_to_group_convolution_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/multiply_to_group_convolution_transformation.hpp index 3c1bc1237c1323..332fbcffc8d7c4 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/multiply_to_group_convolution_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/multiply_to_group_convolution_transformation.hpp @@ -8,8 +8,8 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" using namespace ngraph; diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/multiply_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/multiply_transformation.hpp index bfd789a78bae59..c1b4cb9a9d02bb 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/multiply_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/multiply_transformation.hpp @@ -8,7 +8,7 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/multiply_with_one_parent_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/multiply_with_one_parent_transformation.hpp index 
97fd4fafd32355..f9bf9e87c4adfd 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/multiply_with_one_parent_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/multiply_with_one_parent_transformation.hpp @@ -8,7 +8,7 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mvn_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mvn_transformation.hpp index 7a4f1daacb10dc..3c25a9c6de2df8 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mvn_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/mvn_transformation.hpp @@ -8,7 +8,7 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" using namespace ngraph; diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/normalize_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/normalize_transformation.hpp index 21350f38eff419..5cc801890d5e69 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/normalize_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/normalize_transformation.hpp @@ -8,7 +8,7 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/prelu_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/prelu_transformation.hpp index e4232b7d473021..b7108d258150df 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/prelu_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/prelu_transformation.hpp @@ -7,7 +7,7 @@ #include #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/relu_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/relu_transformation.hpp index 5f3324101a88c5..8e817dff562dfc 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/relu_transformation.hpp +++ 
b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/relu_transformation.hpp @@ -7,7 +7,7 @@ #include #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp index b4b6b1bb23700c..8b8ea38c52b4f8 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/reshape_transformation.hpp @@ -8,7 +8,7 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/split_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/split_transformation.hpp index 9fac2b208e4169..39e2a7f612fe78 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/split_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/split_transformation.hpp @@ -5,7 +5,7 @@ #pragma once #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { class SplitTransformationParam { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp index 8dcba7d8c09e2b..4b09c063bf1a35 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp @@ -8,7 +8,7 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.hpp index 4df37205264469..593602eea6878a 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.hpp +++ 
b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.hpp @@ -8,7 +8,7 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/transpose_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/transpose_transformation.hpp index 67877a0df6c193..00e261ddc5a29f 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/transpose_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/transpose_transformation.hpp @@ -8,7 +8,7 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp index 48531e9aaa7abe..db3ecb9992e5a8 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp @@ -8,7 +8,7 @@ #include #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/variadic_split_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/variadic_split_transformation.hpp index 523d28c37d0470..e30e8aeae29085 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/variadic_split_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/variadic_split_transformation.hpp @@ -5,7 +5,7 @@ #pragma once #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace LayerTestsDefinitions { class VariadicSplitTransformationParam { diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/roi_pooling.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/roi_pooling.hpp new file mode 100644 index 00000000000000..7f863b006fb540 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/roi_pooling.hpp @@ -0,0 +1,43 @@ +// Copyright (C) 2020 Intel Corporation +// +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include 
+#include + +#include "ngraph_functions/builders.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" + +#include "functional_test_utils/layer_test_utils.hpp" + +namespace LayerTestsDefinitions { + +using roiPoolingParamsTuple = std::tuple< + InferenceEngine::SizeVector, // Input shape + InferenceEngine::SizeVector, // Coords shape + std::vector, // Pooled shape {pooled_h, pooled_w} + float, // Spatial scale + ngraph::helpers::ROIPoolingTypes, // ROIPooling method + InferenceEngine::Precision, // Net precision + LayerTestsUtils::TargetDevice>; // Device name + +class ROIPoolingLayerTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(testing::TestParamInfo obj); + void Infer() override; + +protected: + void SetUp() override; + +private: + ngraph::helpers::ROIPoolingTypes pool_method; + float spatial_scale; +}; + +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/concat_multi_input.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/concat_multi_input.hpp index 8f0e06d41e41c3..eb7147bd11798a 100644 --- a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/concat_multi_input.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/concat_multi_input.hpp @@ -24,7 +24,14 @@ namespace LayerTestsDefinitions { class ConcatMultiInput : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { +private: + std::vector paramSize; + ngraph::element::Type ngPrc; + std::vector> inputShapes; + public: + void GenerateStridedSliceModel(); + void GenerateConstOnlyModel(); static std::string getTestCaseName(testing::TestParamInfo obj); protected: diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/scaleshift.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/scaleshift.hpp index 4838ec79623139..e79d8729165942 100644 --- a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/scaleshift.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/scaleshift.hpp @@ -8,7 +8,6 @@ #include #include #include "functional_test_utils/layer_test_utils.hpp" -#include "../../../../../ngraph_functions/include/ngraph_functions/builders.hpp" #include "common_test_utils/test_constants.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/split_relu.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/split_relu.hpp index de6a254ecde746..c9fe931b0ae086 100644 --- a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/split_relu.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/split_relu.hpp @@ -8,7 +8,7 @@ #include #include #include "functional_test_utils/layer_test_utils.hpp" -#include "../../../../../ngraph_functions/include/ngraph_functions/builders.hpp" +#include "ngraph_functions/builders.hpp" #include "common_test_utils/test_constants.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/concat_4D.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/trivial_concat.hpp similarity index 81% rename from inference-engine/tests/functional/plugin/shared/include/single_layer_tests/concat_4D.hpp rename to 
inference-engine/tests/functional/plugin/shared/include/subgraph_tests/trivial_concat.hpp index ca6adcd29d40f9..f34f6103387b57 100644 --- a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/concat_4D.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/trivial_concat.hpp @@ -14,17 +14,17 @@ #include "ngraph_functions/utils/ngraph_helpers.hpp" namespace LayerTestsDefinitions { -using concat4DParamsTuple = typename std::tuple< +using trivialConcatParamsTuple = typename std::tuple< std::vector, // Inputs shape InferenceEngine::Precision, // Network precision std::string, // Device name std::map // Configuration >; -class Concat4DLayerTest : public testing::WithParamInterface, +class TrivialConcatLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); + static std::string getTestCaseName(const testing::TestParamInfo &obj); protected: void SetUp() override; }; diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/layout.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/layout.cpp index 7057af5ee943e7..70997ee6ae425e 100644 --- a/inference-engine/tests/functional/plugin/shared/src/behavior/layout.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/behavior/layout.cpp @@ -26,7 +26,7 @@ std::string LayoutTest::getTestCaseName(testing::TestParamInfo obj void LayoutTest::SetUp() { std::tie(netPrecision, targetDevice, configuration, layout, inputShapes) = this->GetParam(); - function = ngraph::builder::subgraph::make2InputSubtract(inputShapes, netPrecision); + function = ngraph::builder::subgraph::make2InputSubtract(inputShapes, FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision)); } void LayoutTest::TearDown() { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp index 46a126a958a320..beebb9ff7036ab 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/add_function.hpp" +#include "lpt_ngraph_functions/add_function.hpp" #include "ngraph_functions/subgraph_builders.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp index bd26b98941118d..74e956d91df72c 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp @@ -8,7 +8,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/clamp_function.hpp" +#include "lpt_ngraph_functions/clamp_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_transformation.cpp index 
248894f69bb988..7f299ac9cd4e02 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_transformation.cpp @@ -12,7 +12,7 @@ #include #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_childs.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_childs.cpp index 970b580f8c5acc..58bb6df113df47 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_childs.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_childs.cpp @@ -12,7 +12,7 @@ #include #include "ngraph_functions/builders.hpp" -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" using namespace InferenceEngine; using namespace InferenceEngine::details; diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp index a50c3624e9b25c..2e06ee9d7249bd 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp @@ -12,7 +12,7 @@ #include #include "ngraph_functions/builders.hpp" -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" using namespace InferenceEngine; using namespace InferenceEngine::details; diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp index 35c9fc49915a03..4177b03ef95577 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_neighbors_graph_transformation.cpp @@ -12,7 +12,7 @@ #include #include "ngraph_functions/builders.hpp" -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp index 281825cf284d54..53199107bfcb30 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp +++ 
b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp @@ -12,7 +12,7 @@ #include #include "ngraph_functions/builders.hpp" -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" using namespace InferenceEngine; using namespace InferenceEngine::details; diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp index cc3d4059756aea..179e9fed3bc01c 100755 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp @@ -16,7 +16,7 @@ #include "functional_test_utils/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/low_precision_transformations/fake_quantize_and_convolution_function.hpp" +#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp index 8c7ba6c0f23649..2033bf35907166 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp @@ -16,7 +16,7 @@ #include "functional_test_utils/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/low_precision_transformations/convolution_function.hpp" +#include "lpt_ngraph_functions/convolution_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp index 9c5a111d313084..11450115159b93 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp @@ -27,7 +27,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/depth_to_space_function.hpp" +#include "lpt_ngraph_functions/depth_to_space_function.hpp" using namespace ngraph::opset1; diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp index 2eb4c44db44dd8..ffd1a3fc952421 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp @@ -11,8 +11,8 @@ 
//#include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/avg_pool_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/avg_pool_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp index ea5d9bc4d1f1b7..1197a6ffd3df22 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp @@ -11,8 +11,8 @@ //#include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/max_pool_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/max_pool_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp index c04ca9ee550abc..db78324ea9dd80 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp @@ -3,7 +3,7 @@ // #include "low_precision_transformations/fake_quantize_precision_selection_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/fake_quantize_precision_selection_function.hpp" +#include "lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp" #include #include diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp index a9637072ec59f0..a1c27a0927a508 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp @@ -17,7 +17,7 @@ #include "functional_test_utils/blob_utils.hpp" #include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/low_precision_transformations/fuse_convert_function.hpp" +#include "lpt_ngraph_functions/fuse_convert_function.hpp" #include diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_transformation.cpp index 2a97e558d3c694..a1f4d509d4a330 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_transformation.cpp +++ 
b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_transformation.cpp @@ -10,7 +10,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/fuse_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fuse_fake_quantize_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp index 4cc1243ba0dd1a..68ade881f0a76b 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_multiply_to_fake_quantize_transformation.cpp @@ -10,7 +10,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/fuse_multiply_to_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fuse_multiply_to_fake_quantize_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp index dea5ed5a19cf67..582597bde0f8de 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_subtract_to_fake_quantize_transformation.cpp @@ -10,7 +10,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/fuse_subtract_to_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fuse_subtract_to_fake_quantize_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp index 750d2e39caf93d..e72372bf7db023 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/group_convolution_transformation.cpp @@ -16,7 +16,7 @@ #include "functional_test_utils/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/low_precision_transformations/group_convolution_function.hpp" +#include "lpt_ngraph_functions/group_convolution_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp index 035748470c0b42..162140ad1eb000 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include 
"ngraph_functions/low_precision_transformations/interpolate_function.hpp" +#include "lpt_ngraph_functions/interpolate_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp index ff9dffa7b581c9..67df482ac82ecc 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp @@ -15,7 +15,7 @@ #include #include "low_precision_transformations/mat_mul_transformation.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/mat_mul_function.hpp" +#include "lpt_ngraph_functions/mat_mul_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp index a64024dc5f6bd0..817c8f5f2fa4bf 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_constant_transformation.cpp @@ -15,7 +15,7 @@ #include #include "low_precision_transformations/mat_mul_transformation.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/mat_mul_function.hpp" +#include "lpt_ngraph_functions/mat_mul_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_transformation.cpp index a9d0d96c06a1e2..3fa6915790ae6d 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_transformation.cpp @@ -16,7 +16,7 @@ #include "functional_test_utils/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/mat_mul_with_optimized_constant_fake_quantize_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp index c7b34299354e98..4135a983f4be9c 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_to_group_convolution_transformation.cpp @@ -17,7 +17,7 @@ 
#include "functional_test_utils/blob_utils.hpp" #include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/low_precision_transformations/multiply_to_group_convolution_function.hpp" +#include "lpt_ngraph_functions/multiply_to_group_convolution_function.hpp" #include diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp index deb5deee7d5e57..ff1ab2e21db885 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/multiply_function.hpp" +#include "lpt_ngraph_functions/multiply_function.hpp" #include "ngraph_functions/subgraph_builders.hpp" diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp index 465809485dd037..500bbc4245bdd2 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp @@ -11,7 +11,7 @@ #include #include "common_test_utils/common_utils.hpp" -#include "ngraph_functions/low_precision_transformations/multiply_with_one_parent_function.hpp" +#include "lpt_ngraph_functions/multiply_with_one_parent_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp index a0c93bca86d1eb..5c66a7f500591d 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp @@ -17,7 +17,7 @@ #include "functional_test_utils/blob_utils.hpp" #include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/low_precision_transformations/mvn_function.hpp" +#include "lpt_ngraph_functions/mvn_function.hpp" #include diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp index ccba661206109a..1205657e466436 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/normalize_transformation.cpp @@ -17,7 +17,7 @@ #include "functional_test_utils/blob_utils.hpp" #include "ngraph_functions/pass/convert_prc.hpp" -#include "ngraph_functions/low_precision_transformations/normalize_l2_function.hpp" +#include "lpt_ngraph_functions/normalize_l2_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp 
b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp index b633f6164784ba..fa57d7dd7afe2b 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/prelu_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/prelu_function.hpp" +#include "lpt_ngraph_functions/prelu_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp index 061deab3f175eb..c6e6cc61151851 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/relu_function.hpp" +#include "lpt_ngraph_functions/relu_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp index b4a3a927cfaf1c..2c2a2088866404 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/reshape_transformation.cpp @@ -12,7 +12,7 @@ #include "ngraph_functions/builders.hpp" #include -#include "ngraph_functions/low_precision_transformations/reshape_function.hpp" +#include "lpt_ngraph_functions/reshape_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp index 6a4a463d0660bf..f3e23b76baceb4 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp @@ -12,7 +12,7 @@ #include #include "low_precision/split.hpp" -#include "ngraph_functions/low_precision_transformations/split_function.hpp" +#include "lpt_ngraph_functions/split_function.hpp" namespace LayerTestsDefinitions { std::string SplitTransformation::getTestCaseName(testing::TestParamInfo obj) { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp index 33b9f11ef65461..b8e723716ed534 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp @@ -13,7 +13,7 @@ #include #include "low_precision_transformations/squeeze_transformation.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/squeeze_function.hpp" 
+#include "lpt_ngraph_functions/squeeze_function.hpp" #include diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.cpp index 185482c9eab387..28e1ae82a1ac11 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/subtract_multiply_to_multiply_add_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/subtract_multiply_to_multiply_add_function.hpp" +#include "lpt_ngraph_functions/subtract_multiply_to_multiply_add_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/transpose_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/transpose_transformation.cpp index 4087d85c7424be..750ba8ceb6f0cf 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/transpose_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/transpose_transformation.cpp @@ -11,7 +11,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/transpose_function.hpp" +#include "lpt_ngraph_functions/transpose_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp index bf9cfc59a111cf..28192ef3cb0070 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp @@ -13,7 +13,7 @@ #include #include "low_precision_transformations/unsqueeze_transformation.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/unsqueeze_function.hpp" +#include "lpt_ngraph_functions/unsqueeze_function.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp index f3ed8ab125b2f3..06e4ba14661943 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp @@ -12,7 +12,7 @@ #include #include "low_precision/variadic_split.hpp" -#include "ngraph_functions/low_precision_transformations/variadic_split_function.hpp" +#include "lpt_ngraph_functions/variadic_split_function.hpp" namespace LayerTestsDefinitions { std::string VariadicSplitTransformation::getTestCaseName(testing::TestParamInfo obj) { diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/concat_4D.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/concat_4D.cpp deleted 
file mode 100644 index b4e5e509b72325..00000000000000 --- a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/concat_4D.cpp +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (C) 2019 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include -#include - -#include "ie_core.hpp" - -#include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" -#include "common_test_utils/data_utils.hpp" -#include "functional_test_utils/precision_utils.hpp" -#include "functional_test_utils/plugin_cache.hpp" -#include "functional_test_utils/skip_tests_config.hpp" - -#include "single_layer_tests/concat_4D.hpp" - -namespace LayerTestsDefinitions { - - std::string Concat4DLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - int axis; - std::vector inputShapes; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::map config; - std::tie(inputShapes, netPrecision, targetName, config) = obj.param; - std::ostringstream result; - result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "trgDev=" << targetName << "_"; - return result.str(); - } - - void Concat4DLayerTest::SetUp() { - int axis = 1; - InferenceEngine::SizeVector inputShape; - InferenceEngine::Precision netPrecision; - std::map additional_config; - std::tie(inputShape, netPrecision, targetDevice, additional_config) = this->GetParam(); - configuration.insert(additional_config.begin(), additional_config.end()); - - auto total_size = std::accumulate(inputShape.begin(), inputShape.end(), static_cast(1), std::multiplies()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); - auto input = params[0]; - - auto constant_values = CommonTestUtils::generate_float_numbers(total_size, 11.0f, 12.0f); - auto constant = ngraph::builder::makeConstant(ngPrc, std::vector({1, total_size}), constant_values); - auto first_reshape_pattern = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, std::vector(inputShape)); - auto first_reshape = std::make_shared(constant, first_reshape_pattern, false); - auto constant_2 = ngraph::builder::makeConstant(ngPrc, inputShape, constant_values); - - auto concat = std::make_shared(ngraph::OutputVector({first_reshape, input, constant_2}), axis); - auto act = ngraph::builder::makeActivation(concat, ngPrc, ngraph::helpers::ActivationTypes::Relu); - ngraph::ResultVector results{std::make_shared(act)}; - function = std::make_shared(results, params, "concat"); - } - - - TEST_P(Concat4DLayerTest, CompareWithRefs) { - Run(); - }; -} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/eltwise.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/eltwise.cpp index 22bf1a5ae89a77..1c407733fc4459 100644 --- a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/eltwise.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/eltwise.cpp @@ -98,13 +98,11 @@ void EltwiseLayerTest::SetUp() { eltwiseType == ngraph::helpers::EltwiseTypes::FLOOR_MOD || eltwiseType == ngraph::helpers::EltwiseTypes::MOD) { std::vector data(ngraph::shape_size(shape_input_secondary)); - data = 
NGraphFunctions::Utils::generateVector(ngraph::shape_size(shape_input_secondary)); - for (float &i : data) { - if (i == 0) { - i = 1; - } - } + data = NGraphFunctions::Utils::generateVector(ngraph::shape_size(shape_input_secondary), 10, 2); secondaryInput = ngraph::builder::makeConstant(ngPrc, shape_input_secondary, data); + } else if (eltwiseType == ngraph::helpers::EltwiseTypes::POWER && secondaryInputType == ngraph::helpers::InputLayerType::CONSTANT) { + // to avoid floating point overflow on some platforms, let's fill the constant with small numbers. + secondaryInput = ngraph::builder::makeConstant(ngPrc, shape_input_secondary, {}, true, 3); } else { secondaryInput = ngraph::builder::makeInputLayer(ngPrc, secondaryInputType, shape_input_secondary); if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) { diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/fake_quantize.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/fake_quantize.cpp index 30eb24935094b0..511c234f1bb231 100644 --- a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/fake_quantize.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/fake_quantize.cpp @@ -145,7 +145,7 @@ TEST_P(FakeQuantizeLayerTest, CompareWithRefs) { return; } - size_t nIterations = (inputDataMax - inputDataMin) / inputDataResolution; + size_t nIterations = 1; for (; nIterations != 0; nIterations--) { UpdateSeed(); Infer(); diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/non_max_suppression.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/non_max_suppression.cpp index 27fbf901caef8d..d9a6869e5a3d9b 100644 --- a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/non_max_suppression.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/non_max_suppression.cpp @@ -79,12 +79,22 @@ void NmsLayerTest::Compare(const std::vector> &expecte const auto &precision = actual->getTensorDesc().getPrecision(); size_t size = expected.size() / actual->getTensorDesc().getPrecision().size(); switch (precision) { - case Precision::FP32: + case Precision::FP32: { LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer), reinterpret_cast(actualBuffer), size, threshold); + const auto fBuffer = lockedMemory.as(); + for (int i = size; i < actual->size(); i++) { + ASSERT_TRUE(fBuffer[i] == -1.f) << "Invalid default value: " << fBuffer[i] << " at index: " << i; + } break; - case Precision::I32: + } + case Precision::I32: { LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer), reinterpret_cast(actualBuffer), size, 0); + const auto iBuffer = lockedMemory.as(); + for (int i = size; i < actual->size(); i++) { + ASSERT_TRUE(iBuffer[i] == -1) << "Invalid default value: " << iBuffer[i] << " at index: " << i; + } break; + } default: FAIL() << "Comparator for " << precision << " precision isn't supported"; } diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/region_yolo.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/region_yolo.cpp index 968909418bcfb2..fbbd2cc627dd02 100644 --- a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/region_yolo.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/region_yolo.cpp @@ -27,7 +27,7 @@ std::string RegionYoloLayerTest::getTestCaseName(const testing::TestParamInfoGetParam(); auto ngPrc = 
FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - auto param = std::make_shared(ngraph::element::f32, inputShape); + auto param = std::make_shared(ngPrc, inputShape); auto region_yolo = std::make_shared(param, coords, classes, num_regions, do_softmax, mask, start_axis, end_axis); function = std::make_shared(std::make_shared(region_yolo), ngraph::ParameterVector{param}, "RegionYolo"); } diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/roi_pooling.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/roi_pooling.cpp new file mode 100644 index 00000000000000..c2f7d584558420 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/roi_pooling.cpp @@ -0,0 +1,104 @@ +// Copyright (C) 2020 Intel Corporation +// +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include + +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/skip_tests_config.hpp" +#include "functional_test_utils/layer_test_utils.hpp" + +#include "single_layer_tests/roi_pooling.hpp" + +using namespace InferenceEngine; +using namespace FuncTestUtils::PrecisionUtils; + +namespace LayerTestsDefinitions { + + std::string ROIPoolingLayerTest::getTestCaseName(testing::TestParamInfo obj) { + std::vector inputShape; + std::vector coordsShape; + std::vector poolShape; + float spatial_scale; + ngraph::helpers::ROIPoolingTypes pool_method; + InferenceEngine::Precision netPrecision; + std::string targetDevice; + std::tie(inputShape, coordsShape, poolShape, spatial_scale, pool_method, netPrecision, targetDevice) = obj.param; + + std::ostringstream result; + + result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_"; + result << "CS=" << CommonTestUtils::vec2str(coordsShape) << "_"; + result << "PS=" << CommonTestUtils::vec2str(poolShape) << "_"; + result << "Scale=" << spatial_scale << "_"; + switch (pool_method) { + case ngraph::helpers::ROIPoolingTypes::ROI_MAX: + result << "Max_"; + break; + case ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR: + result << "Bilinear_"; + break; + } + result << "netPRC=" << netPrecision.name() << "_"; + result << "trgDev=" << targetDevice; + return result.str(); + } + + void ROIPoolingLayerTest::Infer() { + inferRequest = executableNetwork.CreateInferRequest(); + inputs.clear(); + + auto feat_map_shape = cnnNetwork.getInputShapes().begin()->second; + const int height = pool_method == ngraph::helpers::ROIPoolingTypes::ROI_MAX ? feat_map_shape[2] / spatial_scale : 1; + const int width = pool_method == ngraph::helpers::ROIPoolingTypes::ROI_MAX ? 
feat_map_shape[3] / spatial_scale : 1; + + size_t it = 0; + for (const auto &input : cnnNetwork.getInputsInfo()) { + const auto &info = input.second; + Blob::Ptr blob; + + if (it == 1) { + blob = make_blob_with_precision(info->getTensorDesc()); + blob->allocate(); + CommonTestUtils::fill_data_roi(blob->buffer(), blob->size(), feat_map_shape[0] - 1, + height, width, 1.0f); + } else { + blob = GenerateInput(*info); + } + inferRequest.SetBlob(info->name(), blob); + inputs.push_back(blob); + it++; + } + inferRequest.Infer(); + } + + void ROIPoolingLayerTest::SetUp() { + InferenceEngine::SizeVector inputShape; + InferenceEngine::SizeVector coordsShape; + InferenceEngine::SizeVector poolShape; + InferenceEngine::Precision netPrecision; + float spatial_scale; + + std::tie(inputShape, coordsShape, poolShape, spatial_scale, pool_method, netPrecision, targetDevice) = this->GetParam(); + + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto params = ngraph::builder::makeParams(ngPrc, {inputShape, coordsShape}); + auto paramOuts = ngraph::helpers::convert2OutputVector( + ngraph::helpers::castOps2Nodes(params)); + std::shared_ptr roi_pooling = ngraph::builder::makeROIPooling(paramOuts[0], + paramOuts[1], + poolShape, + spatial_scale, + pool_method); + ngraph::ResultVector results{std::make_shared(roi_pooling)}; + function = std::make_shared(results, params, "roi_pooling"); + } + + TEST_P(ROIPoolingLayerTest, CompareWithRefs) { + Run(); + } +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/cascade_concat.cpp b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/cascade_concat.cpp index 5e2d54b7de4849..f83dde6f5a88be 100644 --- a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/cascade_concat.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/cascade_concat.cpp @@ -35,17 +35,17 @@ void CascadeConcat::SetUp() { std::tie(input1, input2, input3, netPrecision, multioutput, targetDevice, additional_config) = this->GetParam(); configuration.insert(additional_config.begin(), additional_config.end()); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - auto input = ngraph::builder::makeParams(ngPrc, {input1[0], input2[0], input2[0]}); + auto input = ngraph::builder::makeParams(ngPrc, {input1[0], input2[0], input3[0]}); auto relu1 = std::make_shared(input[0]); auto relu2 = std::make_shared(input[1]); auto relu3 = std::make_shared(input[2]); auto concat = std::make_shared(ngraph::OutputVector{relu1->output(0), - relu2->output(0)}, + relu2->output(0)}, 1); auto reshape = ngraph::builder::makeSqueezeUnsqueeze(concat, ngPrc, {0}, ngraph::helpers::SqueezeOpType::UNSQUEEZE); auto reshape2 = ngraph::builder::makeSqueezeUnsqueeze(reshape, ngPrc, {0}, ngraph::helpers::SqueezeOpType::SQUEEZE); auto concat2 = std::make_shared(ngraph::OutputVector{reshape2->output(0), - relu3->output(0)}, + relu3->output(0)}, 1); ngraph::ResultVector results; if (multioutput) { diff --git a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/concat_multi_input.cpp b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/concat_multi_input.cpp index 1d70dfe0448122..8c51603c381869 100644 --- a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/concat_multi_input.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/concat_multi_input.cpp @@ -38,17 +38,19 @@ std::string 
ConcatMultiInput::getTestCaseName(testing::TestParamInfo> inputShapes; InferenceEngine::Precision netPrecision; std::map additional_config; std::tie(inputShapes, netPrecision, targetDevice, additional_config) = this->GetParam(); configuration.insert(additional_config.begin(), additional_config.end()); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - std::vector paramSize = { 1, 0 }; + ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + paramSize = { 1, 0 }; for (const auto& val : inputShapes) { paramSize[1] += val[1]; } +} + +void ConcatMultiInput::GenerateStridedSliceModel() { auto params = ngraph::builder::makeParams(ngPrc, { paramSize }); auto stride = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, std::vector{ 1, 1 }); @@ -80,9 +82,53 @@ void ConcatMultiInput::SetUp() { function = std::make_shared(results, params, "ConcatMultiInput"); } -TEST_P(ConcatMultiInput, CompareWithRefImpl) { +void ConcatMultiInput::GenerateConstOnlyModel() { + ngraph::OutputVector concatInputs; + + const int seed = 0; + std::mt19937 gen(static_cast(seed)); + + auto generateFloatNumbers = [gen](std::size_t vec_len, float min, float max) mutable { + std::vector res; + + std::uniform_real_distribution dist(min, max); + for (int i = 0; i < vec_len; i++) + res.emplace_back(static_cast(dist(gen))); + + return res; + }; + ngraph::ParameterVector input_vector; + for (size_t i = 0; i < inputShapes.size(); ++i) { + size_t total_size = 1; + for (auto dim : inputShapes[i]) { + total_size *= dim; + } + if (i == 0) { + input_vector = ngraph::builder::makeParams(ngPrc, {{1, total_size}}); + auto relu = ngraph::builder::makeActivation(input_vector[0], ngPrc, ngraph::helpers::ActivationTypes::Relu); + concatInputs.push_back(relu); + } else { + auto min_max = (i % 2 == 0) ? 
2 : 30; + auto const_values = generateFloatNumbers(total_size, -min_max, min_max); + auto const_node = ngraph::builder::makeConstant(ngPrc, {1, total_size}, const_values); + concatInputs.push_back(const_node); + } + } + + auto concat = ngraph::builder::makeConcat(concatInputs, 1); + + ngraph::ResultVector results{ std::make_shared(concat) }; + function = std::make_shared(results, input_vector, "ConcatConstOnly"); +} + +TEST_P(ConcatMultiInput, CompareWithRefStridedSlice) { + GenerateStridedSliceModel(); Run(); }; +TEST_P(ConcatMultiInput, CompareWithRefConstOnly) { + GenerateConstOnlyModel(); + Run(); +}; } // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/scale_shift.cpp b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/scale_shift.cpp index 7bbd371f9597da..b31280766ce88d 100644 --- a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/scale_shift.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/scale_shift.cpp @@ -3,6 +3,7 @@ // #include +#include "ngraph_functions/builders.hpp" #include "functional_test_utils/precision_utils.hpp" #include "functional_test_utils/skip_tests_config.hpp" #include "subgraph_tests/scaleshift.hpp" diff --git a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/trivial_concat.cpp b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/trivial_concat.cpp new file mode 100644 index 00000000000000..9626204fe7857a --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/trivial_concat.cpp @@ -0,0 +1,78 @@ +// Copyright (C) 2019 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include +#include + +#include "ie_core.hpp" + +#include "common_test_utils/common_utils.hpp" +#include "functional_test_utils/blob_utils.hpp" +#include "common_test_utils/data_utils.hpp" +#include "functional_test_utils/precision_utils.hpp" +#include "functional_test_utils/plugin_cache.hpp" +#include "functional_test_utils/skip_tests_config.hpp" + +#include "subgraph_tests/trivial_concat.hpp" + +namespace LayerTestsDefinitions { + +std::string TrivialConcatLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { + int axis; + std::vector inputShapes; + InferenceEngine::Precision netPrecision; + InferenceEngine::Precision inPrc, outPrc; + InferenceEngine::Layout inLayout, outLayout; + std::string targetName; + std::map config; + std::tie(inputShapes, netPrecision, targetName, config) = obj.param; + std::ostringstream result; + result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_"; + result << "netPRC=" << netPrecision.name() << "_"; + result << "trgDev=" << targetName << "_"; + return result.str(); +} + +void TrivialConcatLayerTest::SetUp() { + InferenceEngine::SizeVector inputShape; + InferenceEngine::Precision netPrecision; + std::map additional_config; + std::tie(inputShape, netPrecision, targetDevice, additional_config) = this->GetParam(); + configuration.insert(additional_config.begin(), additional_config.end()); + int axis = inputShape.size() - 2; + size_t total_size = std::accumulate(inputShape.begin(), inputShape.end(), static_cast(1), std::multiplies()); + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto params = ngraph::builder::makeParams(ngPrc, {{1, total_size}}); + + auto input_relu = ngraph::builder::makeActivation(params[0], ngPrc, ngraph::helpers::ActivationTypes::Relu); + + auto 
input_reshape_pattern = std::make_shared(ngraph::element::i64, + ngraph::Shape{inputShape.size()}, std::vector(inputShape)); + auto input = std::make_shared(input_relu, input_reshape_pattern, false); + + auto constant_values = CommonTestUtils::generate_float_numbers(total_size, 15.5f, 16.1f); + auto constant = ngraph::builder::makeConstant(ngPrc, std::vector({1, total_size}), constant_values); + + auto first_reshape = std::make_shared(constant, input_reshape_pattern, false); + + auto concat = std::make_shared(ngraph::OutputVector({first_reshape, input}), axis); + + auto final_reshape_pattern = std::make_shared(ngraph::element::i64, + ngraph::Shape{2}, std::vector({1, 2 * total_size})); + auto final_reshape = std::make_shared(concat, final_reshape_pattern, false); + + auto act = ngraph::builder::makeActivation(final_reshape, ngPrc, ngraph::helpers::ActivationTypes::Relu); + + ngraph::ResultVector results{std::make_shared(act)}; + function = std::make_shared(results, params, "trivial_concat"); +} + + +TEST_P(TrivialConcatLayerTest, CompareWithRefs) { + Run(); +}; +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/CMakeLists.txt b/inference-engine/tests/ie_test_utils/common_test_utils/CMakeLists.txt index 7453f36933de60..6dee3f11890b38 100644 --- a/inference-engine/tests/ie_test_utils/common_test_utils/CMakeLists.txt +++ b/inference-engine/tests/ie_test_utils/common_test_utils/CMakeLists.txt @@ -15,7 +15,7 @@ function(add_gtest_libraries) endif () set(BUILD_SHARED_LIBS OFF) - add_subdirectory(gtest) + add_subdirectory(gtest EXCLUDE_FROM_ALL) get_target_property(gtest_include_dirs gtest INTERFACE_INCLUDE_DIRECTORIES) set_target_properties(gtest PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${gtest_include_dirs}") @@ -23,6 +23,22 @@ function(add_gtest_libraries) get_target_property(gmock_include_dirs gtest INTERFACE_INCLUDE_DIRECTORIES) set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${gmock_include_dirs};${gmock_SOURCE_DIR}/include") + # If we have specified /Z7 option, remove -Zi option which comes from gtest + if (WIN32) + set(gtest_targets "gtest;gtest_main;gmock;gmock_main") + foreach(target_name ${gtest_targets}) + if(TARGET "${target_name}") + get_target_property(_target_cxx_flags ${target_name} COMPILE_FLAGS) + if(_target_cxx_flags) + if(CMAKE_CXX_FLAGS_DEBUG MATCHES ".+/Z7.+") + string(REPLACE "-Zi" " " _target_cxx_flags ${_target_cxx_flags}) + message(STATUS "Removing -Zi flag from target " ${target_name}) + set_target_properties(${target_name} PROPERTIES COMPILE_FLAGS "${_target_cxx_flags}") + endif() + endif() + endif() + endforeach() + endif() set_target_properties(gtest gtest_main gmock gmock_main PROPERTIES FOLDER thirdparty) endfunction() diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/data_utils.hpp b/inference-engine/tests/ie_test_utils/common_test_utils/data_utils.hpp index fcbb64cf041173..379d6c57449d62 100644 --- a/inference-engine/tests/ie_test_utils/common_test_utils/data_utils.hpp +++ b/inference-engine/tests/ie_test_utils/common_test_utils/data_utils.hpp @@ -7,6 +7,7 @@ #include #include +#include #include #include @@ -122,6 +123,35 @@ static void fill_data_bbox(float *data, size_t size, int height, int width, floa } } +static void fill_data_roi(float *data, size_t size, const uint32_t range, const int height, const int width, const float omega, const int seed = 1) { + std::default_random_engine random(seed); + std::uniform_int_distribution distribution(0, range); 
+    float center_h = (height - 1.0f) / 2;
+    float center_w = (width - 1.0f) / 2;
+    for (size_t i = 0; i < size; i += 5) {
+        data[i] = static_cast(distribution(random));
+        data[i + 1] = std::floor(center_w + width * 0.6f * sin(static_cast(i+1) * omega));
+        data[i + 3] = std::floor(center_w + width * 0.6f * sin(static_cast(i+3) * omega));
+        if (data[i + 3] < data[i + 1]) {
+            std::swap(data[i + 1], data[i + 3]);
+        }
+        if (data[i + 1] < 0)
+            data[i + 1] = 0;
+        if (data[i + 3] > width - 1)
+            data[i + 3] = static_cast(width - 1);
+
+        data[i + 2] = std::floor(center_h + height * 0.6f * sin(static_cast(i+2) * omega));
+        data[i + 4] = std::floor(center_h + height * 0.6f * sin(static_cast(i+4) * omega));
+        if (data[i + 4] < data[i + 2]) {
+            std::swap(data[i + 2], data[i + 4]);
+        }
+        if (data[i + 2] < 0)
+            data[i + 2] = 0;
+        if (data[i + 4] > height - 1)
+            data[i + 4] = static_cast(height - 1);
+    }
+}
+
 /** @brief Fill blob with random data.
  *
  * @param blob Target blob
@@ -177,8 +207,10 @@ void inline fill_data_random_float(InferenceEngine::Blob::Ptr &blob, const uint3
     for (size_t i = 0; i < blob->size(); i++) {
         auto value = static_cast(distribution(random));
         value /= static_cast(k);
-        if (typeid(dataType) == typeid(typename InferenceEngine::PrecisionTrait::value_type)) {
+        if (PRC == InferenceEngine::Precision::FP16) {
             rawBlobDataPtr[i] = ngraph::float16(value).to_bits();
+        } else if (PRC == InferenceEngine::Precision::BF16) {
+            rawBlobDataPtr[i] = ngraph::bfloat16(value).to_bits();
         } else {
             rawBlobDataPtr[i] = value;
         }
@@ -237,4 +269,27 @@ void inline fill_data_random(InferenceEngine::
     fill_data_random_float(blob, range, start_from, k, seed);
 }
 
+template<>
+void inline fill_data_random(InferenceEngine::Blob::Ptr &blob,
+                             const uint32_t range,
+                             int32_t start_from,
+                             const int32_t k, const int seed) {
+    fill_data_random_float(blob, range, start_from, k, seed);
+}
+
+template
+typename std::enable_if::value, T>::type
+static ie_abs(const T &val) {
+    return std::abs(val);
+}
+
+template
+typename std::enable_if::value, T>::type
+static ie_abs(const T &val) {
+    return val;
+}
+
+static ngraph::bfloat16 ie_abs(const ngraph::bfloat16& val) {
+    return ngraph::bfloat16::from_bits(val.to_bits() ^ 0x8000);
+}
 } // namespace CommonTestUtils
diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/test_constants.hpp b/inference-engine/tests/ie_test_utils/common_test_utils/test_constants.hpp
index 2ed182eee3b3e9..3577ab049c2315 100644
--- a/inference-engine/tests/ie_test_utils/common_test_utils/test_constants.hpp
+++ b/inference-engine/tests/ie_test_utils/common_test_utils/test_constants.hpp
@@ -11,7 +11,7 @@ const char DEVICE_GPU[] = "GPU";
 const char DEVICE_HDDL[] = "HDDL";
 const char DEVICE_FPGA[] = "FPGA";
 const char DEVICE_MYRIAD[] = "MYRIAD";
-const char DEVICE_KEEMBAY[] = "KMB";
+const char DEVICE_KEEMBAY[] = "VPUX";
 const char DEVICE_MULTI[] = "MULTI";
 const char DEVICE_HETERO[] = "HETERO";
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/blob_utils.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/blob_utils.hpp
index 3a6be95061be22..1259266be86329 100644
--- a/inference-engine/tests/ie_test_utils/functional_test_utils/blob_utils.hpp
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/blob_utils.hpp
@@ -328,6 +328,16 @@ convertArrayPrecision
+void inline
+convertArrayPrecision(float *dst, const short *src,
+                      size_t nelem) {
+    auto srcBf16 = reinterpret_cast(src);
+    for (size_t i = 0; i < nelem; i++) {
+        dst[i] = static_cast(srcBf16[i]);
+    }
+}
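The bfloat16 helpers introduced just above (the ie_abs overload and the BF16-to-FP32 convertArrayPrecision specialization) both rest on the fact that a bfloat16 value is simply the upper 16 bits of an IEEE-754 float32: absolute value is a sign-bit operation and widening is a 16-bit shift. A minimal standalone sketch of that idea, using plain integer types instead of ngraph::bfloat16 (illustration only, not part of the patch):

#include <cstdint>
#include <cstdio>
#include <cstring>

// A bfloat16 value is the upper 16 bits of an IEEE-754 binary32 value.
using bf16_bits = std::uint16_t;

// Unconditional |x|: clear the sign bit. (XOR with 0x8000, as in the
// ie_abs overload above, flips the sign bit rather than clearing it.)
static bf16_bits bf16_abs(bf16_bits v) {
    return static_cast<bf16_bits>(v & 0x7FFFu);
}

// Widen bfloat16 to float by moving its 16 bits into the upper half of a
// 32-bit pattern; the BF16 -> FP32 array conversion above does the same
// element by element through ngraph::bfloat16.
static float bf16_to_f32(bf16_bits v) {
    std::uint32_t bits = static_cast<std::uint32_t>(v) << 16;
    float out;
    std::memcpy(&out, &bits, sizeof(out));
    return out;
}

int main() {
    const bf16_bits minus_two = 0xC000;  // -2.0 in bfloat16
    std::printf("%.1f\n", bf16_to_f32(minus_two));            // -2.0
    std::printf("%.1f\n", bf16_to_f32(bf16_abs(minus_two)));  //  2.0
    return 0;
}

Clearing the sign bit (AND 0x7FFF) yields |x| for any input, whereas XOR 0x8000 negates the value; the distinction only matters when the helper can be handed an already-positive number.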
+ template InferenceEngine::Blob::Ptr inline convertBlobPrecision(const InferenceEngine::Blob::Ptr &blob) { using from_d_type = typename InferenceEngine::PrecisionTrait::value_type; @@ -464,6 +474,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlob(const InferenceEngine::Tenso #define CASE(X) case X: CommonTestUtils::fill_data_random(blob, range, start_from, resolution, seed); break; CASE(InferenceEngine::Precision::FP32) CASE(InferenceEngine::Precision::FP16) + CASE(InferenceEngine::Precision::BF16) CASE(InferenceEngine::Precision::U8) CASE(InferenceEngine::Precision::U16) CASE(InferenceEngine::Precision::I8) diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp index b976f125825bcb..4cbfc20959e564 100644 --- a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp @@ -239,6 +239,10 @@ void LayerTestsCommon::Compare(const std::vector &expected, const Compare(reinterpret_cast(expectedBuffer), reinterpret_cast(actualBuffer), size, 0); break; + case InferenceEngine::Precision::BF16: + Compare(reinterpret_cast(expectedBuffer), + reinterpret_cast(actualBuffer), size, ngraph::bfloat16(threshold)); + break; default: FAIL() << "Comparator for " << precision << " precision isn't supported"; } @@ -320,6 +324,9 @@ std::vector> LayerTestsCommon::CalculateRefs() { // IE converts f16 to f32 ngraph::pass::ConvertPrecision().run_on_function( function); + + // The same idea for bf16 + ngraph::pass::ConvertPrecision().run_on_function(function); function->validate_nodes_and_infer_types(); auto referenceInputs = std::vector>(inputs.size()); for (std::size_t i = 0; i < inputs.size(); ++i) { @@ -347,14 +354,15 @@ std::vector> LayerTestsCommon::CalculateRefs() { } } + const auto& inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrc); std::vector> expectedOutputs; switch (refMode) { case INTERPRETER: { - expectedOutputs = ngraph::helpers::interpreterFunction(function, referenceInputs, convertType); + expectedOutputs = ngraph::helpers::interpreterFunction(function, referenceInputs, inType, convertType); break; } case CONSTANT_FOLDING: { - const auto &foldedFunc = ngraph::helpers::foldFunction(function, referenceInputs); + const auto &foldedFunc = ngraph::helpers::foldFunction(function, referenceInputs, inType); expectedOutputs = ngraph::helpers::getConstData(foldedFunc, convertType); break; } @@ -370,7 +378,7 @@ std::vector> LayerTestsCommon::CalculateRefs() { m.register_pass(); m.register_pass(); m.run_passes(cloned_function); - expectedOutputs = ngraph::helpers::interpreterFunction(cloned_function, referenceInputs, convertType); + expectedOutputs = ngraph::helpers::interpreterFunction(cloned_function, referenceInputs, inType, convertType); break; } } diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp index 0290d35daf75a2..bdc1e27b209ece 100644 --- a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp @@ -15,6 +15,7 @@ #include #include #include +#include #include "common_test_utils/common_utils.hpp" #include "common_test_utils/test_common.hpp" @@ -154,29 +155,17 @@ class LayerTestsCommon : public CommonTestUtils::TestsCommon { 
protected: LayerTestsCommon(); - template - typename std::enable_if::value, T>::type - static ie_abs(const T &val) { - return std::abs(val); - } - - template - typename std::enable_if::value, T>::type - static ie_abs(const T &val) { - return val; - } - template static void Compare(const T *expected, const T *actual, std::size_t size, T threshold) { for (std::size_t i = 0; i < size; ++i) { const auto &ref = expected[i]; const auto &res = actual[i]; - const auto absoluteDifference = ie_abs(res - ref); + const auto absoluteDifference = CommonTestUtils::ie_abs(res - ref); if (absoluteDifference <= threshold) { continue; } - const auto max = std::max(ie_abs(res), ie_abs(ref)); + const auto max = std::max(CommonTestUtils::ie_abs(res), CommonTestUtils::ie_abs(ref)); ASSERT_TRUE(max != 0 && ((absoluteDifference / max) <= threshold)) << "Relative comparison of values expected: " << ref << " and actual: " << res << " at index " << i << " with threshold " << threshold diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/empty.cpp b/inference-engine/tests/ie_test_utils/unit_test_utils/empty.cpp index 00686cf815b6fc..b205d6f1967aeb 100644 --- a/inference-engine/tests/ie_test_utils/unit_test_utils/empty.cpp +++ b/inference-engine/tests/ie_test_utils/unit_test_utils/empty.cpp @@ -4,7 +4,7 @@ #include "unit_test_utils/mocks/mock_allocator.hpp" #include "unit_test_utils/mocks/mock_icnn_network.hpp" -#include "unit_test_utils/mocks/mock_ie_imemory_state.hpp" +#include "unit_test_utils/mocks/mock_ie_ivariable_state.hpp" #include "unit_test_utils/mocks/mock_iexecutable_network.hpp" #include "unit_test_utils/mocks/mock_iinfer_request.hpp" #include "unit_test_utils/mocks/mock_not_empty_icnn_network.hpp" @@ -23,5 +23,5 @@ #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iasync_infer_request_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp" -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_imemory_state_internal.hpp" +#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_ivariable_state_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp" diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp index c68604a3781924..353096030b9c9d 100644 --- a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp +++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp @@ -19,7 +19,7 @@ class MockInferencePluginInternal2 : public InferenceEngine::InferencePluginInternal { public: MOCK_METHOD2(LoadExeNetworkImpl, std::shared_ptr( - const InferenceEngine::ICNNNetwork &, const std::map &)); + const InferenceEngine::CNNNetwork &, const std::map &)); MOCK_METHOD3(LoadNetwork, void( InferenceEngine::IExecutableNetwork::Ptr &, const InferenceEngine::ICNNNetwork &, @@ -31,7 +31,7 @@ class MockInferencePluginInternal2 : public InferenceEngine::InferencePluginInte class MockInferencePluginInternal : public InferenceEngine::InferencePluginInternal { public: MOCK_METHOD2(LoadExeNetworkImpl, std::shared_ptr( - const InferenceEngine::ICNNNetwork &, const std::map &)); + const 
InferenceEngine::CNNNetwork &, const std::map &)); MOCK_METHOD1(AddExtension, void(InferenceEngine::IExtensionPtr ext_ptr)); MOCK_METHOD1(SetConfig, void(const std::map &)); @@ -48,7 +48,7 @@ class MockInferencePluginInternal : public InferenceEngine::InferencePluginInter class MockInferencePluginInternal3 : public InferenceEngine::InferencePluginInternal { public: MOCK_METHOD2(LoadExeNetworkImpl, std::shared_ptr( - const InferenceEngine::ICNNNetwork &, const std::map &)); + const InferenceEngine::CNNNetwork &, const std::map &)); MOCK_METHOD1(AddExtension, void(InferenceEngine::IExtensionPtr ext_ptr)); MOCK_METHOD1(SetConfig, void(const std::map &)); }; diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iasync_infer_request_internal.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iasync_infer_request_internal.hpp index d544e6b0438fb0..942eae7e65aa28 100644 --- a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iasync_infer_request_internal.hpp +++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iasync_infer_request_internal.hpp @@ -11,7 +11,7 @@ #include #include -#include +#include class MockIAsyncInferRequestInternal : public InferenceEngine::IAsyncInferRequestInternal { public: diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp index a22ee60960ea73..8bffc712ac17a5 100644 --- a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp +++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp @@ -21,7 +21,7 @@ class MockICore : public InferenceEngine::ICore { std::istream&, const std::string&, const std::map&)); MOCK_QUALIFIED_METHOD3(QueryNetwork, const, InferenceEngine::QueryNetworkResult( - const InferenceEngine::ICNNNetwork&, const std::string&, const std::map&)); + const InferenceEngine::CNNNetwork&, const std::string&, const std::map&)); MOCK_QUALIFIED_METHOD2(GetMetric, const, InferenceEngine::Parameter(const std::string&, const std::string&)); diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp index 0cc1e7f919ff13..63fb99ad083ead 100644 --- a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp +++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp @@ -11,7 +11,7 @@ #include #include -#include +#include class MockIInferRequestInternal : public InferenceEngine::IInferRequestInternal { public: diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_imemory_state_internal.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_ivariable_state_internal.hpp similarity index 87% rename from inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_imemory_state_internal.hpp rename to inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_ivariable_state_internal.hpp index 
13cd11033f5e2d..694a981a84d068 100644 --- a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_imemory_state_internal.hpp +++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_ivariable_state_internal.hpp @@ -9,7 +9,7 @@ #include #include -#include +#include class MockIVariableStateInternal : public InferenceEngine::IVariableStateInternal { public: diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp index dcea35207c1fa3..f770b2c6e9ebb8 100644 --- a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp +++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp @@ -23,7 +23,7 @@ void MockPlugin::SetConfig(const std::map& config) { } ExecutableNetwork -MockPlugin::LoadNetwork(const ICNNNetwork &network, +MockPlugin::LoadNetwork(const CNNNetwork &network, const std::map &config) { if (_target) { return _target->LoadNetwork(network, config); @@ -33,7 +33,7 @@ MockPlugin::LoadNetwork(const ICNNNetwork &network, } ExecutableNetworkInternal::Ptr -MockPlugin::LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork& network, +MockPlugin::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network, const std::map& config) { return {}; } @@ -52,8 +52,8 @@ INFERENCE_PLUGIN_API(StatusCode) CreatePluginEngine(IInferencePlugin *&plugin, R } } -INFERENCE_PLUGIN_API(InferenceEngine::IInferencePlugin*)CreatePluginEngineProxy( - InferenceEngine::IInferencePlugin *target) { +INFERENCE_PLUGIN_API(InferenceEngine::IInferencePlugin*) +CreatePluginEngineProxy(InferenceEngine::IInferencePlugin *target) { return new MockPlugin(target); } diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.hpp index 4bc5fe3bd21f7a..f500dfc1ce77e0 100644 --- a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.hpp +++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.hpp @@ -18,10 +18,10 @@ class MockPlugin : public InferenceEngine::InferencePluginInternal { void SetConfig(const std::map& config) override; InferenceEngine::ExecutableNetwork - LoadNetwork(const InferenceEngine::ICNNNetwork &network, + LoadNetwork(const InferenceEngine::CNNNetwork &network, const std::map &config) override; ExecutableNetworkInternal::Ptr - LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork& network, + LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network, const std::map& config) override; std::map config; diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_ie_imemory_state.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_ie_ivariable_state.hpp similarity index 100% rename from inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_ie_imemory_state.hpp rename to inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_ie_ivariable_state.hpp diff --git a/inference-engine/tests/ngraph_helpers/CMakeLists.txt b/inference-engine/tests/ngraph_helpers/CMakeLists.txt new file mode 100644 index 00000000000000..a730f8ef58b804 --- /dev/null +++ b/inference-engine/tests/ngraph_helpers/CMakeLists.txt @@ -0,0 +1,6 @@ +# Copyright (C) 2020 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + 
+add_subdirectory(ngraph_functions) +add_subdirectory(lpt_ngraph_functions) \ No newline at end of file diff --git a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/CMakeLists.txt b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/CMakeLists.txt new file mode 100644 index 00000000000000..32a0f871ff0854 --- /dev/null +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/CMakeLists.txt @@ -0,0 +1,38 @@ +# Copyright (C) 2019 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(TARGET_NAME lptNgraphFunctions) + +list(APPEND EXPORT_DEPENDENCIES + ngraphFunctions + inference_engine_lp_transformations + inference_engine_legacy + ) + +set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") + +addIeTarget( + NAME ${TARGET_NAME} + TYPE STATIC + ROOT ${PUBLIC_HEADERS_DIR} + INCLUDES + PUBLIC + ${PUBLIC_HEADERS_DIR} + ADDITIONAL_SOURCE_DIRS + ${CMAKE_CURRENT_SOURCE_DIR}/src + LINK_LIBRARIES + PRIVATE + ${EXPORT_DEPENDENCIES} + ADD_CPPLINT + DEPENDENCIES + ngraphFunctions + DEVELOPER_PACKAGE + EXPORT_DEPENDENCIES + ${EXPORT_DEPENDENCIES} +) + +ie_faster_build(${TARGET_NAME} + UNITY + PCH PRIVATE "src/precomp.hpp" +) diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/add_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/add_function.hpp similarity index 90% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/add_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/add_function.hpp index b3345222cf5809..2962265b732928 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/add_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/add_function.hpp @@ -6,11 +6,10 @@ #include #include +#include -#include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/avg_pool_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/avg_pool_function.hpp similarity index 96% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/avg_pool_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/avg_pool_function.hpp index a2856609d4894f..c47412f74d8241 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/avg_pool_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/avg_pool_function.hpp @@ -7,7 +7,6 @@ #include #include #include "common/fake_quantize_on_data.hpp" -#include "low_precision/layer_transformation.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/clamp_function.hpp 
b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/clamp_function.hpp similarity index 87% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/clamp_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/clamp_function.hpp index 3366cc5d368d51..6c8cdc1f914adf 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/clamp_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/clamp_function.hpp @@ -7,8 +7,8 @@ #include #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/add.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/add.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/add.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/add.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/builders.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/builders.hpp similarity index 92% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/builders.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/builders.hpp index 80b2a297f6888c..56ad51682b77f2 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/builders.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/builders.hpp @@ -6,14 +6,16 @@ #include #include +#include +#include #include "ngraph_ops/type_relaxed.hpp" #include "low_precision/network_helper.hpp" #include "low_precision/common/dequantization_op.hpp" -#include "ngraph_functions/low_precision_transformations/common/add.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/add.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/constant.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/constant.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/constant.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/constant.hpp diff --git 
a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/dequantization_operations.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/dequantization_operations.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/fake_quantize_on_data.hpp similarity index 98% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/fake_quantize_on_data.hpp index 1a69e394a66dec..a81699cf5982b5 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/fake_quantize_on_data.hpp @@ -7,7 +7,6 @@ #include #include #include -#include "low_precision/layer_transformation.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/fake_quantize_on_weights.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/fake_quantize_on_weights.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/multiply.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/multiply.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/common/multiply.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/multiply.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/concat_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/concat_function.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/concat_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/concat_function.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/convert_mul_or_add_finally_with_dequantization_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convert_mul_or_add_finally_with_dequantization_function.hpp similarity index 80% rename from 
inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/convert_mul_or_add_finally_with_dequantization_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convert_mul_or_add_finally_with_dequantization_function.hpp index b5a191a8a9d7fd..1ea526b95665bb 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/convert_mul_or_add_finally_with_dequantization_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convert_mul_or_add_finally_with_dequantization_function.hpp @@ -7,8 +7,8 @@ #include #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/convolution_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convolution_function.hpp similarity index 90% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/convolution_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convolution_function.hpp index bfda0a4e9830d3..223c5b3a801f0b 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/convolution_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/convolution_function.hpp @@ -6,9 +6,11 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include + +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/depth_to_space_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/depth_to_space_function.hpp similarity index 93% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/depth_to_space_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/depth_to_space_function.hpp index a1091b9c3d68f6..21a6ae13f10581 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/depth_to_space_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/depth_to_space_function.hpp @@ -11,7 +11,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git 
a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/elementwise_with_multi_parent_dequantization_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/elementwise_with_multi_parent_dequantization_function.hpp similarity index 87% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/elementwise_with_multi_parent_dequantization_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/elementwise_with_multi_parent_dequantization_function.hpp index 6f55b2d40542e7..403b56398a34e5 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/elementwise_with_multi_parent_dequantization_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/elementwise_with_multi_parent_dequantization_function.hpp @@ -6,11 +6,10 @@ #include #include +#include -#include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fake_quantize_and_convolution_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp similarity index 77% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fake_quantize_and_convolution_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp index 7e5108f8c493b4..a12966bc4d2565 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fake_quantize_and_convolution_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp @@ -6,8 +6,8 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_two_output_branches_with_convolution_function.hpp similarity index 90% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_two_output_branches_with_convolution_function.hpp index 
20bf4f2c4ffdc9..f392873b213668 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_and_two_output_branches_with_convolution_function.hpp @@ -6,8 +6,9 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" +#include +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fake_quantize_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_function.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fake_quantize_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_function.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fake_quantize_precision_selection_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp similarity index 90% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fake_quantize_precision_selection_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp index e00ad81c1cb713..e5a39644aa8df0 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fake_quantize_precision_selection_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp @@ -9,8 +9,8 @@ #include #include "low_precision/layer_transformation.hpp" #include "common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fold_fake_quantize_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fold_fake_quantize_function.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fold_fake_quantize_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fold_fake_quantize_function.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fuse_convert_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_convert_function.hpp similarity index 
82% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fuse_convert_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_convert_function.hpp index 45b686036b247c..3b16261667b5cd 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fuse_convert_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_convert_function.hpp @@ -7,8 +7,8 @@ #include #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fuse_fake_quantize_and_scale_shift_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_fake_quantize_and_scale_shift_function.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fuse_fake_quantize_and_scale_shift_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_fake_quantize_and_scale_shift_function.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fuse_fake_quantize_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_fake_quantize_function.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fuse_fake_quantize_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_fake_quantize_function.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fuse_multiply_to_fake_quantize_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_multiply_to_fake_quantize_function.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fuse_multiply_to_fake_quantize_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_multiply_to_fake_quantize_function.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fuse_subtract_to_fake_quantize_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_subtract_to_fake_quantize_function.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/fuse_subtract_to_fake_quantize_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/fuse_subtract_to_fake_quantize_function.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/get_dequantization_function.hpp 
b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/get_dequantization_function.hpp similarity index 93% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/get_dequantization_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/get_dequantization_function.hpp index 718922ad611dee..4aba78c0d054d2 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/get_dequantization_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/get_dequantization_function.hpp @@ -7,7 +7,6 @@ #include #include #include -#include "ngraph_functions/subgraph_builders.hpp" #include namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/group_convolution_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/group_convolution_function.hpp similarity index 90% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/group_convolution_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/group_convolution_function.hpp index 8d0254e01c6943..0e842e57237ad0 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/group_convolution_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/group_convolution_function.hpp @@ -6,8 +6,10 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include + +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/interpolate_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/interpolate_function.hpp similarity index 96% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/interpolate_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/interpolate_function.hpp index 6b45a189897259..508c6d35e4a03c 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/interpolate_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/interpolate_function.hpp @@ -6,7 +6,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/mat_mul_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_function.hpp similarity index 90% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/mat_mul_function.hpp rename to 
inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_function.hpp index c656260ede5d55..ac1944c1b1d7ca 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/mat_mul_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_function.hpp @@ -6,9 +6,9 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_with_optimized_constant_fake_quantize_function.hpp similarity index 87% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_with_optimized_constant_fake_quantize_function.hpp index 8b00864554615b..3bea8fa3b9fdbb 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mat_mul_with_optimized_constant_fake_quantize_function.hpp @@ -6,7 +6,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/max_pool_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/max_pool_function.hpp similarity index 91% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/max_pool_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/max_pool_function.hpp index 80f61af96fae3e..fac6d62ab409cb 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/max_pool_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/max_pool_function.hpp @@ -8,7 +8,7 @@ #include #include "common/fake_quantize_on_data.hpp" #include "low_precision/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/move_dequantization_after_function.hpp 
b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/move_dequantization_after_function.hpp similarity index 91% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/move_dequantization_after_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/move_dequantization_after_function.hpp index 229677131aff2b..44c1dc3351ea4a 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/move_dequantization_after_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/move_dequantization_after_function.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "ngraph_functions/subgraph_builders.hpp" namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/move_dequantization_after_with_int_constant_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/move_dequantization_after_with_int_constant_function.hpp similarity index 91% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/move_dequantization_after_with_int_constant_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/move_dequantization_after_with_int_constant_function.hpp index b3908880b331d0..bb59b9f2243c42 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/move_dequantization_after_with_int_constant_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/move_dequantization_after_with_int_constant_function.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "ngraph_functions/subgraph_builders.hpp" namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/mul_add_to_scaleshift_or_power_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mul_add_to_scaleshift_or_power_function.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/mul_add_to_scaleshift_or_power_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mul_add_to_scaleshift_or_power_function.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/multiply_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_function.hpp similarity index 82% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/multiply_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_function.hpp index 87bccb34bd19be..c86f3ee666db23 100644 --- 
a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/multiply_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_function.hpp @@ -7,9 +7,8 @@ #include #include -#include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "ngraph_functions/low_precision_transformations/common/constant.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/constant.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/multiply_to_group_convolution_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_to_group_convolution_function.hpp similarity index 89% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/multiply_to_group_convolution_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_to_group_convolution_function.hpp index 1b1669de6c4f27..007a80561cd01c 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/multiply_to_group_convolution_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_to_group_convolution_function.hpp @@ -6,7 +6,10 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include +#include + +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/multiply_with_one_parent_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_with_one_parent_function.hpp similarity index 85% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/multiply_with_one_parent_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_with_one_parent_function.hpp index 504ee84ca048cb..505a8e254cb409 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/multiply_with_one_parent_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/multiply_with_one_parent_function.hpp @@ -6,7 +6,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/mvn_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mvn_function.hpp similarity index 93% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/mvn_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mvn_function.hpp index 78137297403799..f4402e2e1c3275 100644 --- 
a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/mvn_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/mvn_function.hpp @@ -6,7 +6,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/normalize_l2_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/normalize_l2_function.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/normalize_l2_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/normalize_l2_function.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/prelu_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/prelu_function.hpp similarity index 92% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/prelu_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/prelu_function.hpp index 10e0aebbc6c607..5df9e4bc6155ac 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/prelu_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/prelu_function.hpp @@ -6,7 +6,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/relu_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/relu_function.hpp similarity index 92% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/relu_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/relu_function.hpp index eda021ba657a6b..f67c52fdd104b3 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/relu_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/relu_function.hpp @@ -6,7 +6,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/reshape_fully_connected_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reshape_fully_connected_function.hpp similarity index 85% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/reshape_fully_connected_function.hpp rename to 
inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reshape_fully_connected_function.hpp index 8bb513da0a3aeb..14bd5055bea77b 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/reshape_fully_connected_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reshape_fully_connected_function.hpp @@ -7,8 +7,8 @@ #include #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/reshape_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reshape_function.hpp similarity index 87% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/reshape_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reshape_function.hpp index 11847d92a2ae0b..8f8286229cbf70 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/reshape_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/reshape_function.hpp @@ -7,8 +7,8 @@ #include #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/round_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/round_function.hpp similarity index 89% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/round_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/round_function.hpp index d854c33a424c4b..e9217941fa3289 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/round_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/round_function.hpp @@ -7,7 +7,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include "ngraph_functions/subgraph_builders.hpp" namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/split_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/split_function.hpp similarity index 87% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/split_function.hpp rename to 
inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/split_function.hpp index ebc19351b69a8a..ae8d795ed3930b 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/split_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/split_function.hpp @@ -8,8 +8,8 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/squeeze_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/squeeze_function.hpp similarity index 93% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/squeeze_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/squeeze_function.hpp index 559a3fde0da62a..b5300e6a24ad5f 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/squeeze_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/squeeze_function.hpp @@ -6,7 +6,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/subtract_multiply_to_multiply_add_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/subtract_multiply_to_multiply_add_function.hpp similarity index 79% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/subtract_multiply_to_multiply_add_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/subtract_multiply_to_multiply_add_function.hpp index 5d7d201edd0330..773e486e6c4b8c 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/subtract_multiply_to_multiply_add_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/subtract_multiply_to_multiply_add_function.hpp @@ -8,10 +8,10 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/add.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/multiply.hpp" +#include "lpt_ngraph_functions/common/add.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/multiply.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/transpose_function.hpp 
b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transpose_function.hpp similarity index 87% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/transpose_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transpose_function.hpp index 836dfb6fd24408..7cb53c79c0c022 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/transpose_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/transpose_function.hpp @@ -7,8 +7,8 @@ #include #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/unsqueeze_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/unsqueeze_function.hpp similarity index 93% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/unsqueeze_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/unsqueeze_function.hpp index 43c08c4c129df1..fad256120b7ee2 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/unsqueeze_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/unsqueeze_function.hpp @@ -6,7 +6,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/variadic_split_function.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/variadic_split_function.hpp similarity index 89% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/variadic_split_function.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/variadic_split_function.hpp index e02508ee38b385..ae610611a3f1ff 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/variadic_split_function.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/variadic_split_function.hpp @@ -8,8 +8,8 @@ #include #include -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/add_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/add_function.cpp similarity index 96% rename from 
inference-engine/tests/ngraph_functions/src/low_precision_transformations/add_function.cpp
rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/add_function.cpp
index def311c946270d..b5d7d1826a0e05 100644
--- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/add_function.cpp
+++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/add_function.cpp
@@ -2,12 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "ngraph_functions/low_precision_transformations/add_function.hpp"
 #include "low_precision/network_helper.hpp"
+#include "low_precision/layer_transformation.hpp"
-#include
-#include "ngraph_functions/builders.hpp"
-#include "ngraph_functions/subgraph_builders.hpp"
+#include "ngraph/opsets/opset1.hpp"
+
+#include "lpt_ngraph_functions/common/dequantization_operations.hpp"
+#include "lpt_ngraph_functions/add_function.hpp"
 using namespace ngraph::pass::low_precision;
@@ -103,7 +104,7 @@ std::shared_ptr AddFunction::getOriginal(
 } else if (constInput == 1) {
 parameters = { as_type_ptr(input1) };
 } else {
- THROW_IE_EXCEPTION << "Unexpected constant input index";
+ throw std::runtime_error("Unexpected constant input index");
 }
 return std::make_shared(results, parameters, "AddTransformation");
 }
@@ -213,7 +214,7 @@ std::shared_ptr AddFunction::getReference(
 } else if (constInputIndex == 1) {
 parameters = { as_type_ptr(input1) };
 } else {
- THROW_IE_EXCEPTION << "Unexpected constant input index";
+ throw std::runtime_error("Unexpected constant input index");
 }
 return std::make_shared(results, parameters, "AddTransformation");
 }
diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/avg_pool_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/avg_pool_function.cpp
similarity index 98%
rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/avg_pool_function.cpp
rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/avg_pool_function.cpp
index ec4068f6b2c992..2e3e2ead5dd45c 100644
--- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/avg_pool_function.cpp
+++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/avg_pool_function.cpp
@@ -2,13 +2,14 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "ngraph_functions/low_precision_transformations/avg_pool_function.hpp"
-
 #include
 #include
-#include "ngraph_functions/subgraph_builders.hpp"
+
 #include "low_precision/network_helper.hpp"
+#include "lpt_ngraph_functions/avg_pool_function.hpp"
+#include "ngraph_functions/subgraph_builders.hpp"
+
 namespace ngraph {
 namespace builder {
 namespace subgraph {
diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/clamp_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/clamp_function.cpp
similarity index 95%
rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/clamp_function.cpp
rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/clamp_function.cpp
index b4200ceb388670..d2612e5c5a2003 100644
--- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/clamp_function.cpp
+++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/clamp_function.cpp
@@ -2,8 +2,6 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "ngraph_functions/low_precision_transformations/clamp_function.hpp"
-
 #include
 #include
 #include
@@ -11,7 +9,8 @@
 #include
 #include "ngraph_functions/subgraph_builders.hpp"
-#include "ngraph_functions/low_precision_transformations/common/builders.hpp"
+#include "lpt_ngraph_functions/common/builders.hpp"
+#include "lpt_ngraph_functions/clamp_function.hpp"
 #include "low_precision/network_helper.hpp"
 namespace ngraph {
diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/add.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/add.cpp
similarity index 94%
rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/add.cpp
rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/add.cpp
index a4a1c74e27f409..5bde7118c66824 100644
--- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/add.cpp
+++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/add.cpp
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "ngraph_functions/low_precision_transformations/common/add.hpp"
+#include "lpt_ngraph_functions/common/add.hpp"
 #include
 namespace ngraph {
diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/builders.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/builders.cpp
similarity index 99%
rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/builders.cpp
rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/builders.cpp
index e9beb28e1bbde5..f7df290de344aa 100644
--- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/builders.cpp
+++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/builders.cpp
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "ngraph_functions/low_precision_transformations/common/builders.hpp"
+#include "lpt_ngraph_functions/common/builders.hpp"
 #include
 #include
diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/constant.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/constant.cpp
similarity index 94%
rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/constant.cpp
rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/constant.cpp
index 33ebe184ff8f53..5306067c9a877f 100644
--- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/constant.cpp
+++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/constant.cpp
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "ngraph_functions/low_precision_transformations/common/constant.hpp"
+#include "lpt_ngraph_functions/common/constant.hpp"
 #include
 namespace ngraph {
diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/dequantization_operations.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/dequantization_operations.cpp
similarity index 97%
rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/dequantization_operations.cpp
rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/dequantization_operations.cpp
index 989335165dbc48..f19ee4fd762307 100644
--- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/dequantization_operations.cpp
+++
b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/dequantization_operations.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" #include namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/fake_quantize_on_data.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/fake_quantize_on_data.cpp similarity index 94% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/fake_quantize_on_data.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/fake_quantize_on_data.cpp index cda7fcd649d9b8..8412983008fe64 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/fake_quantize_on_data.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/fake_quantize_on_data.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" #include namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/fake_quantize_on_weights.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/fake_quantize_on_weights.cpp similarity index 90% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/fake_quantize_on_weights.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/fake_quantize_on_weights.cpp index d2a4f37c9edabb..30cc8ed278e843 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/fake_quantize_on_weights.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/fake_quantize_on_weights.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" #include namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/multiply.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/multiply.cpp similarity index 94% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/multiply.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/multiply.cpp index 476776c9144410..aa2b090f57966a 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/common/multiply.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/multiply.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/common/multiply.hpp" +#include "lpt_ngraph_functions/common/multiply.hpp" #include namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/concat_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/concat_function.cpp similarity index 97% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/concat_function.cpp rename to 
inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/concat_function.cpp index 421ff90a6ffdca..7716abdb98a12a 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/concat_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/concat_function.cpp @@ -2,16 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/concat_function.hpp" +#include "lpt_ngraph_functions/concat_function.hpp" #include #include "ngraph_ops/type_relaxed.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" namespace ngraph { namespace builder { @@ -539,12 +538,12 @@ std::shared_ptr ConcatFunction::getReference( "ConcatTransformation"); if (fqOnData1.outputPrecision != fqOnData2.outputPrecision) { - THROW_IE_EXCEPTION << "FakeQuantize expected precisions are different"; + throw std::runtime_error("FakeQuantize expected precisions are different"); } const ngraph::element::Type fqOnDataPrecision = fqOnData1.outputPrecision; if (fqOnDataPrecision != ngraph::element::undefined) { if (fakeQuantize1->get_output_element_type(0) != fakeQuantize2->get_output_element_type(0)) { - THROW_IE_EXCEPTION << "FakeQuantize operation precisions are different"; + throw std::runtime_error("FakeQuantize operation precisions are different"); } const ngraph::element::Type fakeQuantizePrecision = fakeQuantize1->get_output_element_type(0); @@ -589,12 +588,12 @@ std::shared_ptr ConcatFunction::getReference( "ConcatTransformation"); if (fqOnData1.outputPrecision != fqOnData2.outputPrecision) { - THROW_IE_EXCEPTION << "FakeQuantize expected precisions are different"; + throw std::runtime_error("FakeQuantize expected precisions are different"); } const ngraph::element::Type fqOnDataPrecision = fqOnData1.outputPrecision; if (fqOnDataPrecision != ngraph::element::undefined) { if (fakeQuantize1->get_output_element_type(0) != fakeQuantize2->get_output_element_type(0)) { - THROW_IE_EXCEPTION << "FakeQuantize operation precisions are different"; + throw std::runtime_error("FakeQuantize operation precisions are different"); } const ngraph::element::Type fakeQuantizePrecision = fakeQuantize1->get_output_element_type(0); @@ -664,13 +663,13 @@ std::shared_ptr ConcatFunction::getReferenceWithNeighbors( "ConcatWithNeighborsTransformation"); if ((fqOnData1.outputPrecision != fqOnData2.outputPrecision) || (fqOnData2.outputPrecision != fqOnData3.outputPrecision)) { - THROW_IE_EXCEPTION << "FakeQuantize expected precisions are different"; + throw std::runtime_error("FakeQuantize expected precisions are different"); } const ngraph::element::Type fqOnDataPrecision = fqOnData1.outputPrecision; if (fqOnDataPrecision != ngraph::element::undefined) { if ((fakeQuantize1->get_output_element_type(0) != fakeQuantize2->get_output_element_type(0)) || (fakeQuantize2->get_output_element_type(0) != fakeQuantize3->get_output_element_type(0))) { - THROW_IE_EXCEPTION << "FakeQuantize operation precisions are different"; + throw std::runtime_error("FakeQuantize operation 
precisions are different"); } const ngraph::element::Type fakeQuantizePrecision = fakeQuantize1->get_output_element_type(0); @@ -771,12 +770,12 @@ std::shared_ptr ConcatFunction::getReferenceWithIntermediate( "ConcatWithIntermediateTransformation"); if ((fqOnData1.outputPrecision != fqOnData2.outputPrecision)) { - THROW_IE_EXCEPTION << "FakeQuantize expected precisions are different"; + throw std::runtime_error("FakeQuantize expected precisions are different"); } const ngraph::element::Type fqOnDataPrecision = fqOnData1.outputPrecision; if (fqOnDataPrecision != ngraph::element::undefined) { if (fakeQuantize1->get_output_element_type(0) != fakeQuantize2->get_output_element_type(0)) { - THROW_IE_EXCEPTION << "FakeQuantize operation precisions are different"; + throw std::runtime_error("FakeQuantize operation precisions are different"); } const ngraph::element::Type fakeQuantizePrecision = fakeQuantize1->get_output_element_type(0); @@ -881,11 +880,11 @@ std::shared_ptr ConcatFunction::getReferenceWithSplitedInterme "ConcatWithIntermediateTransformation"); if ((fqOnData1.outputPrecision != fqOnData2.outputPrecision)) { - THROW_IE_EXCEPTION << "FakeQuantize expected precisions are different"; + throw std::runtime_error("FakeQuantize expected precisions are different"); } if (fqOnDataPrecision != ngraph::element::undefined) { if (fakeQuantize1->get_output_element_type(0) != fakeQuantize2->get_output_element_type(0)) { - THROW_IE_EXCEPTION << "FakeQuantize operation precisions are different"; + throw std::runtime_error("FakeQuantize operation precisions are different"); } } @@ -977,12 +976,12 @@ std::shared_ptr ConcatFunction::getReferenceSelectionWithInter "ConcatWithIntermediateTransformation"); if ((fqOnData1.outputPrecision != fqOnData2.outputPrecision)) { - THROW_IE_EXCEPTION << "FakeQuantize expected precisions are different"; + throw std::runtime_error("FakeQuantize expected precisions are different"); } const ngraph::element::Type fqOnDataPrecision = fqOnData1.outputPrecision; if (fqOnDataPrecision != ngraph::element::undefined) { if (fakeQuantize1->get_output_element_type(0) != fakeQuantize2->get_output_element_type(0)) { - THROW_IE_EXCEPTION << "FakeQuantize operation precisions are different"; + throw std::runtime_error("FakeQuantize operation precisions are different"); } const ngraph::element::Type fakeQuantizePrecision = fakeQuantize1->get_output_element_type(0); @@ -1076,11 +1075,11 @@ std::shared_ptr ConcatFunction::getReferenceWithDifferentPreci "ConcatWithDifferentChildsTransformation"); if ((fqOnData1.outputPrecision != fqOnData2.outputPrecision)) { - THROW_IE_EXCEPTION << "FakeQuantize expected precisions are different"; + throw std::runtime_error("FakeQuantize expected precisions are different"); } if (fqOnDataPrecision != ngraph::element::undefined) { if (fakeQuantize1->get_output_element_type(0) != fakeQuantize2->get_output_element_type(0)) { - THROW_IE_EXCEPTION << "FakeQuantize operation precisions are different"; + throw std::runtime_error("FakeQuantize operation precisions are different"); } } diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/convert_mul_or_add_finally_with_dequantization_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convert_mul_or_add_finally_with_dequantization_function.cpp similarity index 94% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/convert_mul_or_add_finally_with_dequantization_function.cpp rename to 
inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convert_mul_or_add_finally_with_dequantization_function.cpp index 03d0c913dc8e1d..39a0b99712418e 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/convert_mul_or_add_finally_with_dequantization_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convert_mul_or_add_finally_with_dequantization_function.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/convert_mul_or_add_finally_with_dequantization_function.hpp" +#include "lpt_ngraph_functions/convert_mul_or_add_finally_with_dequantization_function.hpp" #include #include @@ -11,7 +11,7 @@ #include #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" #include "low_precision/network_helper.hpp" #include #include "low_precision/common/dequantization_op.hpp" diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/convolution_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_function.cpp similarity index 95% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/convolution_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_function.cpp index 97e7814e469ea4..b7f678a5f92990 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/convolution_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_function.cpp @@ -2,17 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/convolution_function.hpp" +#include "lpt_ngraph_functions/convolution_function.hpp" #include #include #include "ngraph_functions/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" #include "low_precision/common/dequantization_op.hpp" #include "low_precision/network_helper.hpp" @@ -35,7 +35,7 @@ std::shared_ptr ConvolutionFunction::getOriginal( const size_t outputChannelsCount = 2 * inputShape[1]; if ((weights->cast_vector().size() != 1ul) && (weights->cast_vector().size() != (inputChannelsCount * outputChannelsCount))) { - THROW_IE_EXCEPTION << "unexpected actual weights values size"; + throw std::runtime_error("unexpected actual weights values size"); } if (weights->cast_vector().size() == 1ul) { @@ -147,7 +147,7 @@ std::shared_ptr ConvolutionFunction::getReferenceWithIncorrect const size_t outputChannelsCount = 2 * inputShape[1]; if ((weightsValues.size() != 1ul) && (weightsValues.size() != (inputChannelsCount * outputChannelsCount))) { - THROW_IE_EXCEPTION << "unexpected actual weights values size"; + throw std::runtime_error("unexpected actual weights values 
size"); } const std::shared_ptr weights = ngraph::opset1::Constant::create( @@ -228,7 +228,7 @@ std::shared_ptr ConvolutionFunction::getReference( const size_t outputChannelsCount = 2 * inputShape[1]; if ((weights->cast_vector().size() != 1ul) && (weights->cast_vector().size() != (inputChannelsCount * outputChannelsCount))) { - THROW_IE_EXCEPTION << "unexpected actual weights values size"; + throw std::runtime_error("unexpected actual weights values size"); } if (weights->cast_vector().size() == 1ul) { @@ -295,7 +295,7 @@ std::shared_ptr ConvolutionFunction::get( const size_t inputChannelsCount = inputShape[1]; const size_t outputChannelsCount = 2 * inputShape[1]; if ((weightsValues.size() != 1ul) && (weightsValues.size() != (inputChannelsCount * outputChannelsCount))) { - THROW_IE_EXCEPTION << "unexpected actual weights values size"; + throw std::runtime_error("unexpected actual weights values size"); } const std::shared_ptr parentOnData = fakeQuantizeOnData.empty() ? std::dynamic_pointer_cast(input) : fqOnData; diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/depth_to_space_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/depth_to_space_function.cpp similarity index 95% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/depth_to_space_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/depth_to_space_function.cpp index f9b48751b51835..9da8688ebe1819 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/depth_to_space_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/depth_to_space_function.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/depth_to_space_function.hpp" +#include "lpt_ngraph_functions/depth_to_space_function.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/elementwise_with_multi_parent_dequantization_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/elementwise_with_multi_parent_dequantization_function.cpp similarity index 96% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/elementwise_with_multi_parent_dequantization_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/elementwise_with_multi_parent_dequantization_function.cpp index 2bc5bf081b5695..ac8613e260ff31 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/elementwise_with_multi_parent_dequantization_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/elementwise_with_multi_parent_dequantization_function.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/elementwise_with_multi_parent_dequantization_function.hpp" +#include "lpt_ngraph_functions/elementwise_with_multi_parent_dequantization_function.hpp" #include "low_precision/network_helper.hpp" #include diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fake_quantize_and_convolution_function.cpp 
b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp similarity index 95% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/fake_quantize_and_convolution_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp index 7f372cd9b760a1..04047f409942ff 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fake_quantize_and_convolution_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/fake_quantize_and_convolution_function.hpp" +#include "lpt_ngraph_functions/fake_quantize_and_convolution_function.hpp" #include #include "ngraph_functions/subgraph_builders.hpp" diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_two_output_branches_with_convolution_function.cpp similarity index 95% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_two_output_branches_with_convolution_function.cpp index 232190190b5fd4..c4eacfa26d6d2a 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_two_output_branches_with_convolution_function.cpp @@ -2,14 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution_function.hpp" #include -#include "ngraph_functions/subgraph_builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" +#include "lpt_ngraph_functions/fake_quantize_and_two_output_branches_with_convolution_function.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" #include "low_precision/network_helper.hpp" +#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fake_quantize_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_function.cpp similarity index 98% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/fake_quantize_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_function.cpp index d868aa97734bbc..f1144d9b3d004f 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fake_quantize_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_function.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fake_quantize_function.hpp" #include 
#include "ngraph_ops/type_relaxed.hpp" diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fake_quantize_precision_selection_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_precision_selection_function.cpp similarity index 98% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/fake_quantize_precision_selection_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_precision_selection_function.cpp index 2e7231a1be9335..53ffde8d33f482 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fake_quantize_precision_selection_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_precision_selection_function.cpp @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/fake_quantize_precision_selection_function.hpp" +#include "lpt_ngraph_functions/fake_quantize_precision_selection_function.hpp" #include #include #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" #include "low_precision/network_helper.hpp" namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fold_fake_quantize_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fold_fake_quantize_function.cpp similarity index 89% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/fold_fake_quantize_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fold_fake_quantize_function.cpp index 23758605039db3..b707d61d6893cf 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fold_fake_quantize_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fold_fake_quantize_function.cpp @@ -2,13 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/fold_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fold_fake_quantize_function.hpp" #include #include "ngraph_ops/type_relaxed.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "low_precision/network_helper.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_convert_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_convert_function.cpp similarity index 95% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_convert_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_convert_function.cpp index c23bace2f3d3c2..f762b2ee318ba8 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_convert_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_convert_function.cpp @@ -2,11 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/fuse_convert_function.hpp" +#include "lpt_ngraph_functions/fuse_convert_function.hpp" #include #include "ngraph_functions/subgraph_builders.hpp" -#include 
"ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_and_scale_shift_function.cpp similarity index 94% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_and_scale_shift_function.cpp index 99c9715123bbac..d77ebadad1f0ee 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_and_scale_shift_function.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/fuse_fake_quantize_and_scale_shift_function.hpp" +#include "lpt_ngraph_functions/fuse_fake_quantize_and_scale_shift_function.hpp" #include #include "ngraph_functions/builders.hpp" diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_fake_quantize_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_function.cpp similarity index 90% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_fake_quantize_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_function.cpp index e6139608c9196b..d66c05d6d7fbf8 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_fake_quantize_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_fake_quantize_function.cpp @@ -2,16 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/fuse_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fuse_fake_quantize_function.hpp" #include #include "ngraph_ops/type_relaxed.hpp" #include "ngraph_functions/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { @@ -81,7 +81,7 @@ std::shared_ptr FuseFakeQuantizeFunction::getOriginal( lastNode = makeDequantization(lastNode, {{element::f32}, {}, {0.01f}}); } else { - THROW_IE_EXCEPTION << "Unknown parameter on output intervals!"; + throw std::runtime_error("Unknown parameter on output intervals!"); } lastNode->set_friendly_name("output"); @@ -95,11 +95,11 @@ std::shared_ptr FuseFakeQuantizeFunction::get( const ngraph::element::Type precisionFqOnData, const FakeQuantizeOnData& fqOnData) { if (branches.size() != 2ul) { - THROW_IE_EXCEPTION << "unsupported branches count"; + throw std::runtime_error("unsupported branches count"); } if (branches[0].dequantization.multiply.outPrecision != 
branches[1].dequantization.multiply.outPrecision) { - THROW_IE_EXCEPTION << "branch precisions are not equal"; + throw std::runtime_error("branch precisions are not equal"); } ngraph::ParameterVector inputs; diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_multiply_to_fake_quantize_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_multiply_to_fake_quantize_function.cpp similarity index 76% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_multiply_to_fake_quantize_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_multiply_to_fake_quantize_function.cpp index 456116ba11a835..d65000b7a38cd9 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_multiply_to_fake_quantize_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_multiply_to_fake_quantize_function.cpp @@ -2,16 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/fuse_multiply_to_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fuse_multiply_to_fake_quantize_function.hpp" #include #include "ngraph_ops/type_relaxed.hpp" #include "ngraph_functions/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_subtract_to_fake_quantize_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_subtract_to_fake_quantize_function.cpp similarity index 88% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_subtract_to_fake_quantize_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_subtract_to_fake_quantize_function.cpp index 1988222a8a90cf..79b1b36726e794 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/fuse_subtract_to_fake_quantize_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fuse_subtract_to_fake_quantize_function.cpp @@ -2,16 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/fuse_subtract_to_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/fuse_subtract_to_fake_quantize_function.hpp" #include #include "ngraph_ops/type_relaxed.hpp" #include "ngraph_functions/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git 
a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/get_dequantization_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/get_dequantization_function.cpp similarity index 96% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/get_dequantization_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/get_dequantization_function.cpp index 36dacee3b40557..624a38c77f55f8 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/get_dequantization_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/get_dequantization_function.cpp @@ -8,7 +8,7 @@ #include #include -#include "ngraph_functions/low_precision_transformations/get_dequantization_function.hpp" +#include "lpt_ngraph_functions/get_dequantization_function.hpp" #include "ngraph_functions/subgraph_builders.hpp" #include diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/group_convolution_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/group_convolution_function.cpp similarity index 94% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/group_convolution_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/group_convolution_function.cpp index 10cb80a6c9986c..008534b08991f1 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/group_convolution_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/group_convolution_function.cpp @@ -2,17 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/group_convolution_function.hpp" +#include "lpt_ngraph_functions/group_convolution_function.hpp" #include #include #include "ngraph_functions/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_weights.hpp" -#include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_weights.hpp" +#include "lpt_ngraph_functions/common/fake_quantize_on_data.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" #include "low_precision/common/dequantization_op.hpp" using namespace ngraph::opset1; @@ -89,7 +89,7 @@ std::shared_ptr GroupConvolutionFunction::getOriginal( const size_t weightsSize = weightsConst->cast_vector().size(); if ((weightsSize != 1ul) && (weightsSize != (inputChannelsCount * outputChannelsCount))) { - THROW_IE_EXCEPTION << "unexpected actual weights values size"; + throw std::runtime_error("unexpected actual weights values size"); } std::shared_ptr weights = createWeightsOriginal( @@ -189,7 +189,7 @@ std::shared_ptr GroupConvolutionFunction::getReference( const size_t weightsSize = weightsConst->cast_vector().size(); if ((weightsSize != 1ul) && (weightsSize != (inputChannelsCount * outputChannelsCount))) { - THROW_IE_EXCEPTION << "unexpected actual weights values size"; + throw std::runtime_error("unexpected actual weights values size"); } std::shared_ptr weights; diff --git 
a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/interpolate_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/interpolate_function.cpp similarity index 98% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/interpolate_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/interpolate_function.cpp index 44cccef2dd1d72..bc51e941f7c8ab 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/interpolate_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/interpolate_function.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/interpolate_function.hpp" +#include "lpt_ngraph_functions/interpolate_function.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/mat_mul_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/mat_mul_function.cpp similarity index 96% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/mat_mul_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/mat_mul_function.cpp index 5bca24e4131e93..0c0211dcb97504 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/mat_mul_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/mat_mul_function.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/mat_mul_function.hpp" +#include "lpt_ngraph_functions/mat_mul_function.hpp" #include #include @@ -11,7 +11,7 @@ #include "ngraph_ops/type_relaxed.hpp" #include "ngraph_functions/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" namespace ngraph { namespace builder { @@ -53,11 +53,11 @@ std::shared_ptr MatMulFunction::getOriginal( const ngraph::element::Type precisionBeforeDequantization2, const DequantizationOperations& dequantization2) { if (!dequantization1.convert.empty() && (precisionBeforeDequantization1 == dequantization1.convert.outPrecision)) { - THROW_IE_EXCEPTION << "unexpected input arguments for branch 1"; + throw std::runtime_error("unexpected input arguments for branch 1"); } if (!dequantization2.convert.empty() && (precisionBeforeDequantization2 == dequantization2.convert.outPrecision)) { - THROW_IE_EXCEPTION << "unexpected input arguments for branch 2"; + throw std::runtime_error("unexpected input arguments for branch 2"); } const std::shared_ptr input1 = std::make_shared(precisionBeforeDequantization1, inputShape1); @@ -137,11 +137,11 @@ std::shared_ptr MatMulFunction::getReference( const DequantizationOperations& dequantization2, const DequantizationOperations& resultDequantizationOperations) { if (!dequantization1.convert.empty() && (precisionBeforeDequantization1 == dequantization1.convert.outPrecision)) { - THROW_IE_EXCEPTION << "unexpected input arguments for branch 1"; + throw std::runtime_error("unexpected input arguments for branch 1"); } if (!dequantization2.convert.empty() && 
(precisionBeforeDequantization2 == dequantization2.convert.outPrecision)) { - THROW_IE_EXCEPTION << "unexpected input arguments for branch 2"; + throw std::runtime_error("unexpected input arguments for branch 2"); } const std::shared_ptr input1 = std::make_shared(precisionBeforeDequantization1, inputShape1); diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/mat_mul_with_optimized_constant_fake_quantize_function.cpp similarity index 95% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/mat_mul_with_optimized_constant_fake_quantize_function.cpp index 6543e0d84568ec..26da1586d93d7a 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/mat_mul_with_optimized_constant_fake_quantize_function.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_function.hpp" +#include "lpt_ngraph_functions/mat_mul_with_optimized_constant_fake_quantize_function.hpp" #include #include "ngraph_functions/builders.hpp" diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/max_pool_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/max_pool_function.cpp similarity index 94% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/max_pool_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/max_pool_function.cpp index 7296028af53313..d1906b0073a7d5 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/max_pool_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/max_pool_function.cpp @@ -2,13 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/max_pool_function.hpp" +#include "lpt_ngraph_functions/max_pool_function.hpp" #include #include #include "low_precision/network_helper.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/move_dequantization_after_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/move_dequantization_after_function.cpp similarity index 94% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/move_dequantization_after_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/move_dequantization_after_function.cpp index cd8d8d80bd3a62..0d9f86cbcfa804 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/move_dequantization_after_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/move_dequantization_after_function.cpp @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include 
"ngraph_functions/low_precision_transformations/move_dequantization_after_function.hpp" +#include "lpt_ngraph_functions/move_dequantization_after_function.hpp" #include "low_precision/network_helper.hpp" #include #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" using namespace ngraph::pass::low_precision; diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/move_dequantization_after_with_int_constant_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/move_dequantization_after_with_int_constant_function.cpp similarity index 94% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/move_dequantization_after_with_int_constant_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/move_dequantization_after_with_int_constant_function.cpp index f774a4c3970379..618368377533d7 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/move_dequantization_after_with_int_constant_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/move_dequantization_after_with_int_constant_function.cpp @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/move_dequantization_after_with_int_constant_function.hpp" +#include "lpt_ngraph_functions/move_dequantization_after_with_int_constant_function.hpp" #include "low_precision/network_helper.hpp" #include #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" using namespace ngraph::pass::low_precision; diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/mul_add_to_scaleshift_or_power_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/mul_add_to_scaleshift_or_power_function.cpp similarity index 92% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/mul_add_to_scaleshift_or_power_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/mul_add_to_scaleshift_or_power_function.cpp index d04c97c2c8b19d..967abde394df66 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/mul_add_to_scaleshift_or_power_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/mul_add_to_scaleshift_or_power_function.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/mul_add_to_scaleshift_or_power_function.hpp" +#include "lpt_ngraph_functions/mul_add_to_scaleshift_or_power_function.hpp" #include #include "ngraph_ops/type_relaxed.hpp" @@ -12,9 +12,8 @@ #include #include -#include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/multiply_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_function.cpp 
similarity index 83% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/multiply_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_function.cpp index e7125c09468722..b1d66df7f59880 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/multiply_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_function.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/multiply_function.hpp" +#include "lpt_ngraph_functions/multiply_function.hpp" #include #include @@ -10,8 +10,8 @@ #include "low_precision/common/dequantization_op.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" using namespace ngraph::pass::low_precision; @@ -27,11 +27,15 @@ struct BranchNodes { BranchNodes getBranch(const MultiplyBranch& branch) { if (!branch.constant.empty()) { if (branch.inputShape != branch.constant.shape) { - THROW_IE_EXCEPTION << "shapes are not equals: " << branch.inputShape << " & " << branch.constant.shape; + std::ostringstream message; + message << "shapes are not equals: " << branch.inputShape << " & " << branch.constant.shape; + throw std::runtime_error(message.str()); } if (branch.precisionBeforeDequantization != branch.constant.outPrecision) { - THROW_IE_EXCEPTION << "precisions are not equals: " << branch.precisionBeforeDequantization << " & " << branch.constant.outPrecision; + std::ostringstream message; + message << "precisions are not equals: " << branch.precisionBeforeDequantization << " & " << branch.constant.outPrecision; + throw std::runtime_error(message.str()); } } diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/multiply_to_group_convolution_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_to_group_convolution_function.cpp similarity index 95% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/multiply_to_group_convolution_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_to_group_convolution_function.cpp index e7e5eb05bcea99..6399521d8007da 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/multiply_to_group_convolution_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_to_group_convolution_function.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/multiply_to_group_convolution_function.hpp" +#include "lpt_ngraph_functions/multiply_to_group_convolution_function.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" #include "ngraph_ops/type_relaxed.hpp" namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/multiply_with_one_parent_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_with_one_parent_function.cpp similarity index 92% rename from 
inference-engine/tests/ngraph_functions/src/low_precision_transformations/multiply_with_one_parent_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_with_one_parent_function.cpp index 66487d0bf2516c..a29e007ecdf5cb 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/multiply_with_one_parent_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/multiply_with_one_parent_function.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/multiply_with_one_parent_function.hpp" +#include "lpt_ngraph_functions/multiply_with_one_parent_function.hpp" #include #include "ngraph_functions/builders.hpp" diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/mvn_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/mvn_function.cpp similarity index 95% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/mvn_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/mvn_function.cpp index 8868756079b418..d5c0bbffc0698c 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/mvn_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/mvn_function.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/mvn_function.hpp" +#include "lpt_ngraph_functions/mvn_function.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" #include "ngraph_ops/type_relaxed.hpp" namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/normalize_l2_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/normalize_l2_function.cpp similarity index 98% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/normalize_l2_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/normalize_l2_function.cpp index 582deb607840bb..d1b7a8075cdeb9 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/normalize_l2_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/normalize_l2_function.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/normalize_l2_function.hpp" +#include "lpt_ngraph_functions/normalize_l2_function.hpp" #include #include diff --git a/inference-engine/tests/ngraph_functions/src/precomp.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/precomp.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/precomp.hpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/precomp.hpp diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/prelu_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/prelu_function.cpp similarity index 95% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/prelu_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/prelu_function.cpp index 1356f2a87929b3..2ee4055b960819 100644 --- 
a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/prelu_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/prelu_function.cpp @@ -2,14 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/prelu_function.hpp" +#include "lpt_ngraph_functions/prelu_function.hpp" #include #include #include "ngraph_ops/type_relaxed.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" #include "low_precision/network_helper.hpp" namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/relu_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/relu_function.cpp similarity index 94% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/relu_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/relu_function.cpp index e02b23e21d1fbf..a0fb6c7639913f 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/relu_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/relu_function.cpp @@ -2,14 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/relu_function.hpp" +#include "lpt_ngraph_functions/relu_function.hpp" #include #include #include "ngraph_ops/type_relaxed.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" #include "low_precision/network_helper.hpp" namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/reshape_fully_connected_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/reshape_fully_connected_function.cpp similarity index 94% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/reshape_fully_connected_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/reshape_fully_connected_function.cpp index d4fd5e6a28ab49..78d291ef9f6004 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/reshape_fully_connected_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/reshape_fully_connected_function.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/reshape_fully_connected_function.hpp" +#include "lpt_ngraph_functions/reshape_fully_connected_function.hpp" #include #include @@ -13,7 +13,7 @@ #include #include #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" #include "low_precision/network_helper.hpp" #include "low_precision/common/dequantization_op.hpp" diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/reshape_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/reshape_function.cpp similarity index 96% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/reshape_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/reshape_function.cpp index 
a0851e39830853..39c7e9f9f844a7 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/reshape_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/reshape_function.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/reshape_function.hpp" +#include "lpt_ngraph_functions/reshape_function.hpp" #include -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/round_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/round_function.cpp similarity index 92% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/round_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/round_function.cpp index cdc4b3ec1387da..394de7dbaf2151 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/round_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/round_function.cpp @@ -2,11 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/round_function.hpp" - #include + +#include "lpt_ngraph_functions/round_function.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" + #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" using namespace ngraph::pass::low_precision; diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/split_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/split_function.cpp similarity index 93% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/split_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/split_function.cpp index a3ec1491c58cc2..447a1855eed5c5 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/split_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/split_function.cpp @@ -6,12 +6,12 @@ #include #include -#include "ngraph_functions/low_precision_transformations/split_function.hpp" +#include "lpt_ngraph_functions/split_function.hpp" #include "ngraph_functions/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/squeeze_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/squeeze_function.cpp similarity index 96% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/squeeze_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/squeeze_function.cpp index 25e81e8d4b1610..3680a551d0be37 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/squeeze_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/squeeze_function.cpp @@ -2,10 
+2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/squeeze_function.hpp" +#include "lpt_ngraph_functions/squeeze_function.hpp" #include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" #include "ngraph_ops/type_relaxed.hpp" namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/subtract_multiply_to_multiply_add_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/subtract_multiply_to_multiply_add_function.cpp similarity index 95% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/subtract_multiply_to_multiply_add_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/subtract_multiply_to_multiply_add_function.cpp index 0c6bece00c109b..7668447a3a334b 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/subtract_multiply_to_multiply_add_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/subtract_multiply_to_multiply_add_function.cpp @@ -2,11 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/subtract_multiply_to_multiply_add_function.hpp" +#include "lpt_ngraph_functions/subtract_multiply_to_multiply_add_function.hpp" #include #include -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" #include "low_precision/common/dequantization_op.hpp" using namespace ngraph::pass::low_precision; diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/transpose_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/transpose_function.cpp similarity index 96% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/transpose_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/transpose_function.cpp index f0aa2b1d4f2e6f..9542ba1113729a 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/transpose_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/transpose_function.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph_functions/low_precision_transformations/transpose_function.hpp" +#include "lpt_ngraph_functions/transpose_function.hpp" #include -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" namespace ngraph { namespace builder { diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/unsqueeze_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/unsqueeze_function.cpp similarity index 94% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/unsqueeze_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/unsqueeze_function.cpp index 5e72f96df052f4..f3528d5fb90666 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/unsqueeze_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/unsqueeze_function.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include 
"ngraph_functions/low_precision_transformations/unsqueeze_function.hpp" +#include "lpt_ngraph_functions/unsqueeze_function.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" +#include "ngraph_functions/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" #include "ngraph_ops/type_relaxed.hpp" namespace ngraph { @@ -76,8 +76,6 @@ std::shared_ptr UnsqueezeFunction::getReference( return std::make_shared(results, ngraph::ParameterVector{ input }, "UnsqueezeTransformation"); } - - } // namespace subgraph } // namespace builder } // namespace ngraph diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/variadic_split_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/variadic_split_function.cpp similarity index 92% rename from inference-engine/tests/ngraph_functions/src/low_precision_transformations/variadic_split_function.cpp rename to inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/variadic_split_function.cpp index 7ddeb327206874..0349e0d1c541b0 100644 --- a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/variadic_split_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/variadic_split_function.cpp @@ -5,13 +5,11 @@ #include #include -#include -#include "ngraph_functions/low_precision_transformations/variadic_split_function.hpp" +#include "lpt_ngraph_functions/variadic_split_function.hpp" -#include "ngraph_functions/subgraph_builders.hpp" -#include "low_precision/network_helper.hpp" -#include "ngraph_functions/low_precision_transformations/common/builders.hpp" -#include "ngraph_functions/low_precision_transformations/common/dequantization_operations.hpp" +#include "ngraph_functions/builders.hpp" +#include "lpt_ngraph_functions/common/builders.hpp" +#include "lpt_ngraph_functions/common/dequantization_operations.hpp" namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/CMakeLists.txt b/inference-engine/tests/ngraph_helpers/ngraph_functions/CMakeLists.txt similarity index 71% rename from inference-engine/tests/ngraph_functions/CMakeLists.txt rename to inference-engine/tests/ngraph_helpers/ngraph_functions/CMakeLists.txt index ce9ac3e6b20302..a7514816390cc4 100644 --- a/inference-engine/tests/ngraph_functions/CMakeLists.txt +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) 2019 Intel Corporation +# Copyright (C) 2019-2020 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -8,7 +8,6 @@ list(APPEND EXPORT_DEPENDENCIES ${NGRAPH_LIBRARIES} ngraph_backend interpreter_backend - inference_engine_lp_transformations ) set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") @@ -17,10 +16,14 @@ addIeTarget( NAME ${TARGET_NAME} TYPE STATIC ROOT ${PUBLIC_HEADERS_DIR} + INCLUDES + PUBLIC + ${PUBLIC_HEADERS_DIR} ADDITIONAL_SOURCE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/src LINK_LIBRARIES - funcTestUtils + PUBLIC + ${EXPORT_DEPENDENCIES} ADD_CPPLINT DEVELOPER_PACKAGE EXPORT_DEPENDENCIES @@ -31,7 +34,3 @@ ie_faster_build(${TARGET_NAME} UNITY PCH PRIVATE "src/precomp.hpp" ) - -target_include_directories(${TARGET_NAME} PUBLIC ${PUBLIC_HEADERS_DIR}) - -target_link_libraries(${TARGET_NAME} PUBLIC ${EXPORT_DEPENDENCIES}) diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/builders.hpp 
similarity index 98% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/builders.hpp index 13069b516a69b0..931da1dd21a7e2 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/builders.hpp @@ -347,6 +347,12 @@ std::shared_ptr makePooling(const ngraph::Output &in, bool excludePad, const ngraph::helpers::PoolingTypes &poolType); +std::shared_ptr makeROIPooling(const Output& input, + const Output& coords, + const Shape& output_size, + const float spatial_scale, + const ngraph::helpers::ROIPoolingTypes& roi_pool_type); + std::shared_ptr makeScatterUpdate(const ngraph::Output &in, const element::Type& indicesType, const std::vector& indicesShape, diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/pass/convert_prc.hpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/pass/convert_prc.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/pass/convert_prc.hpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/pass/convert_prc.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp similarity index 92% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp index 91d43e21eda99b..9aef1dfacbf110 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp @@ -4,16 +4,13 @@ #pragma once -#include -#include #include "ngraph_functions/builders.hpp" namespace ngraph { namespace builder { namespace subgraph { static std::shared_ptr makeConvPoolRelu(std::vector inputShape = {1, 1, 32, 32}, - InferenceEngine::Precision netPrecision = InferenceEngine::Precision::FP32) { - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); params.front()->set_friendly_name("Param_1"); auto const1 = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{1, 32, 1, 32}); @@ -40,8 +37,7 @@ static std::shared_ptr makeConvPoolRelu(std::vector in } static std::shared_ptr makeSplitConvConcat(std::vector inputShape = {1, 4, 20, 20}, - InferenceEngine::Precision netPrecision = InferenceEngine::Precision::FP32) { - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); @@ -61,8 +57,7 @@ static std::shared_ptr makeSplitConvConcat(std::vector } static std::shared_ptr makeKSOFunction(std::vector inputShape = {1, 4, 20, 20}, - InferenceEngine::Precision netPrecision = InferenceEngine::Precision::FP32) { - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + 
ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); auto shapeOf = std::make_shared(params[0]); @@ -81,7 +76,7 @@ static std::shared_ptr makeKSOFunction(std::vector inp } static std::shared_ptr makeSplitMultiConvConcat(std::vector inputShape = {1, 4, 20, 20}) { - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(InferenceEngine::Precision::FP32); + auto ngPrc = ngraph::element::Type_t::f32; auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); @@ -124,8 +119,7 @@ static std::shared_ptr makeSplitMultiConvConcat(std::vector makeTIwithLSTMcell(InferenceEngine::Precision prc = InferenceEngine::Precision::FP32) { - auto ngPRC = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(prc); +static std::shared_ptr makeTIwithLSTMcell(ngraph::element::Type_t ngPRC = ngraph::element::Type_t::f32) { // That which we iterate over const size_t N = 32; // Batch size const size_t L = 10; // Sequence length @@ -184,8 +178,7 @@ static std::shared_ptr makeTIwithLSTMcell(InferenceEngine::Pre } static std::shared_ptr makeSingleConv(std::vector inputShape = {1, 3, 24, 24}, - InferenceEngine::Precision prc = InferenceEngine::Precision::FP32) { - ngraph::element::Type type = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(prc); + ngraph::element::Type_t type = ngraph::element::Type_t::f32) { auto param0 = std::make_shared(type, ngraph::Shape(inputShape)); auto conv1 = ngraph::builder::makeConvolution(param0, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, @@ -197,7 +190,7 @@ static std::shared_ptr makeSingleConv(std::vector inpu } static std::shared_ptr makeMultiSingleConv(std::vector inputShape = {1, 3, 24, 24}) { - ngraph::element::Type type = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(InferenceEngine::Precision::FP32); + ngraph::element::Type type = ngraph::element::Type_t::f32; auto param0 = std::make_shared(type, ngraph::Shape(inputShape)); auto conv1 = ngraph::builder::makeConvolution(param0, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, ngraph::op::PadType::EXPLICIT, 5); @@ -226,8 +219,7 @@ static std::shared_ptr makeMultiSingleConv(std::vector } static std::shared_ptr make2InputSubtract(std::vector inputShape = {1, 3, 24, 24}, - InferenceEngine::Precision prc = InferenceEngine::Precision::FP32) { - ngraph::element::Type type = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(prc); + ngraph::element::Type_t type = ngraph::element::Type_t::f32) { auto param0 = std::make_shared(type, ngraph::Shape(inputShape)); auto param1 = std::make_shared(type, ngraph::Shape(inputShape)); auto subtract = std::make_shared(param0, param1); @@ -238,8 +230,7 @@ static std::shared_ptr make2InputSubtract(std::vector } static std::shared_ptr makeNestedSplitConvConcat(std::vector inputShape = {1, 4, 20, 20}, - InferenceEngine::Precision netPrecision = InferenceEngine::Precision::FP32) { - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); @@ -271,8 +262,7 @@ static std::shared_ptr makeNestedSplitConvConcat(std::vector makeSplitConvConcatInputInBranch(std::vector inputShape = {1, 4, 20, 20}, - InferenceEngine::Precision netPrecision = InferenceEngine::Precision::FP32) { - auto ngPrc = 
FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape, inputShape}); auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); @@ -302,8 +292,7 @@ static std::shared_ptr makeSplitConvConcatInputInBranch(std::v } static std::shared_ptr makeSplitConvConcatNestedInBranch(std::vector inputShape = {1, 4, 20, 20}, - InferenceEngine::Precision netPrecision = InferenceEngine::Precision::FP32) { - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape, inputShape}); int localId = 0; #define SET_NAME(node) node->set_friendly_name(#node + std::to_string(localId++)); @@ -364,9 +353,8 @@ static std::shared_ptr makeSplitConvConcatNestedInBranch(std:: } static std::shared_ptr makeSplitConvConcatNestedInBranchNestedOut( - std::vector inputShape = {1, 4, 20, 20}, - InferenceEngine::Precision netPrecision = InferenceEngine::Precision::FP32) { - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + std::vector inputShape = {1, 4, 20, 20}, + ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape, inputShape}); int localId = 0; #define SET_NAME(node) node->set_friendly_name(#node + std::to_string(localId++)); @@ -467,8 +455,7 @@ static std::shared_ptr makeSplitConvConcatNestedInBranchNested } static std::shared_ptr makeConvBias(std::vector inputShape = {1, 3, 24, 24}, - InferenceEngine::Precision prc = InferenceEngine::Precision::FP32) { - ngraph::element::Type type = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(prc); + ngraph::element::Type type = ngraph::element::Type_t::f32) { auto parameter = ngraph::builder::makeParams(type, {inputShape}); parameter[0]->set_friendly_name("parameter"); auto weights = ngraph::opset1::Constant::create(type, ngraph::Shape{6, 3, 1, 1}, {1}); @@ -486,8 +473,7 @@ static std::shared_ptr makeConvBias(std::vector inputS } static std::shared_ptr makeReadConcatSplitAssign(std::vector inputShape = {1, 1, 2, 4}, - InferenceEngine::Precision prc = InferenceEngine::Precision::FP32) { - ngraph::element::Type type = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(prc); + ngraph::element::Type type = ngraph::element::Type_t::f32) { auto parameter = ngraph::builder::makeParams(type, {inputShape}); parameter[0]->set_friendly_name("parameter"); auto init_const = ngraph::op::Constant::create(element::f32, Shape{1, 1, 2, 2}, {0, 0, 0, 0}); diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/data_utils.hpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/data_utils.hpp similarity index 100% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/data_utils.hpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/data_utils.hpp diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp similarity index 94% rename from inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp 
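Illustration of the subgraph_builders.hpp change above: the builders now take an ngraph element type directly instead of an InferenceEngine::Precision, so callers no longer go through FuncTestUtils precision conversion. A minimal sketch of a caller after this change; the function and type names come from the hunk, while the wrapping function and the f16 example value are illustrative assumptions, not part of the patch:

    #include "ngraph_functions/subgraph_builders.hpp"

    void buildExampleFunctions() {
        // default precision is ngraph::element::Type_t::f32
        auto fnDefault = ngraph::builder::subgraph::makeSingleConv();
        // pass an ngraph element type directly instead of InferenceEngine::Precision::FP16
        auto fnF16 = ngraph::builder::subgraph::makeSingleConv({1, 3, 24, 24},
                                                               ngraph::element::Type_t::f16);
    }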
index 0682cc26f7cccd..c4e9b51cc529df 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp @@ -79,6 +79,12 @@ enum PoolingTypes { MAX, AVG }; + +enum ROIPoolingTypes { + ROI_MAX, + ROI_BILINEAR +}; + enum ActivationTypes { None, Sigmoid, @@ -233,6 +239,7 @@ inline ngraph::NodeVector castOps2Nodes(const std::vector> interpreterFunction(const std::shared_ptr &function, const std::vector> &inputs, + element::Type_t inType = element::Type_t::undefined, const std::vector convertType = {}); // @@ -245,7 +252,8 @@ void CompareFunctions(const Function &actual, const Function &expected); std::shared_ptr foldFunction(const std::shared_ptr &function, - const std::vector> &inputs); + const std::vector> &inputs, + element::Type_t inpType = element::Type_t::undefined); std::vector> getConstData(const std::shared_ptr &function, std::vector convertType = {}); @@ -253,7 +261,7 @@ std::vector> getConstData(const std::shared_ptr getNodeSharedPtr(const ngraph::NodeTypeInfo &type_info, const ngraph::OutputVector &outputVector); -std::vector convertOutputPrecision(std::vector &output, +std::vector convertOutputPrecision(const std::vector &output, const element::Type_t &fromPrecision, const element::Type_t &toPrecision, const size_t elementsCount); diff --git a/inference-engine/tests/ngraph_functions/src/activation.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/activation.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/activation.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/activation.cpp diff --git a/inference-engine/tests/ngraph_functions/src/batch_norm.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/batch_norm.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/batch_norm.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/batch_norm.cpp diff --git a/inference-engine/tests/ngraph_functions/src/batch_to_space.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/batch_to_space.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/batch_to_space.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/batch_to_space.cpp diff --git a/inference-engine/tests/ngraph_functions/src/binary_convolution.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/binary_convolution.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/binary_convolution.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/binary_convolution.cpp diff --git a/inference-engine/tests/ngraph_functions/src/broadcast.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/broadcast.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/broadcast.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/broadcast.cpp diff --git a/inference-engine/tests/ngraph_functions/src/comparison.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/comparison.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/comparison.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/comparison.cpp diff --git a/inference-engine/tests/ngraph_functions/src/concat.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/concat.cpp similarity 
index 100% rename from inference-engine/tests/ngraph_functions/src/concat.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/concat.cpp diff --git a/inference-engine/tests/ngraph_functions/src/convolution.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/convolution.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/convolution.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/convolution.cpp diff --git a/inference-engine/tests/ngraph_functions/src/convolution_backprop_data.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/convolution_backprop_data.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/convolution_backprop_data.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/convolution_backprop_data.cpp diff --git a/inference-engine/tests/ngraph_functions/src/ctc_loss.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/ctc_loss.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/ctc_loss.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/ctc_loss.cpp diff --git a/inference-engine/tests/ngraph_functions/src/cum_sum.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/cum_sum.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/cum_sum.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/cum_sum.cpp diff --git a/inference-engine/tests/ngraph_functions/src/depth_to_space.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/depth_to_space.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/depth_to_space.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/depth_to_space.cpp diff --git a/inference-engine/tests/ngraph_functions/src/detection_output.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/detection_output.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/detection_output.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/detection_output.cpp diff --git a/inference-engine/tests/ngraph_functions/src/eltwise.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/eltwise.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/eltwise.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/eltwise.cpp diff --git a/inference-engine/tests/ngraph_functions/src/embedding_bag_offsets_sum.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/embedding_bag_offsets_sum.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/embedding_bag_offsets_sum.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/embedding_bag_offsets_sum.cpp diff --git a/inference-engine/tests/ngraph_functions/src/embedding_bag_packed_sum.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/embedding_bag_packed_sum.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/embedding_bag_packed_sum.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/embedding_bag_packed_sum.cpp diff --git a/inference-engine/tests/ngraph_functions/src/embedding_segments_sum.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/embedding_segments_sum.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/embedding_segments_sum.cpp rename 
to inference-engine/tests/ngraph_helpers/ngraph_functions/src/embedding_segments_sum.cpp diff --git a/inference-engine/tests/ngraph_functions/src/fake_quantize.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/fake_quantize.cpp similarity index 99% rename from inference-engine/tests/ngraph_functions/src/fake_quantize.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/fake_quantize.cpp index ccfdb87eafe886..13e687de845130 100644 --- a/inference-engine/tests/ngraph_functions/src/fake_quantize.cpp +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/fake_quantize.cpp @@ -5,7 +5,6 @@ #include #include -#include #include "ngraph_functions/builders.hpp" namespace ngraph { diff --git a/inference-engine/tests/ngraph_functions/src/fully_connected.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/fully_connected.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/fully_connected.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/fully_connected.cpp diff --git a/inference-engine/tests/ngraph_functions/src/gather_nd.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/gather_nd.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/gather_nd.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/gather_nd.cpp diff --git a/inference-engine/tests/ngraph_functions/src/group_convolution.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/group_convolution.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/group_convolution.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/group_convolution.cpp diff --git a/inference-engine/tests/ngraph_functions/src/group_convolution_backprop_data.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/group_convolution_backprop_data.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/group_convolution_backprop_data.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/group_convolution_backprop_data.cpp diff --git a/inference-engine/tests/ngraph_functions/src/gru_cell.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/gru_cell.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/gru_cell.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/gru_cell.cpp diff --git a/inference-engine/tests/ngraph_functions/src/input_layer.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/input_layer.cpp similarity index 87% rename from inference-engine/tests/ngraph_functions/src/input_layer.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/input_layer.cpp index 913e450135504e..a09e46b6d23308 100644 --- a/inference-engine/tests/ngraph_functions/src/input_layer.cpp +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/input_layer.cpp @@ -17,8 +17,7 @@ std::shared_ptr makeInputLayer(const element::Type &type, ngraph:: std::shared_ptr input; switch (inputType) { case ngraph::helpers::InputLayerType::CONSTANT: { - std::vector data(ngraph::shape_size(shape)); - input = ngraph::builder::makeConstant(type, shape, data); + input = ngraph::builder::makeConstant(type, shape, {}, true); break; } case ngraph::helpers::InputLayerType::PARAMETER: diff --git a/inference-engine/tests/ngraph_functions/src/logical.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/logical.cpp similarity index 
100% rename from inference-engine/tests/ngraph_functions/src/logical.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/logical.cpp diff --git a/inference-engine/tests/ngraph_functions/src/lstm_cell.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/lstm_cell.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/lstm_cell.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/lstm_cell.cpp diff --git a/inference-engine/tests/ngraph_functions/src/mat_mul.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/mat_mul.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/mat_mul.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/mat_mul.cpp diff --git a/inference-engine/tests/ngraph_functions/src/minimum_maximum.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/minimum_maximum.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/minimum_maximum.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/minimum_maximum.cpp diff --git a/inference-engine/tests/ngraph_functions/src/mvn.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/mvn.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/mvn.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/mvn.cpp diff --git a/inference-engine/tests/ngraph_functions/src/non_max_suppression.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/non_max_suppression.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/non_max_suppression.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/non_max_suppression.cpp diff --git a/inference-engine/tests/ngraph_functions/src/normalize_l2.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/normalize_l2.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/normalize_l2.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/normalize_l2.cpp diff --git a/inference-engine/tests/ngraph_functions/src/pad.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/pad.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/pad.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/pad.cpp diff --git a/inference-engine/tests/ngraph_functions/src/params_vector.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/params_vector.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/params_vector.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/params_vector.cpp diff --git a/inference-engine/tests/ngraph_functions/src/pooling.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/pooling.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/pooling.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/pooling.cpp diff --git a/inference-engine/tests/ngraph_helpers/ngraph_functions/src/precomp.hpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/precomp.hpp new file mode 100644 index 00000000000000..eebe470d7f0b62 --- /dev/null +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/precomp.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include diff --git a/inference-engine/tests/ngraph_functions/src/proposal.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/proposal.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/proposal.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/proposal.cpp diff --git a/inference-engine/tests/ngraph_functions/src/reduce.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/reduce.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/reduce.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/reduce.cpp diff --git a/inference-engine/tests/ngraph_functions/src/rnn_cell.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/rnn_cell.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/rnn_cell.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/rnn_cell.cpp diff --git a/inference-engine/tests/ngraph_helpers/ngraph_functions/src/roi_pooling.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/roi_pooling.cpp new file mode 100644 index 00000000000000..3ae3532c8dc068 --- /dev/null +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/roi_pooling.cpp @@ -0,0 +1,30 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +// + +#include +#include + +#include "ngraph_functions/builders.hpp" + +namespace ngraph { +namespace builder { + +std::shared_ptr makeROIPooling(const Output& input, + const Output& coords, + const Shape& output_size, + const float spatial_scale, + const ngraph::helpers::ROIPoolingTypes& roi_pool_type) { + switch (roi_pool_type) { + case helpers::ROIPoolingTypes::ROI_MAX: + return std::make_shared(input, coords, output_size, spatial_scale, "max"); + case helpers::ROIPoolingTypes::ROI_BILINEAR: + return std::make_shared(input, coords, output_size, spatial_scale, "bilinear"); + default: + throw std::runtime_error("Incorrect type of ROIPooling operation"); + } +} + +} // namespace builder +} // namespace ngraph diff --git a/inference-engine/tests/ngraph_functions/src/scatter_ND_update.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/scatter_ND_update.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/scatter_ND_update.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/scatter_ND_update.cpp diff --git a/inference-engine/tests/ngraph_functions/src/scatter_elements_update.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/scatter_elements_update.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/scatter_elements_update.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/scatter_elements_update.cpp diff --git a/inference-engine/tests/ngraph_functions/src/scatter_update.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/scatter_update.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/scatter_update.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/scatter_update.cpp diff --git a/inference-engine/tests/ngraph_functions/src/select.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/select.cpp similarity index 100% rename from 
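Illustration of the ROIPooling additions above: the new roi_pooling.cpp, together with the makeROIPooling declaration in builders.hpp and the ROIPoolingTypes enum in ngraph_helpers.hpp, gives the test helpers a builder for ROIPooling nodes. A hedged usage sketch follows; the parameter shapes, spatial scale, and wrapping function are illustrative assumptions rather than code from the patch:

    #include <memory>
    #include "ngraph_functions/builders.hpp"

    std::shared_ptr<ngraph::Function> makeRoiPoolingFunction() {
        auto featureMap = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32,
                                                                      ngraph::Shape{1, 3, 8, 8});
        auto rois = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32,
                                                                ngraph::Shape{4, 5});
        // "max" mode is selected through the new enum; ROI_BILINEAR is the other option
        auto roiPooling = ngraph::builder::makeROIPooling(featureMap, rois, ngraph::Shape{6, 6}, 0.0625f,
                                                          ngraph::helpers::ROIPoolingTypes::ROI_MAX);
        return std::make_shared<ngraph::Function>(ngraph::NodeVector{roiPooling},
                                                  ngraph::ParameterVector{featureMap, rois});
    }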
inference-engine/tests/ngraph_functions/src/select.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/select.cpp diff --git a/inference-engine/tests/ngraph_functions/src/shuffle_channels.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/shuffle_channels.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/shuffle_channels.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/shuffle_channels.cpp diff --git a/inference-engine/tests/ngraph_functions/src/space_to_batch.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/space_to_batch.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/space_to_batch.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/space_to_batch.cpp diff --git a/inference-engine/tests/ngraph_functions/src/space_to_depth.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/space_to_depth.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/space_to_depth.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/space_to_depth.cpp diff --git a/inference-engine/tests/ngraph_functions/src/split.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/split.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/split.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/split.cpp diff --git a/inference-engine/tests/ngraph_functions/src/squeeze_unsqueeze.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/squeeze_unsqueeze.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/squeeze_unsqueeze.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/squeeze_unsqueeze.cpp diff --git a/inference-engine/tests/ngraph_functions/src/strided_slice.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/strided_slice.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/strided_slice.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/strided_slice.cpp diff --git a/inference-engine/tests/ngraph_functions/src/tile.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/tile.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/tile.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/tile.cpp diff --git a/inference-engine/tests/ngraph_functions/src/utils/ngraph_helpers.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp similarity index 88% rename from inference-engine/tests/ngraph_functions/src/utils/ngraph_helpers.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp index 4cc9029f0adf49..8fade97cd7bb77 100644 --- a/inference-engine/tests/ngraph_functions/src/utils/ngraph_helpers.cpp +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp @@ -79,7 +79,9 @@ OutputVector convert2OutputVector(const std::vector> &node return outs; } -std::vector> interpreterFunction(const std::shared_ptr &function, const std::vector> &inputs, +std::vector> interpreterFunction(const std::shared_ptr &function, + const std::vector> &inputs, + element::Type_t inType, const std::vector convertType) { runtime::Backend::set_backend_shared_library_search_directory(""); auto backend = runtime::Backend::create("INTERPRETER"); @@ -98,7 +100,12 @@ std::vector> 
interpreterFunction(const std::shared_ptr const auto ¶meterType = parameter->get_element_type(); const auto ¶meterSize = shape_size(parameterShape) * parameterType.size(); - const auto &input = inputs[parameterIndex]; + auto input = inputs[parameterIndex]; + + if (inType != element::undefined && inType != parameterType) { + input = convertOutputPrecision(input, inType, parameter->get_element_type(), shape_size(parameter->get_shape())); + } + const auto &inputSize = input.size(); NGRAPH_CHECK(parameterSize == inputSize, "Got parameter (", parameter->get_friendly_name(), ") of size ", parameterSize, @@ -137,22 +144,31 @@ std::vector> interpreterFunction(const std::shared_ptr } std::shared_ptr foldFunction(const std::shared_ptr &function, - const std::vector> &inputs) { + const std::vector> &inputs, element::Type_t inpType) { std::vector paramElementTypes; std::vector paramShapes; + std::vector> vecTmpConvertedInputs; + vecTmpConvertedInputs.reserve(inputs.size()); + + std::vector inBuffers; + inBuffers.reserve(inputs.size()); + for (const auto ¶m : function->get_parameters()) { paramElementTypes.emplace_back(param->get_element_type()); paramShapes.emplace_back(param->get_shape()); + auto parameterIndex = function->get_parameter_index(param); + auto& input = inputs[parameterIndex]; + + if (inpType != element::undefined && inpType != paramElementTypes.back()) { + vecTmpConvertedInputs.emplace_back(convertOutputPrecision(input, inpType, param->get_element_type(), shape_size(param->get_shape()))); + inBuffers.push_back(vecTmpConvertedInputs.back().data()); + } else { + // const_cast added to satisfy specialize_function interface + // which requires inputs as std::vector + inBuffers.push_back(const_cast(input.data())); + } } - auto inBuffers = std::vector(inputs.size()); - std::transform(inputs.cbegin(), inputs.cend(), inBuffers.begin(), - [](const std::vector &input) { - // const_cast added to satisfy specialize_function interface - // which requires inputs as std::vector - return const_cast(input.data()); - }); - const auto &foldedFunc = specialize_function(function, paramElementTypes, paramShapes, inBuffers); ngraph::pass::ConstantFolding().run_on_function(foldedFunc); for (const auto &op : foldedFunc->get_ops()) { @@ -250,7 +266,7 @@ std::shared_ptr getNodeSharedPtr(const ngraph::NodeTypeInfo &type_ } template -std::vector convertPrecision(std::vector &buffer, const size_t elementsCount, const size_t elementSize) { +std::vector convertPrecision(const std::vector &buffer, const size_t elementsCount, const size_t elementSize) { std::vector convertedData(elementsCount * elementSize); const fromPrec *src = reinterpret_cast(buffer.data()); toPrec *dst = reinterpret_cast(convertedData.data()); @@ -270,8 +286,10 @@ bool is_tensor_iterator_exist(const std::shared_ptr & func) { return false; } -std::vector convertOutputPrecision(std::vector &output, const element::Type_t &fromPrecision, const element::Type_t &toPrecision, - const size_t elementsCount) { +std::vector convertOutputPrecision(const std::vector &output, + const element::Type_t &fromPrecision, + const element::Type_t &toPrecision, + const size_t elementsCount) { switch (fromPrecision) { case element::Type_t::u8: { switch (toPrecision) { @@ -520,6 +538,12 @@ std::vector convertOutputPrecision(std::vector &outp case element::Type_t::u64: { return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); } + case element::Type_t::bf16: { + return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); + } 
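Illustration of the ngraph_helpers changes above: interpreterFunction and foldFunction now accept an optional input element type, and when it differs from a parameter's type the raw byte buffers are converted through convertOutputPrecision (which now also handles bf16 and boolean) before the reference run. A small sketch of the intended call pattern; the ngraph::helpers namespace qualification, the wrapping function, and the f16 source precision are assumptions for illustration only:

    #include <cstdint>
    #include <memory>
    #include <vector>
    #include "ngraph_functions/utils/ngraph_helpers.hpp"

    std::vector<std::vector<std::uint8_t>> runReference(
            const std::shared_ptr<ngraph::Function>& function,
            const std::vector<std::vector<std::uint8_t>>& inputs) {
        // inputs were generated as f16 byte buffers; the helper converts each buffer
        // to the matching parameter precision before executing on the reference backend
        return ngraph::helpers::interpreterFunction(function, inputs, ngraph::element::Type_t::f16);
    }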
+ case element::Type_t::boolean: { + return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); + } default: throw std::runtime_error("convertOutputPrecision can't convert from: " + element::Type(fromPrecision).get_type_name() + " to: " + element::Type(toPrecision).get_type_name()); @@ -548,6 +572,9 @@ std::vector convertOutputPrecision(std::vector &outp case element::Type_t::f32: { return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); } + case element::Type_t::bf16: { + return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); + } case element::Type_t::u64: { return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); } @@ -556,6 +583,43 @@ std::vector convertOutputPrecision(std::vector &outp element::Type(toPrecision).get_type_name()); } } + case element::Type_t::bf16: { + switch (toPrecision) { + case element::Type_t::u8: { + return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); + } + case element::Type_t::u16: { + return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); + } + case element::Type_t::i8: { + return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); + } + case element::Type_t::i16: { + return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); + } + case element::Type_t::i32: { + return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); + } + case element::Type_t::i64: { + return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); + } + case element::Type_t::f32: { + return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); + } + case element::Type_t::u64: { + return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); + } + case element::Type_t::bf16: { + return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); + } + case element::Type_t::boolean: { + return convertPrecision(output, elementsCount, element::Type(toPrecision).size()); + } + default: + throw std::runtime_error("convertOutputPrecision can't convert from: " + element::Type(fromPrecision).get_type_name() + " to: " + + element::Type(toPrecision).get_type_name()); + } + } default: throw std::runtime_error("convertOutputPrecision can't convert from: " + element::Type(fromPrecision).get_type_name() + " precision"); } diff --git a/inference-engine/tests/ngraph_functions/src/variadic_split.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/variadic_split.cpp similarity index 100% rename from inference-engine/tests/ngraph_functions/src/variadic_split.cpp rename to inference-engine/tests/ngraph_helpers/ngraph_functions/src/variadic_split.cpp diff --git a/inference-engine/tests/unit/gna/gna_get_2d_reshaped_data.cpp b/inference-engine/tests/unit/gna/gna_get_2d_reshaped_data.cpp new file mode 100644 index 00000000000000..df937edd2226f4 --- /dev/null +++ b/inference-engine/tests/unit/gna/gna_get_2d_reshaped_data.cpp @@ -0,0 +1,87 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include + +#include +// to suppress deprecated definition errors +#define IMPLEMENT_INFERENCE_ENGINE_PLUGIN +#include "gna_groups.hpp" + +namespace { +std::vector, std::vector>> input_shapes_2d { + {{1, 128}, {8, 16}}, + {{1, 64}, {8, 8}}, + {{1, 56}, {7, 8}}, + {{1, 48}, {6, 8}}, + {{1, 40}, {5, 8}}, + {{1, 32}, {4, 8}}, + {{1, 24}, {3, 8}}, + {{1, 16}, {2, 8}}, + {{1, 8}, 
{1, 8}}, + {{1, 19}, {1, 19}}, + {{128, 1}, {8, 16}}, + {{64, 1}, {8, 8}}, + {{56, 1}, {7, 8}}, + {{48, 1}, {6, 8}}, + {{40, 1}, {5, 8}}, + {{32, 1}, {4, 8}}, + {{24, 1}, {3, 8}}, + {{16, 1}, {2, 8}}, + {{8, 1}, {1, 8}}, + {{19, 1}, {1, 19}} +}; + +std::vector, std::vector>> input_shapes_4d { + {{1, 2, 2, 32}, {8, 16, 1, 1}}, + {{1, 2, 4, 8}, {8, 8, 1, 1}}, + {{1, 2, 2, 14}, {7, 8, 1, 1}}, + {{1, 2, 4, 6}, {6, 8, 1, 1}}, + {{1, 2, 2, 10}, {5, 8, 1, 1}}, + {{1, 2, 2, 8}, {4, 8, 1, 1}}, + {{1, 2, 2, 6}, {3, 8, 1, 1}}, + {{1, 2, 2, 4}, {2, 8, 1, 1}}, + {{1, 2, 2, 2}, {1, 8, 1, 1}}, + {{1, 1, 1, 19}, {1, 19, 1, 1}}, + {{32, 2, 2, 1}, {8, 16, 1, 1}}, + {{8, 4, 2, 1}, {8, 8, 1, 1}}, + {{14, 2, 2, 1}, {7, 8, 1, 1}}, + {{6, 4, 2, 1}, {6, 8, 1, 1}}, + {{10, 2, 2, 1}, {5, 8, 1, 1}}, + {{8, 2, 2, 1}, {4, 8, 1, 1}}, + {{6, 2, 2, 1}, {3, 8, 1, 1}}, + {{4, 2, 2, 1}, {2, 8, 1, 1}}, + {{2, 2, 2, 1}, {1, 8, 1, 1}}, + {{19, 1, 1, 1}, {1, 19, 1, 1}} +}; + +class Get2DReshapedDataTest : public ::testing::Test { + protected: + const char* input_name = "input"; + const InferenceEngine::Precision precision = InferenceEngine::Precision::FP32; + const size_t max_batch_size = 8; + void Reshape2dAndCheck(const std::pair, std::vector>& input_shape, + InferenceEngine::Layout layout) const { + auto data = std::make_shared(input_name, + InferenceEngine::TensorDesc(precision, input_shape.first, layout)); + auto new_data = GNAPluginNS::Get2DReshapedData(data, max_batch_size); + ASSERT_EQ(new_data->getDims(), input_shape.second); + ASSERT_EQ(new_data->getPrecision(), precision); + ASSERT_EQ(new_data->getLayout(), layout); + } +}; + +TEST_F(Get2DReshapedDataTest, testReshape2D) { + auto layout = InferenceEngine::NC; + for (const auto &input_shape : input_shapes_2d) { + Reshape2dAndCheck(input_shape, layout); + } +} + +TEST_F(Get2DReshapedDataTest, testReshape4D) { + auto layout = InferenceEngine::NCHW; + for (const auto &input_shape : input_shapes_4d) { + Reshape2dAndCheck(input_shape, layout); + } +} +} // namespace \ No newline at end of file diff --git a/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_memory_state_internal_test.cpp b/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_memory_state_internal_test.cpp index ec2cd9c0133b9e..39377b5976a9dc 100644 --- a/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_memory_state_internal_test.cpp +++ b/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_memory_state_internal_test.cpp @@ -9,7 +9,7 @@ #include #include -#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_imemory_state_internal.hpp" +#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_ivariable_state_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iasync_infer_request_internal.hpp" diff --git a/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_plugin_test.cpp b/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_plugin_test.cpp index cdf1e11efb0aaa..4aa4fbf8fc3955 100644 --- a/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_plugin_test.cpp +++ b/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_plugin_test.cpp @@ -25,7 +25,7 @@ class InferenceEnginePluginInternalTest : public ::testing::Test { shared_ptr mockExeNetworkInternal; shared_ptr mockExeNetworkTS; shared_ptr mockInferRequestInternal; - MockNotEmptyICNNNetwork mockNotEmptyNet; + std::shared_ptr mockNotEmptyNet 
= std::make_shared(); std::string pluginId; ResponseDesc dsc; @@ -50,14 +50,14 @@ class InferenceEnginePluginInternalTest : public ::testing::Test { void getInferRequestWithMockImplInside(IInferRequest::Ptr &request) { ExecutableNetwork exeNetwork; InputsDataMap inputsInfo; - mockNotEmptyNet.getInputsInfo(inputsInfo); + mockNotEmptyNet->getInputsInfo(inputsInfo); OutputsDataMap outputsInfo; - mockNotEmptyNet.getOutputsInfo(outputsInfo); + mockNotEmptyNet->getOutputsInfo(outputsInfo); mockInferRequestInternal = make_shared(inputsInfo, outputsInfo); mockExeNetworkTS = make_shared(); EXPECT_CALL(*mock_plugin_impl.get(), LoadExeNetworkImpl(_, _)).WillOnce(Return(mockExeNetworkTS)); EXPECT_CALL(*mockExeNetworkTS.get(), CreateInferRequestImpl(_, _)).WillOnce(Return(mockInferRequestInternal)); - ASSERT_NO_THROW(exeNetwork = plugin->LoadNetwork(mockNotEmptyNet, {})); + ASSERT_NO_THROW(exeNetwork = plugin->LoadNetwork(InferenceEngine::CNNNetwork(mockNotEmptyNet), {})); ASSERT_NO_THROW(request = exeNetwork.CreateInferRequest()); } }; diff --git a/inference-engine/tests/unit/inference_engine/ie_executable_network_test.cpp b/inference-engine/tests/unit/inference_engine/ie_executable_network_test.cpp index 810341ff9b54a9..a6a8ec507d6332 100644 --- a/inference-engine/tests/unit/inference_engine/ie_executable_network_test.cpp +++ b/inference-engine/tests/unit/inference_engine/ie_executable_network_test.cpp @@ -11,7 +11,7 @@ #include "unit_test_utils/mocks/mock_iexecutable_network.hpp" #include "unit_test_utils/mocks/mock_iinfer_request.hpp" -#include "unit_test_utils/mocks/mock_ie_imemory_state.hpp" +#include "unit_test_utils/mocks/mock_ie_ivariable_state.hpp" #include "unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp" #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp" diff --git a/inference-engine/tests/unit/vpu/CMakeLists.txt b/inference-engine/tests/unit/vpu/CMakeLists.txt index 215562f2af178a..5be30a30926832 100644 --- a/inference-engine/tests/unit/vpu/CMakeLists.txt +++ b/inference-engine/tests/unit/vpu/CMakeLists.txt @@ -13,7 +13,7 @@ addIeTargetTest( ROOT ${CMAKE_CURRENT_SOURCE_DIR} ADDITIONAL_SOURCE_DIRS # because ngraphFunctions sources need to be compiled with LTO as well - "${IE_TESTS_ROOT}/ngraph_functions/src" + "${IE_TESTS_ROOT}/ngraph_helpers/ngraph_functions/src" INCLUDES "${IE_MAIN_SOURCE_DIR}/src/vpu/myriad_plugin" "${IE_MAIN_SOURCE_DIR}/thirdparty/movidius" diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/crop_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/crop_tests.cpp deleted file mode 100644 index a845eaf77e5c50..00000000000000 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/crop_tests.cpp +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "ir_gen_helper.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace single_layer_tests; - -struct crop_base_params { - std::vector in_dims; - std::vector out_dims; - std::vector offsets; -}; - -#ifdef IN -#undef IN -#endif - -struct crop_test_params : crop_base_params { - std::string device_name; - - crop_test_params(std::string name, crop_base_params params) : - crop_base_params(params), device_name(name) {} -}; - -template -void ref_crop(InferenceEngine::TBlob &src, 
InferenceEngine::TBlob &dst, crop_test_params prm) { - data_t *dst_ptr = dst.data(); - - int ndims = prm.in_dims.size(); - - size_t OFFSET_N = prm.offsets.at(0); - size_t OFFSET_C = prm.offsets.at(1); - size_t OFFSET_D = ndims == 5 ? prm.offsets.at(ndims - 3) : 0; - size_t OFFSET_H = prm.offsets.at(ndims - 2); - size_t OFFSET_W = prm.offsets.at(ndims - 1); - - size_t ON = prm.out_dims[0]; - size_t OC = prm.out_dims[1]; - size_t OD = ndims == 5 ? prm.out_dims[ndims - 3] : 1; - size_t OH = prm.out_dims[ndims - 2]; - size_t OW = prm.out_dims[ndims - 1]; - - size_t IN = prm.in_dims[0]; - size_t IC = prm.in_dims[1]; - size_t ID = ndims == 5 ? prm.in_dims[ndims - 3] : 1; - size_t IH = prm.in_dims[ndims - 2]; - size_t IW = prm.in_dims[ndims - 1]; - - auto dst_off = [=](size_t n, size_t c, size_t d, size_t h, size_t w) -> size_t { - return (n * OC * OD * OH * OW + c * OD * OH * OW + d * OH * OW + h * OW + w); - }; - auto src_off = [=](size_t n, size_t c, size_t d, size_t h, size_t w) -> size_t { - return (n * IC * ID * IH * IW + c * ID * IH * IW + d * IH * IW + h * IW + w); - }; - - ASSERT_GE(IN - OFFSET_N, ON); - ASSERT_GE(IC - OFFSET_C, OC); - ASSERT_GE(ID - OFFSET_D, OD); - ASSERT_GE(IH - OFFSET_H, OH); - ASSERT_GE(IW - OFFSET_W, OW); - - data_t* src_ptr = src.data(); - for (size_t n = 0; n < ON; ++n) { - for (size_t c = 0; c < OC; ++c) { - for (size_t d = 0; d < OD; ++d) { - for (size_t h = 0; h < OH; ++h) { - for (size_t w = 0; w < OW; ++w) { - dst_ptr[dst_off(n, c, d, h, w)] = src_ptr[src_off(n + OFFSET_N, c + OFFSET_C, d + OFFSET_D, - h + OFFSET_H, w + OFFSET_W)]; - } - } - } - } - } -} - -class smoke_CropOnlyTest: public TestsCommon, - public WithParamInterface { - std::string layers_t = R"V0G0N( - - - - - - - - - - - _ID0_ - _ID1_ - _ID2_ - _ID3_ - _ID4_ - - - - - _OD0_ - _OD1_ - _OD2_ - _OD3_ - _OD4_ - - - -)V0G0N"; - - std::string edges_t = R"V0G0N( - -)V0G0N"; - - std::string getModel(crop_test_params p) { - std::string model = layers_t; - - auto dims_size = p.in_dims.size(); - - if (dims_size == 4) { - REMOVE_LINE(model, ""); - REMOVE_LINE(model, "_ID4_"); - REMOVE_LINE(model, "_OD4_"); - } - - REPLACE_WITH_NUM(model, "_ID0_", p.in_dims[0]); - REPLACE_WITH_NUM(model, "_ID1_", p.in_dims[1]); - REPLACE_WITH_NUM(model, "_ID2_", p.in_dims[2]); - REPLACE_WITH_NUM(model, "_ID3_", p.in_dims[3]); - if (dims_size == 5) - REPLACE_WITH_NUM(model, "_ID4_", p.in_dims[4]); - - REPLACE_WITH_NUM(model, "_OD0_", p.out_dims[0]); - REPLACE_WITH_NUM(model, "_OD1_", p.out_dims[1]); - REPLACE_WITH_NUM(model, "_OD2_", p.out_dims[2]); - REPLACE_WITH_NUM(model, "_OD3_", p.out_dims[3]); - if (dims_size == 5) - REPLACE_WITH_NUM(model, "_OD4_", p.out_dims[4]); - - REPLACE_WITH_NUM(model, "_OF0_", p.offsets[0]); - REPLACE_WITH_NUM(model, "_OF1_", p.offsets[1]); - REPLACE_WITH_NUM(model, "_OF2_", p.offsets[2]); - REPLACE_WITH_NUM(model, "_OF3_", p.offsets[3]); - if (dims_size == 5) - REPLACE_WITH_NUM(model, "_OF4_", p.offsets[4]); - - model = IRTemplateGenerator::getIRTemplate("Crop_Only", p.in_dims, "FP32", model, edges_t); - - return model; - } - -protected: - virtual void SetUp() { - try { - crop_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr()); - - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.in_dims.size()) { - case 4: layout = InferenceEngine::NCHW; break; - case 5: layout = InferenceEngine::NCDHW; break; - } - - InputsDataMap inputs = network.getInputsInfo(); - DataPtr 
inPtr1 = inputs["in1"]->getInputData(); - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob(inPtr1->getTensorDesc()); - src->allocate(); - fill_data(src->buffer(), src->size()); - - TBlob* srcPtr = dynamic_cast*>(src.get()); - BlobMap srcs; - srcs.insert(std::pair("in1", src)); - - OutputsDataMap out = network.getOutputsInfo(); - BlobMap dstBlobs; - std::pair item = *out.begin(); - TBlob::Ptr dst; - dst = make_shared_blob(item.second->getTensorDesc()); - dst->allocate(); - dstBlobs[item.first] = dst; - - TBlob::Ptr dst_ref; - dst_ref = make_shared_blob(item.second->getTensorDesc()); - dst_ref->allocate(); - - ref_crop(*srcPtr, *dst_ref, p); - - ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - inferRequest.SetInput(srcs); - inferRequest.SetOutput(dstBlobs); - inferRequest.Infer(); - - compare(*dstBlobs.begin()->second, *dst_ref); - - } catch (const details::InferenceEngineException &e) { - FAIL() << e.what(); - } - } -}; - -#define case_1 crop_base_params({{1, 5, 32, 32}, {1, 2, 23, 23}, {0, 2, 5, 4}}) -#define case_2 crop_base_params({{1, 5, 32, 32}, {1, 5, 5, 5}, {0, 0, 20, 20}}) -#define case_3 crop_base_params({{1, 5, 32, 32}, {1, 5, 32, 10}, {0, 0, 0, 20}}) -#define case_4 crop_base_params({{1, 5, 32, 20}, {1, 5, 30, 10}, {0, 0, 2, 10}}) -#define case_5 crop_base_params({{1, 5, 32, 20, 14}, {1, 5, 30, 10, 8}, {0, 0, 2, 10, 6}}) -#define case_6 crop_base_params({{5, 9, 32, 20, 14}, {2, 5, 30, 10, 8}, {3, 4, 2, 10, 6}}) - -TEST_P(smoke_CropOnlyTest, TestsCrop) {} - -std::string getTestCaseName(testing::TestParamInfo obj) { - int ndims = obj.param.in_dims.size(); - - return obj.param.device_name + - "_in" + std::to_string(obj.param.in_dims[0]) + - "_ic" + std::to_string(obj.param.in_dims[1]) + - "_id" + std::to_string(ndims == 5 ? obj.param.in_dims[ndims - 3] : 1) + - "_ih" + std::to_string(obj.param.in_dims[ndims - 2]) + - "_iw" + std::to_string(obj.param.in_dims[ndims - 1]) + - "_on" + std::to_string(obj.param.out_dims[0]) + - "_oc" + std::to_string(obj.param.out_dims[1]) + - "_od" + std::to_string(ndims == 5 ? 
obj.param.out_dims[ndims - 3] : 1) + - "_oh" + std::to_string(obj.param.out_dims[ndims - 2]) + - "_ow" + std::to_string(obj.param.out_dims[ndims - 1]); -} - -crop_test_params crop_only_test_cases[] = { - crop_test_params("CPU", case_1), - crop_test_params("CPU", case_2), - crop_test_params("CPU", case_3), - crop_test_params("CPU", case_4), - crop_test_params("CPU", case_5), - crop_test_params("CPU", case_6), -}; - -INSTANTIATE_TEST_CASE_P( - TestsPooling, smoke_CropOnlyTest, ::testing::ValuesIn(crop_only_test_cases), getTestCaseName); diff --git a/inference-engine/thirdparty/CMakeLists.txt b/inference-engine/thirdparty/CMakeLists.txt index 9185fcd4787547..cd35228121fd1d 100644 --- a/inference-engine/thirdparty/CMakeLists.txt +++ b/inference-engine/thirdparty/CMakeLists.txt @@ -50,7 +50,7 @@ function(ie_build_pugixml) set(BUILD_TESTS_current ${BUILD_TESTS}) set(BUILD_TESTS OFF CACHE BOOL "Build tests" FORCE) set(BUILD_SHARED_LIBS OFF) - add_subdirectory(pugixml) + add_subdirectory(pugixml EXCLUDE_FROM_ALL) set(BUILD_TESTS ${BUILD_TESTS_current} CACHE BOOL "Build tests" FORCE) endfunction() @@ -63,7 +63,7 @@ else() endif() add_subdirectory(stb_lib) -add_subdirectory(ade) +add_subdirectory(ade EXCLUDE_FROM_ALL) add_subdirectory(fluid/modules/gapi) set_target_properties(ade fluid stb_image PROPERTIES FOLDER thirdparty) diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_b_fs_zyx_fsv16.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_b_fs_zyx_fsv16.cpp index 16f83ac34da4e2..746ac5c33f53b5 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_b_fs_zyx_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_b_fs_zyx_fsv16.cpp @@ -30,6 +30,8 @@ ParamsKey DeconvolutionKernel_b_fs_zyx_fsv16::GetSupportedKey() const { k.EnableInputWeightsType(WeightsType::F32); k.EnableInputDataType(Datatype::F16); k.EnableOutputDataType(Datatype::F16); + k.EnableOutputDataType(Datatype::INT8); + k.EnableOutputDataType(Datatype::UINT8); k.EnableInputWeightsType(WeightsType::F16); k.EnableInputLayout(DataLayout::b_fs_yx_fsv16); k.EnableOutputLayout(DataLayout::b_fs_yx_fsv16); @@ -44,6 +46,7 @@ ParamsKey DeconvolutionKernel_b_fs_zyx_fsv16::GetSupportedKey() const { k.EnableBatching(); k.EnableSubGroup(); k.EnableSubGroupShort(); + k.EnableDifferentTypes(); return k; } @@ -155,10 +158,11 @@ JitConstants DeconvolutionKernel_b_fs_zyx_fsv16::GetJitConstants(const deconvolu } jit.AddConstant(MakeJitConstant("OC_BLOCK", 16)); - if (output.GetDType() == Datatype::F32) + if (input.GetDType() == Datatype::F32) { jit.AddConstant(MakeJitConstant("DT_F32", 1)); - else + } else { jit.AddConstant(MakeJitConstant("DT_F16", 1)); + } auto mb_block = 1; auto ic_block = 16; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_bfyx_opt.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_bfyx_opt.cpp index ac89b0b5167460..d44e11f311848c 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_bfyx_opt.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/deconvolution/deconvolution_kernel_bfyx_opt.cpp @@ -26,6 +26,8 @@ ParamsKey DeconvolutionKernel_bfyx_opt::GetSupportedKey() 
const { k.EnableInputWeightsType(WeightsType::F32); k.EnableOutputDataType(Datatype::F16); k.EnableOutputDataType(Datatype::F32); + k.EnableOutputDataType(Datatype::INT8); + k.EnableOutputDataType(Datatype::UINT8); k.EnableInputLayout(DataLayout::bfyx); k.EnableOutputLayout(DataLayout::bfyx); k.EnableTensorOffset(); @@ -36,6 +38,7 @@ ParamsKey DeconvolutionKernel_bfyx_opt::GetSupportedKey() const { k.EnableSplitSupport(); k.EnableDepthwiseSeparableOpt(); k.EnableGroupedConvolution(); + k.EnableDifferentTypes(); return k; } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/gen9_common_conv_bwd_data.cl b/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/gen9_common_conv_bwd_data.cl index 12935b052f8d52..f1535269e31470 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/gen9_common_conv_bwd_data.cl +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/gen9_common_conv_bwd_data.cl @@ -1,5 +1,5 @@ /******************************************************************************* -* Copyright 2019 Intel Corporation +* Copyright 2019-2020 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,10 +14,21 @@ * limitations under the License. *******************************************************************************/ -#include "ocl_types.h" #include "include/fetch.cl" #include "include/data_types.cl" +#define INPUT_TYPE8 MAKE_VECTOR_TYPE(INPUT0_TYPE, 8) +#define OUTPUT_TYPE8 MAKE_VECTOR_TYPE(OUTPUT_TYPE, 8) +#define FILTER_TYPE8 MAKE_VECTOR_TYPE(FILTER_TYPE, 8) + +#if DT_F16 == 1 +#define FMA_ARG_TYPE half +#define FMA_ARG_TYPE8 half8 +#else +#define FMA_ARG_TYPE INPUT0_TYPE +#define FMA_ARG_TYPE8 INPUT_TYPE8 +#endif + #if ID > 1 #define CASE_3D 1 #else @@ -31,11 +42,11 @@ __attribute__((reqd_work_group_size(LWS_0, LWS_1, LWS_2))) // attr:no-format __attribute__((intel_reqd_sub_group_size(SUB_GROUP_SIZE))) // attr:no-format #endif KERNEL(gen9_common_conv_bwd_data_kernel)( - const __global DATA_T *diff_dst, - __global DATA_T * restrict diff_src, - const __global DATA_T *wei, + const __global INPUT0_TYPE *diff_dst, + __global OUTPUT_TYPE * restrict diff_src, + const __global FILTER_TYPE *wei, #if WITH_BIAS - const __global DATA_T *bias, + const __global BIAS_TYPE *bias, #endif #if HAS_FUSED_OPS_DECLS FUSED_OPS_DECLS, @@ -76,11 +87,11 @@ KERNEL(gen9_common_conv_bwd_data_kernel)( diff_dst += input_offset + mb * OC_FULL * G * OD_FULL * OH_FULL * OW_FULL + g * OC * OD_FULL * OH_FULL * OW_FULL * MB_BLOCK; #if WITH_BIAS - DATA8_T blockC00 = (DATA8_T)bias[g * IC + gic * IC_BLOCK + local_id]; - DATA8_T blockC01 = (DATA8_T)bias[g * IC + gic * IC_BLOCK + local_id]; + INPUT_TYPE8 blockC00 = (INPUT_TYPE8)bias[g * IC + gic * IC_BLOCK + local_id]; + INPUT_TYPE8 blockC01 = (INPUT_TYPE8)bias[g * IC + gic * IC_BLOCK + local_id]; #else - DATA8_T blockC00 = 0.0f; - DATA8_T blockC01 = 0.0f; + INPUT_TYPE8 blockC00 = INPUT0_VAL_ZERO; + INPUT_TYPE8 blockC01 = INPUT0_VAL_ZERO; #endif wei += gic * KD * KH * KW * OC_BLOCK * IC_BLOCK @@ -111,13 +122,13 @@ KERNEL(gen9_common_conv_bwd_data_kernel)( #endif if (oh >= OH || ow >= OW) continue; - const __global DATA_T *diff_dst1 = diff_dst + const __global INPUT0_TYPE *diff_dst1 = diff_dst + ow * OC_BLOCK * MB_BLOCK + oh * OW_FULL * OC_BLOCK * MB_BLOCK; #if CASE_3D diff_dst1 += od * OH_FULL * OW_FULL * OC_BLOCK * MB_BLOCK; #endif - const __global DATA_T *wei1 = wei + const __global 
FILTER_TYPE *wei1 = wei #if CASE_3D + kd * KH * KW * OC_BLOCK * IC_BLOCK #endif @@ -148,44 +159,30 @@ KERNEL(gen9_common_conv_bwd_data_kernel)( #if SW != 1 || SH != 1 || SD != 1 || PH != 0 || PW != 0 || PD != 0 if (do_ker) { #endif - const __global DATA_T *diff_dst1 = diff_dst + const __global INPUT0_TYPE *diff_dst1 = diff_dst + ow * OC_BLOCK * MB_BLOCK + oh * OW_FULL * OC_BLOCK * MB_BLOCK; #if CASE_3D diff_dst1 += od * OH_FULL * OW_FULL * OC_BLOCK * MB_BLOCK; #endif - const __global DATA_T *wei1 = wei; + const __global FILTER_TYPE *wei1 = wei; #endif -#define LOAD_DIFF_DST(_block, _diff_dst, mb_chunk) \ - { \ - (_block) = AS_DATA8_T( \ - BLOCK_READ8((const __global BLOCK_DATA_T *)((_diff_dst) \ - + (mb_chunk)*OC_BLOCK))); \ - } - -#define SAVE_SRC_DIFF(_block, _diff_src, mb_chunk) \ - { \ - BLOCK_WRITE8((const __global BLOCK_DATA_T *)(&( \ - _diff_src)[(mb_chunk)*IC_BLOCK]), \ - AS_BLOCK_DATA8_T((_block))); \ - } - #if DT_F32 #define TRANSPOSE_8(_block, _col) \ - (DATA8_T)(intel_sub_group_shuffle(_block, _col)) + (intel_sub_group_shuffle(_block, _col)) #else #define TRANSPOSE_8(_block, _col) \ - (DATA8_T)(intel_sub_group_shuffle(_block[0], _col), \ - intel_sub_group_shuffle(_block[1], _col), \ - intel_sub_group_shuffle(_block[2], _col), \ - intel_sub_group_shuffle(_block[3], _col), \ - intel_sub_group_shuffle(_block[4], _col), \ - intel_sub_group_shuffle(_block[5], _col), \ - intel_sub_group_shuffle(_block[6], _col), \ - intel_sub_group_shuffle(_block[7], _col)) + (intel_sub_group_shuffle(_block[0], _col), \ + intel_sub_group_shuffle(_block[1], _col), \ + intel_sub_group_shuffle(_block[2], _col), \ + intel_sub_group_shuffle(_block[3], _col), \ + intel_sub_group_shuffle(_block[4], _col), \ + intel_sub_group_shuffle(_block[5], _col), \ + intel_sub_group_shuffle(_block[6], _col), \ + intel_sub_group_shuffle(_block[7], _col)) #endif -#define FMA8(a, b, c) fma((DATA8_T)(a), (DATA8_T)b, (DATA8_T)c) +#define FMA8(a, b, c) fma((FMA_ARG_TYPE8)(a), (FMA_ARG_TYPE8)b, (FMA_ARG_TYPE8)c) #define MULTIPLY_BLOCKS_8x8(_result, _blockA, _blockB, _blockB1) \ { \ @@ -207,14 +204,10 @@ KERNEL(gen9_common_conv_bwd_data_kernel)( _result = FMA8(_blockB1.s7, TRANSPOSE_8(_blockA, 15), _result); \ } - DATA8_T blockA0, blockA1; - LOAD_DIFF_DST(blockA0, diff_dst1, 0); - LOAD_DIFF_DST(blockA1, diff_dst1, 8); - DATA8_T blockB00 = AS_DATA8_T( - BLOCK_READ8((const __global BLOCK_DATA_T *)wei1)); - DATA8_T blockB01 = AS_DATA8_T( - BLOCK_READ8((const __global BLOCK_DATA_T *)(wei1 - + 8 * IC_BLOCK))); + INPUT_TYPE8 blockA0 = DT_INPUT_BLOCK_READ(diff_dst1, 0); + INPUT_TYPE8 blockA1 = DT_INPUT_BLOCK_READ(diff_dst1, 8 * OC_BLOCK); + FILTER_TYPE8 blockB00 = DT_FILTER_BLOCK_READ8(wei1, 0); + FILTER_TYPE8 blockB01 = DT_FILTER_BLOCK_READ8(wei1, 8 * IC_BLOCK); MULTIPLY_BLOCKS_8x8(blockC00, blockA0, blockB00, blockB01); MULTIPLY_BLOCKS_8x8(blockC01, blockA1, blockB00, blockB01); @@ -232,7 +225,7 @@ KERNEL(gen9_common_conv_bwd_data_kernel)( ocb += OC_BLOCK; } while (ocb < OC); - __global DATA_T *src_write0 = diff_src + OUTPUT_OFFSET + mb * IC_FULL * G * ID_FULL * IH_FULL * IW_FULL + __global OUTPUT_TYPE *src_write0 = diff_src + OUTPUT_OFFSET + mb * IC_FULL * G * ID_FULL * IH_FULL * IW_FULL + gic * ID_FULL * IH_FULL * IW_FULL * IC_BLOCK * MB_BLOCK + g * IC * ID_FULL * IH_FULL * IW_FULL * MB_BLOCK + id * IH_FULL * IW_FULL * IC_BLOCK * MB_BLOCK + ih * IW_FULL * IC_BLOCK * MB_BLOCK @@ -240,20 +233,24 @@ KERNEL(gen9_common_conv_bwd_data_kernel)( blockC00 = ACTIVATION(blockC00, ACTIVATION_PARAMS); blockC01 = ACTIVATION(blockC01, 
ACTIVATION_PARAMS); + OUTPUT_TYPE8 res0, res1; #if HAS_FUSED_OPS { FUSED_OPS_BLOCK_C00; - blockC00 = FUSED_OPS_RESULT_BLOCK_C00; + res0 = FUSED_OPS_RESULT_BLOCK_C00; } { FUSED_OPS_BLOCK_C01; - blockC01 = FUSED_OPS_RESULT_BLOCK_C01; + res1 = FUSED_OPS_RESULT_BLOCK_C01; } +#else + res0 = blockC00; + res1 = blockC01; #endif - SAVE_SRC_DIFF(blockC00, src_write0, 0); - SAVE_SRC_DIFF(blockC01, src_write0, 8); + DT_OUTPUT_BLOCK_WRITE8(src_write0, 0, res0); + DT_OUTPUT_BLOCK_WRITE8(src_write0, 8 * IC_BLOCK, res1); #endif #if VER_8OW16C == 1 @@ -278,7 +275,7 @@ KERNEL(gen9_common_conv_bwd_data_kernel)( const int iw = (ihw % IWB) * IW_BLOCK; diff_dst += input_offset + mb * OC_FULL * G * OD_FULL * OH_FULL * OW_FULL + g * OC * OD_FULL * OH_FULL * OW_FULL * MB_BLOCK; - DATA_T blockC00[IW_BLOCK] = {0.0f}; + INPUT0_TYPE blockC00[IW_BLOCK] = {INPUT0_VAL_ZERO}; #if WITH_BIAS for (int i = 0; i < IW_BLOCK; i++) @@ -307,12 +304,12 @@ KERNEL(gen9_common_conv_bwd_data_kernel)( oh /= SH; if (oh >= OH) continue; - const __global DATA_T *diff_dst1 + const __global INPUT0_TYPE *diff_dst1 = diff_dst + oh * OW_FULL * OC_BLOCK * MB_BLOCK; #if CASE_3D diff_dst1 += od * OH_FULL * OW_FULL * OC_BLOCK * MB_BLOCK; #endif - const __global DATA_T *wei1 = wei + const __global FILTER_TYPE *wei1 = wei #if CASE_3D + kd * KH * KW * OC_BLOCK * IC_BLOCK #endif @@ -341,21 +338,21 @@ KERNEL(gen9_common_conv_bwd_data_kernel)( #if SW != 1 || SH != 1 || SD != 1 || PH != 0 || PW != 0 || PD != 0 if (do_ker) { #endif - const __global DATA_T *diff_dst1 + const __global INPUT0_TYPE *diff_dst1 = diff_dst + oh * OW_FULL * OC_BLOCK * MB_BLOCK; #if CASE_3D diff_dst1 += od * OH_FULL * OW_FULL * OC_BLOCK * MB_BLOCK; #endif - const __global DATA_T *wei1 = wei; + const __global FILTER_TYPE *wei1 = wei; #endif int ocb = 0; do { #define TRANSPOSE_1(_block, _col) \ - (DATA_T)(intel_sub_group_shuffle(_block, _col)) + (intel_sub_group_shuffle(_block, _col)) -#define FMA1(a, b, c) fma((DATA_T)(a), (DATA_T)b, (DATA_T)c) +#define FMA1(a, b, c) fma((FMA_ARG_TYPE)(a), (FMA_ARG_TYPE)b, (FMA_ARG_TYPE)c) #define MULTIPLY_BLOCKS_8x8(_result, _blockA, _blockB, _blockB1) \ { \ @@ -377,12 +374,9 @@ KERNEL(gen9_common_conv_bwd_data_kernel)( _result = FMA1(_blockB1.s7, TRANSPOSE_1(_blockA, 15), _result); \ } - DATA8_T blockB00 = AS_DATA8_T( - BLOCK_READ8((const __global BLOCK_DATA_T *)wei1)); - DATA8_T blockB01 = AS_DATA8_T( - BLOCK_READ8((const __global BLOCK_DATA_T *)(wei1 - + 8 * IC_BLOCK))); - DATA_T blockA[IW_BLOCK]; + FILTER_TYPE8 blockB00 = DT_FILTER_BLOCK_READ8(wei1, 0); + FILTER_TYPE8 blockB01 = DT_FILTER_BLOCK_READ8(wei1, 8 * IC_BLOCK); + INPUT0_TYPE blockA[IW_BLOCK]; __attribute__(( opencl_unroll_hint(IW_BLOCK))) // attr:no-format @@ -407,9 +401,7 @@ KERNEL(gen9_common_conv_bwd_data_kernel)( blockA[i] = 0.0; continue; } - blockA[i] = AS_DATA_T( - BLOCK_READ((const __global BLOCK_DATA_T *)(&( - diff_dst1)[ow * OC_BLOCK]))); + blockA[i] = DT_INPUT_BLOCK_READ(diff_dst1, ow * OC_BLOCK); } __attribute__(( @@ -434,7 +426,7 @@ KERNEL(gen9_common_conv_bwd_data_kernel)( #endif #endif - __global DATA_T *src_write0 = diff_src + output_offset + mb * IC_FULL * G * ID_FULL * IH_FULL * IW_FULL + __global OUTPUT_TYPE *src_write0 = diff_src + output_offset + mb * IC_FULL * G * ID_FULL * IH_FULL * IW_FULL + gic * ID_FULL * IH_FULL * IW_FULL * IC_BLOCK * MB_BLOCK + g * IC * ID_FULL * IH_FULL * IW_FULL * MB_BLOCK + id * IH_FULL * IW_FULL * IC_BLOCK * MB_BLOCK + ih * IW_FULL * IC_BLOCK * MB_BLOCK @@ -443,12 +435,14 @@ KERNEL(gen9_common_conv_bwd_data_kernel)( for (int i = 0; i < 
IW_BLOCK; i++) { blockC00[i] = ACTIVATION(blockC00[i], ACTIVATION_PARAMS); if (iw + i >= IW) continue; + OUTPUT_TYPE res; #if HAS_FUSED_OPS FUSED_OPS_BLOCK_CI; - blockC00[i] = FUSED_OPS_RESULT_BLOCK_CI; + res = FUSED_OPS_RESULT_BLOCK_CI; +#else + res = blockC00[i]; #endif - BLOCK_WRITE((__global BLOCK_DATA_T *)(&(src_write0)[i * IC_BLOCK]), - AS_BLOCK_DATA_T(blockC00[i])); + DT_OUTPUT_BLOCK_WRITE(src_write0, i * IC_BLOCK, res); } #endif } diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/ocl_types.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/ocl_types.h deleted file mode 100644 index 332e9551cee0f6..00000000000000 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/ocl_types.h +++ /dev/null @@ -1,444 +0,0 @@ -/******************************************************************************* -* Copyright 2019 Intel Corporation -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*******************************************************************************/ - -// #include "ocl_math_utils.h" - -#define for_ for - -#define CONCAt2(a, b) a##b -#define CONCAT2(a, b) CONCAt2(a, b) - -#if DT_F32 == 1 -#define DATA_T float -#define DATA8_T float8 -#define DATA_MAX FLT_MAX -#define DATA_MIN -DATA_MAX -#define DATA_ZERO 0.0f -#define DATA_ONE 1.0f -#define DEF_ACC_DATA_T float -#define DEF_ACC_DATA8_T float8 -#define POST_OP_DATA_T float -#define TO_DATA_T(v) static_cast(v) -#define TO_DEF_ACC_DATA_T(v) static_cast(v) -#define DATA_TO_REF convert_float -#define CONVERT_DATA_T convert_float -#define CONVERT_DATA8_T convert_float8 -#define CONVERT_FLOAT_T convert_float -#define CONVERT_FLOAT8_T convert_float8 -#define ROUND - -#define BLOCK_READ intel_sub_group_block_read -#define BLOCK_WRITE intel_sub_group_block_write -#define BLOCK_READ8 intel_sub_group_block_read8 -#define BLOCK_WRITE8 intel_sub_group_block_write8 - -#define AS_DATA_T as_float -#define AS_DATA8_T as_float8 - -#define AS_UINT_T as_uint -#define AS_UINT8_T as_uint8 - -#define BLOCK_DATA_T uint -#define BLOCK_DATA8_T uint8 -#define AS_BLOCK_DATA_T as_uint -#define AS_BLOCK_DATA8_T as_uint8 -#elif DT_F16 == 1 -#pragma OPENCL EXTENSION cl_khr_fp16 : enable - -#define DATA_T half -#define DATA8_T half8 -#define DATA_MAX HALF_MAX -#define DATA_MIN -DATA_MAX -#define DATA_ZERO 0.0h -#define DATA_ONE 1.0h -#define DEF_ACC_DATA_T half -#define DEF_ACC_DATA8_T half8 -#define POST_OP_DATA_T half -#define TO_DATA_T(v) (half)(v) -#define TO_DEF_ACC_DATA_T(v) (half)(v) -#define DATA_TO_REF convert_half -#define CONVERT_DATA_T convert_half -#define CONVERT_DATA8_T convert_half8 -#define CONVERT_FLOAT_T convert_float -#define CONVERT_FLOAT8_T convert_float8 -#define ROUND - -#define BLOCK_READ intel_sub_group_block_read_us -#define BLOCK_WRITE intel_sub_group_block_write_us -#define BLOCK_READ8 intel_sub_group_block_read_us8 -#define BLOCK_WRITE8 intel_sub_group_block_write_us8 -#define AS_DATA_T as_half -#define AS_DATA8_T as_half8 - -#define AS_UINT_T as_ushort -#define AS_UINT8_T 
as_ushort8 - -#define BLOCK_DATA_T ushort -#define BLOCK_DATA8_T ushort8 -#define AS_BLOCK_DATA_T as_ushort -#define AS_BLOCK_DATA8_T as_ushort8 -#elif DT_BF16 == 1 -#define DATA_T ushort -#define POST_OP_DATA_T float -#define DATA8_T ushort8 -#define DATA_MAX 3.38953138925153547590470800371487866880e+38F -#define DATA_MIN (-DATA_MAX) -#define DATA_ZERO 0.0f -#define DATA_ONE 1.0f -#define DEF_ACC_DATA_T float -#define DEF_ACC_DATA8_T float8 -#define TO_DATA_T(v) convert_f32_to_bf16(v) -#define TO_DEF_ACC_DATA_T(v) convert_bf16_to_f32(v) -#define DATA_TO_REF convert_bf16_to_f32 -#define CONVERT_DATA_T convert_f32_to_bf16 -#define CONVERT_DATA8_T convert_f32_to_bf16_vec8 -#define CONVERT_FLOAT_T convert_bf16_to_f32 -#define CONVERT_FLOAT8_T convert_bf16_to_f32_vec8 -#define ROUND - -#define BLOCK_READ intel_sub_group_block_read_us -#define BLOCK_WRITE intel_sub_group_block_write_us -#define BLOCK_READ8 intel_sub_group_block_read_us8 -#define BLOCK_WRITE8 intel_sub_group_block_write_us8 -#define AS_DATA_T as_ushort -#define AS_DATA8_T as_ushort8 - -#define AS_UINT_T as_ushort -#define AS_UINT8_T as_ushort8 - -#define BLOCK_DATA_T ushort -#define BLOCK_DATA8_T ushort8 -#define AS_BLOCK_DATA_T as_ushort -#define AS_BLOCK_DATA8_T as_ushort8 -#elif DT_S8 == 1 -#define DATA_T char -#define DATA8_T char8 -#define DATA_MAX CHAR_MAX -#define DATA_MIN CHAR_MIN -#define DATA_ZERO 0 -#define DATA_ONE 1 -#define DEF_ACC_DATA_T int -#define DEF_ACC_DATA8_T int8 -#define POST_OP_DATA_T float -#define TO_DATA_T(v) static_cast(v) -#define DATA_TO_REF convert_char -#define CONVERT_DATA_T convert_char -#define CONVERT_DATA8_T convert_char8 -#define ROUND rint - -#define BLOCK_READ intel_sub_group_block_read_uc -#define BLOCK_WRITE intel_sub_group_block_write_uc -#define BLOCK_READ8 intel_sub_group_block_read_uc8 -#define BLOCK_WRITE8 intel_sub_group_block_write_uc8 -#define AS_DATA_T as_char -#define AS_DATA8_T as_char8 - -#define AS_UINT_T as_uchar -#define AS_UINT8_T as_uchar8 - -#define BLOCK_DATA_T uchar -#define BLOCK_DATA8_T uchar8 -#define AS_BLOCK_DATA_T as_uchar -#define AS_BLOCK_DATA8_T as_uchar8 -#elif DT_U8 == 1 -#define DATA_T uchar -#define DATA8_T uchar8 -#define DATA_MAX UCHAR_MAX -#define DATA_MIN 0 -#define DATA_ZERO 0 -#define DATA_ONE 1 -#define DEF_ACC_DATA_T int -#define DEF_ACC_DATA8_T int8 -#define POST_OP_DATA_T float -#define TO_DATA_T(v) (uchar)(v) -#define DATA_TO_REF convert_uchar -#define CONVERT_DATA_T convert_uchar -#define CONVERT_DATA8_T convert_uchar8 -#define ROUND rint - -#define BLOCK_READ intel_sub_group_block_read_uc -#define BLOCK_WRITE intel_sub_group_block_write_uc -#define BLOCK_READ8 intel_sub_group_block_read_uc8 -#define BLOCK_WRITE8 intel_sub_group_block_write_uc8 -#define AS_DATA_T as_uchar -#define AS_DATA8_T as_uchar8 - -#define AS_UINT_T as_uchar -#define AS_UINT8_T as_uchar8 - -#define BLOCK_DATA_T uchar -#define BLOCK_DATA8_T uchar8 -#define AS_BLOCK_DATA_T as_uchar -#define AS_BLOCK_DATA8_T as_uchar8 -#elif DT_S32 == 1 -#define DATA_T int -#define CONVERT_DATA_T convert_int_sat_rte -#define POST_OP_DATA_T float -#elif !defined(DT_UNDEF) -#error "Unexpected data type" -#endif - -#if VECT_DT_N == 1 -#define VECT_DATA_T DATA_T -#define VECT_DEF_ACC_DATA_T DEF_ACC_DATA_T -#define AS_VECT_DATA_T AS_DATA_T -#define VECT_BLOCK_READ BLOCK_READ -#define VECT_BLOCK_WRITE BLOCK_WRITE -#define VECT_UINT_READ intel_sub_group_block_read -#define VECT_UINT_WRITE intel_sub_group_block_write -#define VECT_BLOCK_DATA_T BLOCK_DATA_T -#define AS_VECT_BLOCK_DATA_T 
AS_BLOCK_DATA_T -#define CONVERT_VECT_FLOAT_T CONVERT_FLOAT_T -#define CONVERT_VECTOR_DATA_T CONVERT_DATA_T -#define VECT_INT_T int -#define VECT_UINT_T uint -#define VECT_FLOAT_T float -#define AS_VECT_INT_T as_int -#define AS_VECT_UINT_T as_uint -#elif VECT_DT_N == 8 -#define VECT_DATA_T DATA8_T -#define VECT_DEF_ACC_DATA_T DEF_ACC_DATA8_T -#define AS_VECT_DATA_T AS_DATA8_T -#define VECT_BLOCK_READ BLOCK_READ8 -#define VECT_BLOCK_WRITE BLOCK_WRITE8 -#define VECT_UINT_READ intel_sub_group_block_read8 -#define VECT_UINT_WRITE intel_sub_group_block_write8 -#define VECT_BLOCK_DATA_T BLOCK_DATA8_T -#define AS_VECT_BLOCK_DATA_T AS_BLOCK_DATA8_T -#define CONVERT_VECT_FLOAT_T CONVERT_FLOAT8_T -#define CONVERT_VECTOR_DATA_T CONVERT_DATA8_T -#define VECT_INT_T int8 -#define VECT_UINT_T uint8 -#define VECT_FLOAT_T float8 -#define AS_VECT_INT_T as_int8 -#define AS_VECT_UINT_T as_uint8 -#endif - -#ifdef SRC_DATA_T -#define SRC_DATA8_T CONCAT2(SRC_DATA_T, 8) -#if SRC_DT_BF16 -#define SRC_TO_REF(x) convert_bf16_to_f32(x) -#define SRC_TO_REF8(x) convert_bf16_to_f32_vec8(x) -#else -#define SRC_TO_REF(x) (x) -#define SRC_TO_REF8(x) (x) -#endif -#if SRC_DT_BF16 -#define TO_SRC(x) convert_f32_to_bf16(x) -#elif SRC_DT_U8 -#define TO_SRC(x) convert_uchar_sat_rte(x) -#elif SRC_DT_S8 -#define TO_SRC(x) convert_char_sat_rte(x) -#elif SRC_DT_S32 -#define TO_SRC(x) convert_int_sat_rte(x) -#else -#define TO_SRC(x) (x) -#endif -#endif - -#ifdef WEI_DATA_T -#if WEI_DT_BF16 -#define WEI_TO_REF(x) convert_bf16_to_f32(x) -#define REF_TO_WEI(x) convert_f32_to_bf16(x) -#else -#define WEI_TO_REF(x) (x) -#define REF_TO_WEI(x) (x) -#endif -#if WEI_DT_BF16 -#define TO_WEI(x) convert_f32_to_bf16(x) -#elif WEI_DT_U8 -#define TO_WEI(x) convert_uchar_sat_rte(x) -#elif WEI_DT_S8 -#define TO_WEI(x) convert_char_sat_rte(x) -#elif WEI_DT_S32 -#define TO_WEI(x) convert_int_sat_rte(x) -#else -#define TO_WEI(x) (x) -#endif -#endif - -#ifdef BIA_DATA_T -#if BIA_DT_BF16 -#define BIA_TO_REF(x) convert_bf16_to_f32(x) -#define REF_TO_BIA(x) convert_f32_to_bf16(x) -#else -#define BIA_TO_REF(x) (x) -#define REF_TO_BIA(x) (x) -#endif -#if BIA_DT_BF16 -#define TO_BIA(x) convert_f32_to_bf16(x) -#elif BIA_DT_U8 -#define TO_BIA(x) convert_uchar_sat_rte(x) -#elif BIA_DT_S8 -#define TO_BIA(x) convert_char_sat_rte(x) -#elif BIA_DT_S32 -#define TO_BIA(x) convert_int_sat_rte(x) -#else -#define TO_BIA(x) (x) -#endif -#endif - -#ifdef DST_DATA_T -#define DST_DATA8_T CONCAT2(DST_DATA_T, 8) -#if DST_DT_BF16 -#define DST_TO_REF(x) convert_bf16_to_f32(x) -#define DST_TO_REF8(x) convert_bf16_to_f32_vec8(x) -#define REF_TO_DST(x) convert_f32_to_bf16(x) -#define REF_TO_DST8(x) convert_f32_to_bf16_vec8(convert_float8(x)) -#else -#define DST_TO_REF(x) (x) -#define DST_TO_REF8(x) (x) -#define REF_TO_DST(x) (x) -#define REF_TO_DST8(x) (x) -#endif -#if DST_DT_BF16 -#define TO_DST(x) convert_f32_to_bf16(x) -#define TO_DST8(x) convert_f32_to_bf16_vec8(convert_float8(x)) -#elif DST_DT_F16 -#define TO_DST(x) convert_half(x) -#define TO_DST8(x) convert_half8(x) -#elif DST_DT_U8 -#define TO_DST(x) convert_uchar_sat_rte(x) -#define TO_DST8(x) convert_uchar8_sat_rte(x) -#elif DST_DT_S8 -#define TO_DST(x) convert_char_sat_rte(x) -#define TO_DST8(x) convert_char8_sat_rte(x) -#elif DST_DT_S32 -#define TO_DST(x) convert_int_sat_rte(x) -#define TO_DST8(x) convert_int8_sat_rte(x) -#elif DST_DT_F32 -#define TO_DST(x) convert_float(x) -#define TO_DST8(x) convert_float8(x) -#else -#error "Not expected" -#endif -#endif - -#ifdef ACC_DATA_T -#if ACC_DT_F16 -#define TO_ACC(x) 
convert_half(x) -#elif ACC_DT_F32 -#define TO_ACC(x) convert_float(x) -#elif ACC_DT_S32 -#define TO_ACC(x) convert_int(x) -#else -#error "Unexpected accumulation data type" -#endif -#endif - -#define OFF_MD(prefix, x0, x1, x2, x3, x4, x5) \ - ((x0 / prefix##_B0_2) / prefix##_B0_1 * prefix##_S0_0) \ - + ((x0 / prefix##_B0_2) % prefix##_B0_1 * prefix##_S0_1) \ - + ((x0 % prefix##_B0_2) * prefix##_S0_2) \ - + ((x1 / prefix##_B1_2) / prefix##_B1_1 * prefix##_S1_0) \ - + ((x1 / prefix##_B1_2) % prefix##_B1_1 * prefix##_S1_1) \ - + ((x1 % prefix##_B1_2) * prefix##_S1_2) \ - + ((x2 / prefix##_B2_2) / prefix##_B2_1 * prefix##_S2_0) \ - + ((x2 / prefix##_B2_2) % prefix##_B2_1 * prefix##_S2_1) \ - + ((x2 % prefix##_B2_2) * prefix##_S2_2) \ - + ((x3 / prefix##_B3_2) / prefix##_B3_1 * prefix##_S3_0) \ - + ((x3 / prefix##_B3_2) % prefix##_B3_1 * prefix##_S3_1) \ - + ((x3 % prefix##_B3_2) * prefix##_S3_2) \ - + ((x4 / prefix##_B4_2) / prefix##_B4_1 * prefix##_S4_0) \ - + ((x4 / prefix##_B4_2) % prefix##_B4_1 * prefix##_S4_1) \ - + ((x4 % prefix##_B4_2) * prefix##_S4_2) \ - + ((x5 / prefix##_B5_2) / prefix##_B5_1 * prefix##_S5_0) \ - + ((x5 / prefix##_B5_2) % prefix##_B5_1 * prefix##_S5_1) \ - + ((x5 % prefix##_B5_2) * prefix##_S5_2) - -#if NDIMS == 3 -#define SRC_OFF(x0, x1, d, h, x2) \ - (((x0) % SRC_B0) * SRC_SB0 + ((x0) / SRC_B0) * SRC_S0 \ - + ((x1) % SRC_B1) * SRC_SB1 + ((x1) / SRC_B1) * SRC_S1 \ - + ((x2) % SRC_B2) * SRC_SB2 + ((x2) / SRC_B2) * SRC_S2) - -#if WITH_GROUPS == 1 -#define WHT_OFF(x0, x1, x2, d, h, x3) \ - (((x0) % WHT_B0) * WHT_SB0 + ((x0) / WHT_B0) * WHT_S0 \ - + ((x1) % WHT_B1) * WHT_SB1 + ((x1) / WHT_B1) * WHT_S1 \ - + ((x2) % WHT_B2) * WHT_SB2 + ((x2) / WHT_B2) * WHT_S2 \ - + ((x3) % WHT_B3) * WHT_SB3 + ((x3) / WHT_B3) * WHT_S3) -#else -#define WHT_OFF(g, x0, x1, d, h, x2) \ - (((x0) % WHT_B0) * WHT_SB0 + ((x0) / WHT_B0) * WHT_S0 \ - + ((x1) % WHT_B1) * WHT_SB1 + ((x1) / WHT_B1) * WHT_S1 \ - + ((x2) % WHT_B2) * WHT_SB2 + ((x2) / WHT_B2) * WHT_S2) -#endif - -#define DST_OFF(x0, x1, d, h, x2) \ - (((x0) % DST_B0) * DST_SB0 + ((x0) / DST_B0) * DST_S0 \ - + ((x1) % DST_B1) * DST_SB1 + ((x1) / DST_B1) * DST_S1 \ - + ((x2) % DST_B2) * DST_SB2 + ((x2) / DST_B2) * DST_S2) -#elif NDIMS == 4 -#define SRC_OFF(x0, x1, d, x2, x3) \ - (((x0) % SRC_B0) * SRC_SB0 + ((x0) / SRC_B0) * SRC_S0 \ - + ((x1) % SRC_B1) * SRC_SB1 + ((x1) / SRC_B1) * SRC_S1 \ - + ((x2) % SRC_B2) * SRC_SB2 + ((x2) / SRC_B2) * SRC_S2 \ - + ((x3) % SRC_B3) * SRC_SB3 + ((x3) / SRC_B3) * SRC_S3) - -#if WITH_GROUPS == 1 -#define WHT_OFF(x0, x1, x2, d, x3, x4) \ - (((x0) % WHT_B0) * WHT_SB0 + ((x0) / WHT_B0) * WHT_S0 \ - + ((x1) % WHT_B1) * WHT_SB1 + ((x1) / WHT_B1) * WHT_S1 \ - + ((x2) % WHT_B2) * WHT_SB2 + ((x2) / WHT_B2) * WHT_S2 \ - + ((x3) % WHT_B3) * WHT_SB3 + ((x3) / WHT_B3) * WHT_S3 \ - + ((x4) % WHT_B4) * WHT_SB4 + ((x4) / WHT_B4) * WHT_S4) -#else -#define WHT_OFF(g, x1, x2, d, x3, x4) \ - (((x1) % WHT_B0) * WHT_SB0 + ((x1) / WHT_B0) * WHT_S0 \ - + ((x2) % WHT_B1) * WHT_SB1 + ((x2) / WHT_B1) * WHT_S1 \ - + ((x3) % WHT_B2) * WHT_SB2 + ((x3) / WHT_B2) * WHT_S2 \ - + ((x4) % WHT_B3) * WHT_SB3 + ((x4) / WHT_B3) * WHT_S3) -#endif - -#define DST_OFF(x0, x1, d, x2, x3) \ - (((x0) % DST_B0) * DST_SB0 + ((x0) / DST_B0) * DST_S0 \ - + ((x1) % DST_B1) * DST_SB1 + ((x1) / DST_B1) * DST_S1 \ - + ((x2) % DST_B2) * DST_SB2 + ((x2) / DST_B2) * DST_S2 \ - + ((x3) % DST_B3) * DST_SB3 + ((x3) / DST_B3) * DST_S3) -#elif NDIMS == 5 -#define SRC_OFF(x0, x1, x2, x3, x4) \ - (((x0) % SRC_B0) * SRC_SB0 + ((x0) / SRC_B0) * SRC_S0 \ - + ((x1) % 
SRC_B1) * SRC_SB1 + ((x1) / SRC_B1) * SRC_S1 \ - + ((x2) % SRC_B2) * SRC_SB2 + ((x2) / SRC_B2) * SRC_S2 \ - + ((x3) % SRC_B3) * SRC_SB3 + ((x3) / SRC_B3) * SRC_S3 \ - + ((x4) % SRC_B4) * SRC_SB4 + ((x4) / SRC_B4) * SRC_S4) - -#if WITH_GROUPS == 1 -#define WHT_OFF(x0, x1, x2, x3, x4, x5) \ - (((x0) % WHT_B0) * WHT_SB0 + ((x0) / WHT_B0) * WHT_S0 \ - + ((x1) % WHT_B1) * WHT_SB1 + ((x1) / WHT_B1) * WHT_S1 \ - + ((x2) % WHT_B2) * WHT_SB2 + ((x2) / WHT_B2) * WHT_S2 \ - + ((x3) % WHT_B3) * WHT_SB3 + ((x3) / WHT_B3) * WHT_S3 \ - + ((x4) % WHT_B4) * WHT_SB4 + ((x4) / WHT_B4) * WHT_S4 \ - + ((x5) % WHT_B5) * WHT_SB5 + ((x5) / WHT_B5) * WHT_S5) -#else -#define WHT_OFF(g, x1, x2, x3, x4, x5) \ - (((x1) % WHT_B0) * WHT_SB0 + ((x1) / WHT_B0) * WHT_S0 \ - + ((x2) % WHT_B1) * WHT_SB1 + ((x2) / WHT_B1) * WHT_S1 \ - + ((x3) % WHT_B2) * WHT_SB2 + ((x3) / WHT_B2) * WHT_S2 \ - + ((x4) % WHT_B3) * WHT_SB3 + ((x4) / WHT_B3) * WHT_S3 \ - + ((x5) % WHT_B4) * WHT_SB4 + ((x5) / WHT_B4) * WHT_S4) -#endif - -#define DST_OFF(x0, x1, x2, x3, x4) \ - (((x0) % DST_B0) * DST_SB0 + ((x0) / DST_B0) * DST_S0 \ - + ((x1) % DST_B1) * DST_SB1 + ((x1) / DST_B1) * DST_S1 \ - + ((x2) % DST_B2) * DST_SB2 + ((x2) / DST_B2) * DST_S2 \ - + ((x3) % DST_B3) * DST_SB3 + ((x3) / DST_B3) * DST_S3 \ - + ((x4) % DST_B4) * DST_SB4 + ((x4) / DST_B4) * DST_S4) -#endif - diff --git a/inference-engine/thirdparty/clDNN/src/CMakeLists.txt b/inference-engine/thirdparty/clDNN/src/CMakeLists.txt index 6a223b38ff1527..3872a70ae3ff57 100644 --- a/inference-engine/thirdparty/clDNN/src/CMakeLists.txt +++ b/inference-engine/thirdparty/clDNN/src/CMakeLists.txt @@ -172,20 +172,3 @@ elseif((NOT ANDROID) AND (UNIX)) target_link_libraries("${CLDNN_BUILD__PROJ}" PRIVATE pthread) endif() target_link_libraries("${CLDNN_BUILD__PROJ}" PRIVATE ${CLDNN__SYSTEM_LINK_LIBRARIES}) - -# ========================================== Installation ============================================== - -# API headers. -install(DIRECTORY "${CLDNN__API_DIR}/" - DESTINATION "include/clDNN" - FILE_PERMISSIONS OWNER_WRITE OWNER_READ GROUP_READ WORLD_READ - ) -# Binaries. 
-install(TARGETS "${CLDNN_BUILD__PROJ}"
-        ARCHIVE DESTINATION "lib"
-        LIBRARY DESTINATION "lib"
-        RUNTIME DESTINATION "bin"
-        INCLUDES DESTINATION "include/clDNN"
-        )
-
-# ======================================================================================================
diff --git a/inference-engine/thirdparty/clDNN/src/graph_optimizer/prepare_primitive_fusing.cpp b/inference-engine/thirdparty/clDNN/src/graph_optimizer/prepare_primitive_fusing.cpp
index 32efb50d630c7f..85b6d03382fa57 100644
--- a/inference-engine/thirdparty/clDNN/src/graph_optimizer/prepare_primitive_fusing.cpp
+++ b/inference-engine/thirdparty/clDNN/src/graph_optimizer/prepare_primitive_fusing.cpp
@@ -676,11 +676,7 @@ void prepare_primitive_fusing::fuse_simple_primitives(program_impl &p) {
                        (input_data.get_dependency(0).get_output_layout().data_type == data_types::u8 ||
                         input_data.get_dependency(0).get_output_layout().data_type == data_types::i8);

-        should_fuse |= input_data.is_type() && quantize_node.get_scale_shift_opt() &&
-                       // fp16/fp32 optimized kernels don't support chaning data type
-                       (input_data.get_dependency(0).get_output_layout().data_type == data_types::u8 ||
-                        input_data.get_dependency(0).get_output_layout().data_type == data_types::i8 ||
-                        input_data.get_output_layout().data_type == out_layout.data_type);
+        should_fuse |= input_data.is_type() && quantize_node.get_scale_shift_opt();

         should_fuse |= input_data.is_type() && quantize_node.get_scale_shift_opt();

diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/fusings_gpu_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/fusings_gpu_test.cpp
index 566bf119e4612c..6d043cf5d5a6ed 100644
--- a/inference-engine/thirdparty/clDNN/tests/test_cases/fusings_gpu_test.cpp
+++ b/inference-engine/thirdparty/clDNN/tests/test_cases/fusings_gpu_test.cpp
@@ -445,6 +445,17 @@ class ConvEltwTest : public ::BaseFusingTest<conv_eltw_test_params> {
         network_not_fused.set_input_data("input", input_prim);

         compare(network_not_fused, network_fused, p);
+
+        auto find_prim = [](primitive_info& p) -> bool {
+            // Add more ids when needed
+            if (p.original_id == "deconv_prim")
+                return true;
+            return false;
+        };
+
+        auto pi_fused = network_fused.get_primitives_info();
+        auto info_fused = std::find_if(pi_fused.begin(), pi_fused.end(), find_prim);
+        if (info_fused != pi_fused.end())
+            std::cout << "kernel: " << info_fused->kernel_id << std::endl;
     }

     layout get_input_layout(conv_eltw_test_params& p) {
@@ -4333,23 +4344,23 @@ TEST_P(deconv_scale_actv_quant_i8, basic) {
 INSTANTIATE_TEST_CASE_P(fusings_gpu, deconv_scale_actv_quant_i8,
     ::testing::ValuesIn(std::vector<deconv_test_params>{
-        deconv_test_params{ CASE_DECONV_FP32_1, 3, 5 },
-        deconv_test_params{ CASE_DECONV_FP32_2, 3, 5 },
-        deconv_test_params{ CASE_DECONV_FP32_3, 3, 5 },
-        deconv_test_params{ CASE_DECONV_FP32_4, 3, 5 },
-        deconv_test_params{ CASE_DECONV_FP32_5, 3, 5 },
-        deconv_test_params{ CASE_DECONV_FP32_6, 3, 5 },
-        deconv_test_params{ CASE_DECONV_FP32_7, 3, 5 },
-        deconv_test_params{ CASE_DECONV_FP32_8, 3, 5 },
+        deconv_test_params{ CASE_DECONV_FP32_1, 2, 5 },
+        deconv_test_params{ CASE_DECONV_FP32_2, 2, 5 },
+        deconv_test_params{ CASE_DECONV_FP32_3, 2, 5 },
+        deconv_test_params{ CASE_DECONV_FP32_4, 2, 5 },
+        deconv_test_params{ CASE_DECONV_FP32_5, 2, 5 },
+        deconv_test_params{ CASE_DECONV_FP32_6, 2, 5 },
+        deconv_test_params{ CASE_DECONV_FP32_7, 2, 5 },
+        deconv_test_params{ CASE_DECONV_FP32_8, 2, 5 },

-        deconv_test_params{ CASE_DECONV_FP16_1, 3, 5 },
-        deconv_test_params{ CASE_DECONV_FP16_2, 3, 5 },
-        deconv_test_params{ CASE_DECONV_FP16_3, 3, 5 },
- deconv_test_params{ CASE_DECONV_FP16_4, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP16_5, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP16_6, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP16_7, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP16_8, 3, 5 }, + deconv_test_params{ CASE_DECONV_FP16_1, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_2, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_3, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_4, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_5, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_6, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_7, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_8, 2, 5 }, deconv_test_params{ CASE_DECONV_U8S8_1, 2, 5 }, deconv_test_params{ CASE_DECONV_U8S8_2, 2, 5 }, @@ -4369,26 +4380,26 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, deconv_scale_actv_quant_i8, deconv_test_params{ CASE_DECONV_S8S8_7, 2, 5 }, deconv_test_params{ CASE_DECONV_S8S8_8, 2, 5 }, - deconv_test_params{ CASE_DECONV_FP32_3D_1, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP32_3D_2, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP32_3D_3, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP32_3D_4, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP32_3D_5, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP32_3D_6, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP32_3D_7, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP32_3D_8, 3, 5 }, + deconv_test_params{ CASE_DECONV_FP32_3D_1, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP32_3D_2, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP32_3D_3, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP32_3D_4, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP32_3D_5, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP32_3D_6, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP32_3D_7, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP32_3D_8, 2, 5 }, // FIXME no quantize implementation for bs_fs_yx_bsv16_fsv16 format AND add_required_reorders pass completely ruins data types // add_required_reorders pass tries to reorder everything to output type if no format exists, this ruins fp32 -> int8 quantize //deconv_test_params{ CASE_DECONV_FP32_3D_9, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP16_3D_1, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP16_3D_2, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP16_3D_3, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP16_3D_4, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP16_3D_5, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP16_3D_6, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP16_3D_7, 3, 5 }, - deconv_test_params{ CASE_DECONV_FP16_3D_8, 3, 5 }, + deconv_test_params{ CASE_DECONV_FP16_3D_1, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_3D_2, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_3D_3, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_3D_4, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_3D_5, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_3D_6, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_3D_7, 2, 5 }, + deconv_test_params{ CASE_DECONV_FP16_3D_8, 2, 5 }, //deconv_test_params{ CASE_DECONV_FP16_3D_9, 3, 5 }, deconv_test_params{ CASE_DECONV_U8S8_3D_1, 2, 5 }, @@ -4444,23 +4455,23 @@ TEST_P(deconv_scale_actv_quant_u8_eltw_scale_actv_quant_i8, basic) { INSTANTIATE_TEST_CASE_P(fusings_gpu, deconv_scale_actv_quant_u8_eltw_scale_actv_quant_i8, ::testing::ValuesIn(std::vector{ - deconv_test_params{ CASE_DECONV_FP32_1, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP32_2, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP32_3, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP32_4, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP32_5, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP32_6, 4, 9 }, 
- deconv_test_params{ CASE_DECONV_FP32_7, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP32_8, 4, 9 }, - - deconv_test_params{ CASE_DECONV_FP16_1, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_2, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_3, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_4, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_5, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_6, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_7, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_8, 4, 9 }, + deconv_test_params{ CASE_DECONV_FP32_1, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_2, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_3, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_4, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_5, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_6, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_7, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_8, 2, 9 }, + + deconv_test_params{ CASE_DECONV_FP16_1, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_2, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_3, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_4, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_5, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_6, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_7, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_8, 2, 9 }, deconv_test_params{ CASE_DECONV_U8S8_1, 2, 9 }, deconv_test_params{ CASE_DECONV_U8S8_2, 2, 9 }, @@ -4480,24 +4491,24 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, deconv_scale_actv_quant_u8_eltw_scale_actv_ deconv_test_params{ CASE_DECONV_S8S8_7, 2, 9 }, deconv_test_params{ CASE_DECONV_S8S8_8, 2, 9 }, - deconv_test_params{ CASE_DECONV_FP32_3D_1, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP32_3D_2, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP32_3D_3, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP32_3D_4, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP32_3D_5, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP32_3D_6, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP32_3D_7, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP32_3D_8, 4, 9 }, + deconv_test_params{ CASE_DECONV_FP32_3D_1, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_3D_2, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_3D_3, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_3D_4, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_3D_5, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_3D_6, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_3D_7, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP32_3D_8, 2, 9 }, // deconv_test_params{ CASE_DECONV_FP32_3D_9, 6, 9 }, - deconv_test_params{ CASE_DECONV_FP16_3D_1, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_3D_2, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_3D_3, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_3D_4, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_3D_5, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_3D_6, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_3D_7, 4, 9 }, - deconv_test_params{ CASE_DECONV_FP16_3D_8, 4, 9 }, + deconv_test_params{ CASE_DECONV_FP16_3D_1, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_3D_2, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_3D_3, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_3D_4, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_3D_5, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_3D_6, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_3D_7, 2, 9 }, + deconv_test_params{ CASE_DECONV_FP16_3D_8, 2, 9 }, // deconv_test_params{ CASE_DECONV_FP16_3D_9, 6, 9 }, deconv_test_params{ CASE_DECONV_U8S8_3D_1, 2, 9 }, @@ -4548,14 +4559,14 @@ TEST_P(deconv_scale_activation_quantize_i8_eltwise_quantize_u8, basic) { 
 INSTANTIATE_TEST_CASE_P(fusings_gpu, deconv_scale_activation_quantize_i8_eltwise_quantize_u8,
     ::testing::ValuesIn(std::vector<conv_eltw_test_params>{
-        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_1, 4, 7},
-        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_2, 4, 7},
-        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_3, 4, 7},
-        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_4, 4, 7},
-        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_5, 4, 7},
-        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_6, 4, 7},
-        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_7, 4, 7},
-        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_8, 4, 7},
+        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_1, 2, 7},
+        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_2, 2, 7},
+        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_3, 2, 7},
+        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_4, 2, 7},
+        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_5, 2, 7},
+        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_6, 2, 7},
+        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_7, 2, 7},
+        conv_eltw_test_params{CASE_DECONV_ELTW_FP32_8, 2, 7},

         conv_eltw_test_params{CASE_DECONV_ELTW_i8_1, 2, 7},
         conv_eltw_test_params{CASE_DECONV_ELTW_i8_2, 2, 7},
diff --git a/inference-engine/thirdparty/movidius/mvnc/src/mvnc_api.c b/inference-engine/thirdparty/movidius/mvnc/src/mvnc_api.c
index 481da4c99a012b..b67a7fcb1f818b 100644
--- a/inference-engine/thirdparty/movidius/mvnc/src/mvnc_api.c
+++ b/inference-engine/thirdparty/movidius/mvnc/src/mvnc_api.c
@@ -424,7 +424,7 @@ static ncStatus_t getDeviceFwProtocolPrefix(const deviceDesc_t deviceDesc,

 static char* getDevicePlatform(deviceDesc_t deviceDesc, int useUniversalFirmware) {
     if (deviceDesc.platform == X_LINK_MYRIAD_X) {
-        if (useUniversalFirmware && deviceDesc.protocol != X_LINK_PCIE) {
+        if (useUniversalFirmware) {
             return "ma2x8x";
         } else {
             return "ma248x";
diff --git a/inference-engine/thirdparty/movidius/mvnc/tests/cases/mvnc_common_test_cases.cpp b/inference-engine/thirdparty/movidius/mvnc/tests/cases/mvnc_common_test_cases.cpp
index c06aea52940969..3fd0f9f3465475 100644
--- a/inference-engine/thirdparty/movidius/mvnc/tests/cases/mvnc_common_test_cases.cpp
+++ b/inference-engine/thirdparty/movidius/mvnc/tests/cases/mvnc_common_test_cases.cpp
@@ -100,7 +100,7 @@ std::string MvncTestsCommon::getMyriadFirmwarePath(const deviceDesc_t& in_device
 #else
         const std::string extension = "mvcmd";
 #endif
-        return firmwareDir + std::string("pcie-ma248x.") + extension;
+        return firmwareDir + std::string("pcie-ma2x8x.") + extension;
     }

     return getMyriadUSBFirmwarePath(in_deviceDesc.name);
diff --git a/inference-engine/thirdparty/movidius/tests/XLink/helpers/XLink_tests_helpers.cpp b/inference-engine/thirdparty/movidius/tests/XLink/helpers/XLink_tests_helpers.cpp
index c51b466a82680a..f9e14c3ea9d8c3 100644
--- a/inference-engine/thirdparty/movidius/tests/XLink/helpers/XLink_tests_helpers.cpp
+++ b/inference-engine/thirdparty/movidius/tests/XLink/helpers/XLink_tests_helpers.cpp
@@ -88,7 +88,7 @@ std::string XLinkTestsHelper::getMyriadFirmwarePath(const deviceDesc_t& in_devic
 #else
         const std::string extension = "mvcmd";
 #endif
-        return FIRMWARE_SUBFOLDER + std::string("pcie-ma248x.") + extension;
+        return FIRMWARE_SUBFOLDER + std::string("pcie-ma2x8x.") + extension;
     }

     return getMyriadUSBFirmwarePath(in_deviceDesc.name);
diff --git a/inference-engine/tools/compile_tool/README.md b/inference-engine/tools/compile_tool/README.md
index 1c3c48cb81195a..4bda89a94bc192 100644
--- a/inference-engine/tools/compile_tool/README.md
+++ b/inference-engine/tools/compile_tool/README.md
@@ -2,8 +2,7 @@ The Compile tool
is a C++ application that enables you to dump a loaded executable network blob. The tool is delivered as an executable file that can be run on both Linux\* and Windows\*. -The tool is located in the `/deployment_tools/inference_engine/lib/intel64/` directory on Linux -and `` on Windows. +The tool is located in the `/deployment_tools/tools/compile_tool` directory. The workflow of the Compile tool is as follows: diff --git a/model-optimizer/automation/package_BOM.txt b/model-optimizer/automation/package_BOM.txt index 80b1aa2e1961fb..3fcebe98abd631 100644 --- a/model-optimizer/automation/package_BOM.txt +++ b/model-optimizer/automation/package_BOM.txt @@ -257,6 +257,7 @@ extensions/front/onnx/detectionoutput_ext.py extensions/front/onnx/dropout_ext.py extensions/front/onnx/elementwise_ext.py extensions/front/onnx/expand_ext.py +extensions/front/onnx/faster_rcnn.json extensions/front/onnx/flatten_ext.py extensions/front/onnx/flattenONNX_to_reshape.py extensions/front/onnx/gather_ext.py diff --git a/model-optimizer/extensions/front/onnx/faster_rcnn.json b/model-optimizer/extensions/front/onnx/faster_rcnn.json new file mode 100644 index 00000000000000..c08e094c12cd88 --- /dev/null +++ b/model-optimizer/extensions/front/onnx/faster_rcnn.json @@ -0,0 +1,20 @@ +[ + { + "custom_attributes": + { + "max_detections_per_image": 100, + "max_delta_log_wh": 4.135166645050049, + "score_threshold": 0.05, + "nms_threshold": 0.5, + "post_nms_count": 2000, + "input_fpn_heads": ["486", "454", "422", "390"], + "do_outputs": ["6371", "6373", "6375"], + "box_regressions_input_node": "2614", + "class_predicitons_node": "2615", + "ROIFeatureExtractor2_input": "2335", + "ROIFeatureExtractor2_output": "2592" + }, + "id": "ONNXMaskRCNNReplacement", + "match_kind": "general" + } +] \ No newline at end of file diff --git a/model-optimizer/extensions/front/onnx/mask_rcnn.json b/model-optimizer/extensions/front/onnx/mask_rcnn.json index 80fd30d1881db4..adc5b02d050f96 100644 --- a/model-optimizer/extensions/front/onnx/mask_rcnn.json +++ b/model-optimizer/extensions/front/onnx/mask_rcnn.json @@ -6,7 +6,14 @@ "max_delta_log_wh": 4.135166645050049, "score_threshold": 0.05, "nms_threshold": 0.5, - "post_nms_count": 2000 + "post_nms_count": 2000, + "input_fpn_heads": ["486", "454", "422", "390"], + "do_outputs": ["6530", "6532", "6534"], + "box_regressions_input_node": "2773", + "class_predicitons_node": "2774", + "ROIFeatureExtractor1_output": "6795", + "ROIFeatureExtractor2_input": "2490", + "ROIFeatureExtractor2_output": "2751" }, "id": "ONNXMaskRCNNReplacement", "match_kind": "general" diff --git a/model-optimizer/extensions/front/onnx/mask_rcnn_conversion.py b/model-optimizer/extensions/front/onnx/mask_rcnn_conversion.py index ee26bada7428f9..7b6b30d783128d 100644 --- a/model-optimizer/extensions/front/onnx/mask_rcnn_conversion.py +++ b/model-optimizer/extensions/front/onnx/mask_rcnn_conversion.py @@ -28,8 +28,6 @@ from mo.graph.graph import Node from mo.ops.reshape import Reshape -input_fpn_heads = ('486', '454', '422', '390') - class ONNXMaskRCNNTransformation(FrontReplacementFromConfigFileGeneral): """ @@ -45,24 +43,25 @@ class ONNXMaskRCNNTransformation(FrontReplacementFromConfigFileGeneral): replacement_id = 'ONNXMaskRCNNReplacement' def run_before(self): - # the node "2774" which is used in this transformation is of op SoftMaxONNX. But operations of op SoftMaxONNX + # the class_predicitons_node which is used in this transformation is of op SoftMaxONNX. 
But operations of op SoftMaxONNX # will be replaced with a transformation SoftmaxONNXFrontReplacer return [SoftmaxONNXFrontReplacer] def transform_graph(self, graph: Graph, replacement_descriptions: dict): - insert_ExperimentalDetectronROIFeatureExtractor2(graph) + insert_ExperimentalDetectronROIFeatureExtractor2(graph, replacement_descriptions) insert_do(graph, replacement_descriptions) - insert_ExperimentalDetectronROIFeatureExtractor1(graph) + insert_ExperimentalDetectronROIFeatureExtractor1(graph, replacement_descriptions) -def insert_do(graph: Graph, replacement_descriptions): - do_outputs = ['6530', '6532', '6534'] +def insert_do(graph: Graph, replacement_descriptions: dict): + do_outputs = replacement_descriptions['do_outputs'] prior_boxes_node = Node(graph, 'ROIFeatureExtractor_2') num_classes = 81 + box_regressions_input_node = Node(graph, replacement_descriptions['box_regressions_input_node']) box_regressions_node = create_op_node_with_second_input(graph, Reshape, int64_array([-1, 4 * num_classes]), - dict(name='box_regressions'), Node(graph, '2773')) + dict(name='box_regressions'), box_regressions_input_node) - class_predicitons_node = Node(graph, '2774') + class_predicitons_node = Node(graph, replacement_descriptions['class_predicitons_node']) im_info_node = Parameter(graph, {"name": 'im_info', 'shape': int64_array([1, 3])}).create_node() do_node = ExperimentalDetectronDetectionOutput(graph, {'name': 'DetectionOutput', @@ -92,8 +91,12 @@ def insert_do(graph: Graph, replacement_descriptions): do_node.out_port(1).get_connection().insert_node(Cast(graph, {'dst_type': np.int64}).create_node()) -def insert_ExperimentalDetectronROIFeatureExtractor1(graph: Graph): - old_output_node = Node(graph, '6795') +def insert_ExperimentalDetectronROIFeatureExtractor1(graph: Graph, replacement_descriptions: dict): + if 'ROIFeatureExtractor1_output' not in replacement_descriptions: + # In case of Faster-RCNN this transformation is not needed and this attribute shouldn't be set + return + input_fpn_heads = replacement_descriptions['input_fpn_heads'] + old_output_node = Node(graph, replacement_descriptions['ROIFeatureExtractor1_output']) input_fpn_head_nodes = [Node(graph, node_id) for node_id in input_fpn_heads] fpn_roi_align = ExperimentalDetectronROIFeatureExtractor(graph, {'name': 'ROIFeatureExtractor_1', 'distribute_rois_between_levels': 1, @@ -110,8 +113,9 @@ def insert_ExperimentalDetectronROIFeatureExtractor1(graph: Graph): old_output_node.out_port(0).get_connection().set_source(fpn_roi_align.out_port(0)) -def insert_ExperimentalDetectronROIFeatureExtractor2(graph: Graph): - old_output_node = Node(graph, '2751') +def insert_ExperimentalDetectronROIFeatureExtractor2(graph: Graph, replacement_descriptions: dict): + input_fpn_heads = replacement_descriptions['input_fpn_heads'] + old_output_node = Node(graph, replacement_descriptions['ROIFeatureExtractor2_output']) input_fpn_head_nodes = [Node(graph, node_id) for node_id in input_fpn_heads] fpn_roi_align = ExperimentalDetectronROIFeatureExtractor(graph, {'name': 'ROIFeatureExtractor_2', 'distribute_rois_between_levels': 1, @@ -121,7 +125,7 @@ def insert_ExperimentalDetectronROIFeatureExtractor2(graph: Graph): 'pyramid_scales': int64_array( [4, 8, 16, 32, 64]), 'sampling_ratio': 2, }).create_node() - fpn_roi_align.in_port(0).connect(Node(graph, '2490').out_port(0)) + fpn_roi_align.in_port(0).connect(Node(graph, replacement_descriptions['ROIFeatureExtractor2_input']).out_port(0)) for ind, fpn_node in enumerate(input_fpn_head_nodes): 
fpn_roi_align.in_port(ind + 1).connect(fpn_node.out_port(0)) diff --git a/model-optimizer/extensions/front/tf/TFSliceToSlice.py b/model-optimizer/extensions/front/tf/TFSliceToSlice.py index 8c03ca7376a1d6..62e59878452249 100644 --- a/model-optimizer/extensions/front/tf/TFSliceToSlice.py +++ b/model-optimizer/extensions/front/tf/TFSliceToSlice.py @@ -16,6 +16,7 @@ import numpy as np +from extensions.ops.Cast import Cast from extensions.ops.elementwise import Add, Equal from extensions.ops.select import Select from mo.front.common.replacement import FrontReplacementOp @@ -74,4 +75,7 @@ def replace_sub_graph(self, graph: Graph, match: dict): # out of select to end (2nd of slice) select_node.out_port(0).connect(slice_node.in_port(2)) + cast = Cast(graph, dict(name=sum_node.name + '/CastToI64', dst_type=np.int64)).create_node() + select_node.in_port(2).get_connection().insert_node(cast) + node.out_port(0).get_connection().set_source(slice_node.out_port(0)) diff --git a/model-optimizer/extensions/front/tf/TFSliceToSlice_test.py b/model-optimizer/extensions/front/tf/TFSliceToSlice_test.py index 14be81eb3c43cd..2919a71bfc47b0 100644 --- a/model-optimizer/extensions/front/tf/TFSliceToSlice_test.py +++ b/model-optimizer/extensions/front/tf/TFSliceToSlice_test.py @@ -37,6 +37,7 @@ **regular_op_with_empty_data('equal', {'op': 'Equal', 'type': 'Equal'}), **regular_op_with_empty_data('select', {'op': 'Select', 'type': 'Select'}), **regular_op_with_empty_data('slice', {'op': 'Slice', 'type': None}), + **regular_op_with_empty_data('cast', {'op': 'Cast', 'type': 'Convert'}), } @@ -68,7 +69,8 @@ def test_slice_replacer_begin_with_2_inputs(self): *connect_front('equal:0', 'select:0'), - *connect_front('end_const:0', 'select:2'), + *connect_front('end_const:0', 'cast:0'), + *connect_front('cast:0', 'select:2'), *connect_front('select:0', 'slice:2'), *connect_front('slice:0', 'output'), @@ -97,7 +99,8 @@ def test_slice_replacer(self): *connect_front('int32_max:0', '1:select'), *connect_front('minus_one:0', '1:equal'), *connect_front('equal:0', '0:select'), - *connect_front('end_const:0', '2:select'), + *connect_front('end_const:0', '0:cast'), + *connect_front('cast:0', '2:select'), *connect_front('select:0', '2:slice'), *connect_front('slice:0', 'output'), ], nodes_with_edges_only=True) diff --git a/model-optimizer/extensions/middle/SliceConverter.py b/model-optimizer/extensions/middle/SliceConverter.py index 5ed91a756ff11b..5a9df90917ea90 100644 --- a/model-optimizer/extensions/middle/SliceConverter.py +++ b/model-optimizer/extensions/middle/SliceConverter.py @@ -16,100 +16,117 @@ import numpy as np +from extensions.ops.Cast import Cast +from extensions.ops.gather import Gather +from mo.front.caffe.extractors.utils import get_canonical_axis_index from mo.front.common.partial_infer.utils import int64_array +from mo.front.tf.graph_utils import create_op_with_const_inputs from mo.graph.graph import Graph, rename_nodes +from mo.graph.port import Port from mo.middle.replacement import MiddleReplacementPattern +from mo.ops.clamp import Clamp +from mo.ops.concat import Concat from mo.ops.const import Const from mo.ops.strided_slice import StridedSlice -from mo.utils.error import Error -def convert_negative_indices(indices: np.array, shape: np.array): - for ind, value in enumerate(indices): - if value < 0: - indices[ind] += shape[ind] +def create_ss_interval_border(graph: Graph, slice_border_port: Port, shape: np.ndarray, axes: np.ndarray, node_name: str): + """ + This function creates "begin"/"end" parameters for 
the StridedSlice based on Slice's "starts"/"ends" + + :param graph: graph to operate on. + :param slice_border_port: node output port that provides "starts"/"ends" values for the Slice. + :param shape: input shape of the Slice + :param axes: axes that "starts" and "ends" apply to + :param node_name: Slice node name + :return: Concat node that forms "begin"/"end" values for the StridedSlice + """ + # the value for 'starts' or 'ends' might be maximum/minimum possible value of int64. This + # value must be converted to maximum/minimum of int32 because such big values do not fit into the int32 which is + # supported by the StridedSlice layer + clamp = create_op_with_const_inputs( + graph, Clamp, port_value_dict={1: np.iinfo(np.int32).min, 2: np.iinfo(np.int32).max}, + op_attrs=dict(name=node_name + '/Clamp')) + clamp.in_port(0).connect(slice_border_port) + # we have to convert "starts"/"ends" values from the network to one data type with constant values that are created + # here to prevent type errors in Concat node + cast = Cast(graph, dict(name=node_name + '/CastToI64', dst_type=np.int64)).create_node() + cast.in_port(0).connect(clamp.out_port(0)) + concat = Concat(graph, dict(name=node_name + '/Concat', axis=0)).create_node() + for value_idx, port_idx in enumerate(axes): + concat.add_input_port(port_idx) + # "axes" may not be sorted, so we need to split "starts"/"ends" values and connect each value to the correct + # Concat input port + value = create_op_with_const_inputs( + graph, Gather, port_value_dict={1: int64_array([value_idx]), 2: int64_array(0)}, + op_attrs={'name': node_name + '/Gather'}) + cast.out_port(0).connect(value.in_port(0)) + value.out_port(0).connect(concat.in_port(port_idx)) + for port_idx in range(len(shape)): + if not concat.is_in_port_connected(port_idx): + concat.add_input_port(port_idx) + # This border value would be ignored in StridedSlice because of the begin_mask\end_mask + const = Const(graph, dict(name=node_name + '/Const', value=int64_array([0]))).create_node() + const.out_port(0).connect(concat.in_port(port_idx)) + + return concat class ConvertSlice(MiddleReplacementPattern): """ - This class converts Slice operation to StridedSlice + This class converts a Slice operation to StridedSlice in reshape-able way by parsing the 'starts' and 'ends' + parameters based on the 'axes' parameter """ enabled = True - op = "Slice" force_clean_up = True - def run_after(self): - from extensions.middle.pass_separator import MiddleStart - return [MiddleStart] - - def pattern(self): - return dict( - nodes=[ - ('slice', dict(kind='op', op='Slice')) - ], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - node = match['slice'] - - input_shape = node.in_port(0).data.get_shape() - output_shape = node.out_port(0).data.get_shape() - starts = node.in_port(1).data.get_value() - ends = node.in_port(2).data.get_value() - if starts is None or ends is None: - raise Error('The input with starts or end is not constant for node {}'.format(node.id)) - - # the value for 'ends' is usually maximum possible value of int64. 
This - # value must be converted to maximum of int32 because such big values do not fit into the int32 which is - # supported by the StridedSlice layer - ends = np.clip(ends, np.iinfo(np.int32).min, np.iinfo(np.int32).max) - if node.is_in_port_connected(3): - axes = node.in_port(3).data.get_value() - if axes is None: - raise Error('The input with axes is not constant for node {}'.format(node.id)) - else: - axes = int64_array(list(range(starts.size))) - - if node.is_in_port_connected(4): - steps = node.in_port(4).data.get_value() - if steps is None: - raise Error('The input with steps is not constant for node {}'.format(node.id)) - else: - steps = np.ones([starts.size]) - - ss_begin_mask = np.zeros(len(input_shape), dtype=np.int32) - ss_end_mask = np.zeros(len(input_shape), dtype=np.int32) - ss_begin = np.zeros(len(input_shape), dtype=np.int32) - ss_end = np.zeros(len(input_shape), dtype=np.int32) - ss_step = np.ones(len(input_shape), dtype=np.int32) - - # prepare inputs and attributes for the StridedSlice layer - for i, axis in enumerate(axes): - if starts[i] != 0: + def find_and_replace_pattern(self, graph: Graph): + for node in graph.get_op_nodes(op='Slice'): + node_name = node.soft_get('name', node.id) + + input_shape = node.in_port(0).data.get_shape() + if node.is_in_port_connected(3): + axes = node.in_port(3).data.get_value().copy() + assert axes is not None, 'The input with axes is not constant for node {}'.format(node_name) + for i, val in enumerate(axes): + axes[i] = get_canonical_axis_index(input_shape, val) + else: + axes = int64_array(range(len(input_shape))) + + ss_begin = create_ss_interval_border(graph, node.in_port(1).get_source(), input_shape, axes, node_name) + ss_end = create_ss_interval_border(graph, node.in_port(2).get_source(), input_shape, axes, node_name) + node.in_port(1).disconnect() + node.in_port(2).disconnect() + rename_nodes([(ss_begin, node_name + '/Begin'), (ss_end, node_name + '/End')]) + + if node.is_in_port_connected(4): + steps = node.in_port(4).data.get_value() + assert steps is not None, 'The input with steps is not constant for node {}'.format(node_name) + else: + steps = np.ones([axes.size]) + + ss_begin_mask = np.zeros(len(input_shape), dtype=np.int64) + ss_end_mask = np.zeros(len(input_shape), dtype=np.int64) + ss_step = np.ones(len(input_shape), dtype=np.int64) + + for i, axis in enumerate(axes): ss_begin_mask[axis] = 1 - ss_begin[axis] = starts[i] - - ss_end_mask[axis] = 1 - ss_end[axis] = ends[i] - - ss_step[axis] = steps[i] - - slice_node_name = node.soft_get('name', node.id) - - begin_node = Const(graph, {'value': ss_begin, 'name': slice_node_name + '/begin'}).create_node() - end_node = Const(graph, {'value': ss_end, 'name': slice_node_name + '/end'}).create_node() - strides_node = Const(graph, {'value': ss_step, 'name': slice_node_name + '/stride'}).create_node() - - ss = StridedSlice(graph, dict(new_axis_mask=np.zeros(len(output_shape), dtype=np.int32), - shrink_axis_mask=np.zeros(len(output_shape), dtype=np.int32), - ellipsis_mask=np.zeros(len(output_shape), dtype=np.int32), - begin_mask=ss_begin_mask, - end_mask=ss_end_mask)).create_node() - rename_nodes([(node, slice_node_name + '_delete'), (ss, slice_node_name)]) - node.in_port(0).get_connection().set_destination(ss.in_port(0)) - begin_node.out_port(0).connect(ss.in_port(1)) - end_node.out_port(0).connect(ss.in_port(2)) - strides_node.out_port(0).connect(ss.in_port(3)) - node.out_port(0).get_connection().set_source(ss.out_port(0)) + ss_end_mask[axis] = 1 + ss_step[axis] = steps[i] + + 
ss_strides = Const(graph, dict(name=node_name + '/Strides', value=ss_step)).create_node() + + ss = StridedSlice(graph, dict(name='ss', new_axis_mask=np.zeros(len(input_shape), dtype=np.int64), + shrink_axis_mask=np.zeros(len(input_shape), dtype=np.int64), + ellipsis_mask=np.zeros(len(input_shape), dtype=np.int64), + begin_mask=ss_begin_mask, + end_mask=ss_end_mask)).create_node() + + node.in_port(0).get_connection().set_destination(ss.in_port(0)) + ss.in_port(1).connect(ss_begin.out_port(0)) + ss.in_port(2).connect(ss_end.out_port(0)) + ss.in_port(3).connect(ss_strides.out_port(0)) + node.out_port(0).get_connection().set_source(ss.out_port(0)) + + rename_nodes([(node, node_name + '/ShouldBeDeleted'), (ss, node_name)]) diff --git a/model-optimizer/extensions/middle/SliceConverter_test.py b/model-optimizer/extensions/middle/SliceConverter_test.py index 92b118d0605b20..63380c28292248 100644 --- a/model-optimizer/extensions/middle/SliceConverter_test.py +++ b/model-optimizer/extensions/middle/SliceConverter_test.py @@ -20,304 +20,377 @@ from extensions.middle.SliceConverter import ConvertSlice from mo.front.common.partial_infer.utils import int64_array -from mo.graph.graph import Node -from mo.ops.slice import Slice from mo.utils.ir_engine.compare_graphs import compare_graphs -from mo.utils.unittest.graph import build_graph +from mo.utils.unittest.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, \ + regular_op_with_empty_data, result, connect, connect_data nodes_attributes = { - # input data - 'placeholder_1': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_2': {'type': 'Const', 'kind': 'op', 'op': 'Const'}, - 'placeholder_3': {'type': 'Const', 'kind': 'op', 'op': 'Const'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Slice layer - 'slice': {'type': 'Slice', 'kind': 'op', 'op': 'Slice', 'name': 'slice_node'}, - 'slice_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Output operation - 'output_op': {'type': 'Const', 'value': None, 'kind': 'op', 'op': 'Const'}, - 'output_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'op_output': { 'kind': 'op', 'op': 'Result'}, - # StridedSlice layer - 'strided_slice': {'kind': 'op', 'op': 'StridedSlice', 'slices': None, 'shrink_axis_mask': None} + **regular_op_with_shaped_data('input', [2, 3, 300, 300], {'type': 'Parameter', 'op': 'Parameter'}), + **regular_op_with_empty_data('starts', {'op': 'Const', 'type': 'Const'}), + **regular_op_with_empty_data('ends', {'op': 'Const', 'type': 'Const'}), + **regular_op_with_empty_data('axes', {'op': 'Const', 'type': 'Const'}), + **regular_op_with_empty_data('steps', {'op': 'Const', 'type': 'Const'}), + **regular_op_with_empty_data('slice', {'op': 'Slice', 'type': None}), + + **regular_op_with_empty_data('ss_begin_cast', {'op': 'Cast', 'type': 'Convert', 'dst_type': np.int64}), + **regular_op_with_empty_data('ss_begin_clamp', {'op': 'Clamp', 'type': None}), + **regular_op_with_empty_data('ss_begin_clamp_min', {'value': np.iinfo(np.int32).min, 'op': 'Const', 'type': 'Const'}), + **regular_op_with_empty_data('ss_begin_clamp_max', {'value': np.iinfo(np.int32).max, 'op': 'Const', 'type': 'Const'}), + **regular_op_with_empty_data('ss_begin_gather_0', {'op': 'Gather', 'type': 'Gather'}), + 
**valued_const_with_data('ss_begin_gather_0_idx', int64_array([0])), + **regular_op_with_shaped_data('ss_begin_gather_0_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), + **regular_op_with_empty_data('ss_begin_gather_1', {'op': 'Gather', 'type': 'Gather'}), + **valued_const_with_data('ss_begin_gather_1_idx', int64_array([1])), + **regular_op_with_shaped_data('ss_begin_gather_1_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), + **regular_op_with_empty_data('ss_begin_gather_2', {'op': 'Gather', 'type': 'Gather'}), + **valued_const_with_data('ss_begin_gather_2_idx', int64_array([2])), + **regular_op_with_shaped_data('ss_begin_gather_2_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), + **regular_op_with_empty_data('ss_begin_gather_3', {'op': 'Gather', 'type': 'Gather'}), + **valued_const_with_data('ss_begin_gather_3_idx', int64_array([3])), + **regular_op_with_shaped_data('ss_begin_gather_3_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), + **regular_op_with_empty_data('ss_begin_const_0', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), + **regular_op_with_empty_data('ss_begin_const_1', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), + **regular_op_with_empty_data('ss_begin_const_2', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), + **regular_op_with_empty_data('ss_begin_const_3', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), + **regular_op_with_empty_data('ss_begin_concat', {'op': 'Concat', 'type': 'Concat'}), + + **regular_op_with_empty_data('ss_end_cast', {'op': 'Cast', 'type': 'Convert', 'dst_type': np.int64}), + **regular_op_with_empty_data('ss_end_clamp', {'op': 'Clamp', 'type': None}), + **regular_op_with_empty_data('ss_end_clamp_min', {'value': np.iinfo(np.int32).min, 'op': 'Const', 'type': 'Const'}), + **regular_op_with_empty_data('ss_end_clamp_max', {'value': np.iinfo(np.int32).max, 'op': 'Const', 'type': 'Const'}), + **regular_op_with_empty_data('ss_end_gather_0', {'op': 'Gather', 'type': 'Gather'}), + **valued_const_with_data('ss_end_gather_0_idx', int64_array([0])), + **regular_op_with_shaped_data('ss_end_gather_0_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), + **regular_op_with_empty_data('ss_end_gather_1', {'op': 'Gather', 'type': 'Gather'}), + **valued_const_with_data('ss_end_gather_1_idx', int64_array([1])), + **regular_op_with_shaped_data('ss_end_gather_1_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), + **regular_op_with_empty_data('ss_end_gather_2', {'op': 'Gather', 'type': 'Gather'}), + **valued_const_with_data('ss_end_gather_2_idx', int64_array([2])), + **regular_op_with_shaped_data('ss_end_gather_2_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), + **regular_op_with_empty_data('ss_end_gather_3', {'op': 'Gather', 'type': 'Gather'}), + **valued_const_with_data('ss_end_gather_3_idx', int64_array([3])), + **regular_op_with_shaped_data('ss_end_gather_3_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), + **regular_op_with_empty_data('ss_end_const_0', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), + **regular_op_with_empty_data('ss_end_const_1', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), + **regular_op_with_empty_data('ss_end_const_2', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), + **regular_op_with_empty_data('ss_end_const_3', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), + **regular_op_with_empty_data('ss_end_concat', {'op': 'Concat', 'type': 'Concat'}), + + 
**regular_op_with_empty_data('ss_strides', {'op': 'Const', 'type': 'Const'}), + **regular_op_with_empty_data('ss', {'op': 'StridedSlice', 'type': 'StridedSlice', + 'new_axis_mask': np.zeros(4, dtype=np.int64), + 'shrink_axis_mask': np.zeros(4, dtype=np.int64), + 'ellipsis_mask': np.zeros(4, dtype=np.int64)}), + **result('result') } +pattern_graph = [ + *connect('input:0', '0:slice'), + *connect('starts:0', '1:slice'), + *connect('ends:0', '2:slice'), + *connect('axes:0', '3:slice'), + *connect('steps:0', '4:slice'), + *connect('slice:0', '0:result') +] + +pattern_ref_graph = [ + *connect('input:0', '0:ss'), + *connect('starts:0', '0:ss_begin_clamp'), + *connect('ss_begin_clamp:0', '0:ss_begin_cast'), + *connect('ss_begin_clamp_min:0', '1:ss_begin_clamp'), + *connect('ss_begin_clamp_max:0', '2:ss_begin_clamp'), + *connect('ss_begin_concat:0', '1:ss'), + *connect('ends:0', '0:ss_end_clamp'), + *connect('ss_end_clamp:0', '0:ss_end_cast'), + *connect('ss_end_clamp_min:0', '1:ss_end_clamp'), + *connect('ss_end_clamp_max:0', '2:ss_end_clamp'), + *connect('ss_end_concat:0', '2:ss'), + *connect('ss_strides:0', '3:ss'), + *connect('ss:0', '0:result'), + + *connect('ss_begin_gather_0_idx:0', '1:ss_begin_gather_0'), + *connect('ss_begin_gather_0_axis:0', '2:ss_begin_gather_0'), + *connect('ss_begin_gather_1_idx:0', '1:ss_begin_gather_1'), + *connect('ss_begin_gather_1_axis:0', '2:ss_begin_gather_1'), + *connect('ss_begin_gather_2_idx:0', '1:ss_begin_gather_2'), + *connect('ss_begin_gather_2_axis:0', '2:ss_begin_gather_2'), + *connect('ss_begin_gather_3_idx:0', '1:ss_begin_gather_3'), + *connect('ss_begin_gather_3_axis:0', '2:ss_begin_gather_3'), + + *connect('ss_end_gather_0_idx:0', '1:ss_end_gather_0'), + *connect('ss_end_gather_0_axis:0', '2:ss_end_gather_0'), + *connect('ss_end_gather_1_idx:0', '1:ss_end_gather_1'), + *connect('ss_end_gather_1_axis:0', '2:ss_end_gather_1'), + *connect('ss_end_gather_2_idx:0', '1:ss_end_gather_2'), + *connect('ss_end_gather_2_axis:0', '2:ss_end_gather_2'), + *connect('ss_end_gather_3_idx:0', '1:ss_end_gather_3'), + *connect('ss_end_gather_3_axis:0', '2:ss_end_gather_3'), +] + class ConvertSliceTests(unittest.TestCase): - nodes_attributes = { - # input data - 'placeholder_1': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Slice layer inputs - 'starts': {'type': 'Const', 'kind': 'op', 'op': 'Const'}, - 'starts_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'ends': {'type': 'Const', 'kind': 'op', 'op': 'Const'}, - 'ends_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'strides': {'type': 'Const', 'kind': 'op', 'op': 'Const'}, - 'strides_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'axes': {'type': 'Const', 'kind': 'op', 'op': 'Const'}, - 'axes_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'steps': {'type': 'Const', 'kind': 'op', 'op': 'Const'}, - 'steps_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Slice layer - 'slice': {'type': 'Slice', 'kind': 'op', 'op': 'Slice', 'name': 'slice_node'}, - 'slice_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Output operation - 'output_op': {'type': 'Const', 'kind': 'op', 'op': 'Const'}, - 'output_data': {'shape': None, 'kind': 'data', 'data_type': None}, - 'op_output': {'kind': 'op', 'op': 'Result'}, - # StridedSlice layer - 'strided_slice': {'kind': 'op', 'op': 
'StridedSlice', 'slices': None, 'shrink_axis_mask': None} - } - - def test_slice_all_params(self): - input_shape = int64_array([5, 10, 20]) - starts_value = int64_array([4, 2]) - ends_value = int64_array([15, 8]) - axes_value = int64_array([2, 1]) - steps_value = int64_array([1, 1]) - - masks_value = np.zeros([len(input_shape)], dtype=np.int64) - graph = build_graph(self.nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'slice', {'in': 0}), - ('starts', 'starts_data'), - ('starts_data', 'slice', {'in': 1}), - ('ends', 'ends_data'), - ('ends_data', 'slice', {'in': 2}), - ('axes', 'axes_data'), - ('axes_data', 'slice', {'in': 3}), - ('steps', 'steps_data'), - ('steps_data', 'slice', {'in': 4}), - ('slice', 'slice_data'), - ('slice_data', 'output_op'), - ('output_op', 'output_data'), - ('output_data', 'op_output') - ], - {'placeholder_1_data': {'shape': input_shape}, - 'starts': {'shape': starts_value.shape, 'value': starts_value}, - 'starts_data': {'shape': starts_value.shape, 'value': starts_value}, - 'ends': {'shape': ends_value.shape, 'value': ends_value}, - 'ends_data': {'shape': ends_value.shape, 'value': ends_value}, - 'steps': {'shape': steps_value.shape, 'value': steps_value}, - 'steps_data': {'shape': steps_value.shape, 'value': steps_value}, - 'axes': {'shape': axes_value.shape, 'value': axes_value}, - 'axes_data': {'shape': axes_value.shape, 'value': axes_value}, - }, nodes_with_edges_only=True - ) - slice_node = Node(graph, 'slice') - Slice.infer(slice_node) - - pattern = ConvertSlice() - pattern.find_and_replace_pattern(graph) - - ss_node = Node(graph, graph.get_node_id_by_name('slice_node')) - assert ss_node.type == 'StridedSlice', 'Something wrong with transformed Slice node' - - graph_ref = build_graph(self.nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'strided_slice', {'in': 0}), - ('starts', 'starts_data'), - ('starts_data', 'strided_slice', {'in': 1}), - ('ends', 'ends_data'), - ('ends_data', 'strided_slice', {'in': 2}), - ('strides', 'strides_data'), - ('strides_data', 'strided_slice', {'in': 3}), - ('strided_slice', 'slice_data'), - ('slice_data', 'output_op'), - ('output_op', 'output_data'), - ('output_data', 'op_output') - ], - {'placeholder_1_data': {'shape': input_shape}, - 'strided_slice': {'new_axis_mask': masks_value, 'shrink_axis_mask': masks_value, - 'ellipsis_mask': masks_value, 'begin_mask': int64_array([0, 1, 1]), - 'end_mask': int64_array([0, 1, 1])}, - 'slice_data': {'shape': int64_array([5, 6, 11])} - }, nodes_with_edges_only=True - ) - (flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True) + + def test_convert_slice_to_strided_slice_one_axis(self): + graph = build_graph( + nodes_attrs=nodes_attributes, + edges=pattern_graph, + update_attributes={ + 'starts': {'value': int64_array([0]), 'shape': [1]}, + 'ends': {'value': int64_array([1]), 'shape': [1]}, + 'axes': {'value': int64_array([0]), 'shape': [1]}, + 'axes_d': {'value': int64_array([0]), 'shape': [1]}, + 'steps': {'value': int64_array([1]), 'shape': [1]}, + 'steps_d': {'value': int64_array([1]), 'shape': [1]} + }, + nodes_with_edges_only=True + ) + + ref_graph = build_graph( + nodes_attrs=nodes_attributes, + edges=pattern_ref_graph + [ + *connect('ss_begin_cast:0', '0:ss_begin_gather_0'), + *connect('ss_begin_gather_0:0', '0:ss_begin_concat'), + *connect('ss_begin_const_1:0', '1:ss_begin_concat'), + *connect('ss_begin_const_2:0', '2:ss_begin_concat'), + *connect('ss_begin_const_3:0', 
'3:ss_begin_concat'), + + *connect('ss_end_cast:0', '0:ss_end_gather_0'), + *connect('ss_end_gather_0:0', '0:ss_end_concat'), + *connect('ss_end_const_1:0', '1:ss_end_concat'), + *connect('ss_end_const_2:0', '2:ss_end_concat'), + *connect('ss_end_const_3:0', '3:ss_end_concat'), + ], + update_attributes={ + 'starts': {'value': int64_array([0]), 'shape': [1]}, + 'ends': {'value': int64_array([1]), 'shape': [1]}, + 'ss_strides': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]}, + 'ss': {'begin_mask': int64_array([1, 0, 0, 0]), 'end_mask': int64_array([1, 0, 0, 0])} + } + ) + ConvertSlice().find_and_replace_pattern(graph) + (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) self.assertTrue(flag, resp) - def test_no_steps_no_axes(self): - input_shape = int64_array([5, 10, 20]) - starts_value = int64_array([3, 2, 7]) - ends_value = int64_array([5, 8, 15]) - steps_value = int64_array([1, 1, 1]) - masks_value = np.zeros([len(input_shape)], dtype=np.int64) - graph = build_graph(self.nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'slice', {'in': 0}), - ('starts', 'starts_data'), - ('starts_data', 'slice', {'in': 1}), - ('ends', 'ends_data'), - ('ends_data', 'slice', {'in': 2}), - ('slice', 'slice_data'), - ('slice_data', 'output_op'), - ('output_op', 'output_data'), - ('output_data', 'op_output') - ], - {'placeholder_1_data': {'shape': input_shape}, - 'starts': {'shape': starts_value.shape, 'value': starts_value}, - 'starts_data': {'shape': starts_value.shape, 'value': starts_value}, - 'ends': {'shape': ends_value.shape, 'value': ends_value}, - 'ends_data': {'shape': ends_value.shape, 'value': ends_value}, - }, nodes_with_edges_only=True - ) - slice_node = Node(graph, 'slice') - Slice.infer(slice_node) - - pattern = ConvertSlice() - pattern.find_and_replace_pattern(graph) - - ss_node = Node(graph, graph.get_node_id_by_name('slice_node')) - assert ss_node.type == 'StridedSlice', 'Something wrong with transformed Slice node' - - graph_ref = build_graph(self.nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'strided_slice', {'in': 0}), - ('starts', 'starts_data'), - ('starts_data', 'strided_slice', {'in': 1}), - ('ends', 'ends_data'), - ('ends_data', 'strided_slice', {'in': 2}), - ('strides', 'strides_data'), - ('strides_data', 'strided_slice', {'in': 3}), - ('strided_slice', 'slice_data'), - ('slice_data', 'output_op'), - ('output_op', 'output_data'), - ('output_data', 'op_output') - ], - {'placeholder_1_data': {'shape': input_shape}, - 'strided_slice': {'new_axis_mask': masks_value, 'shrink_axis_mask': masks_value, - 'ellipsis_mask': masks_value, 'begin_mask': np.ones([3]), - 'end_mask': np.ones([3])}, - 'slice_data': {'shape': int64_array([2, 6, 8])} - }, nodes_with_edges_only=True - ) - (flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True) + def test_convert_slice_to_strided_slice_one_axis_steps_is_2(self): + graph = build_graph( + nodes_attrs=nodes_attributes, + edges=pattern_graph, + update_attributes={ + 'starts': {'value': int64_array([0]), 'shape': [1]}, + 'ends': {'value': int64_array([150]), 'shape': [1]}, + 'axes': {'value': int64_array([2]), 'shape': [1]}, + 'axes_d': {'value': int64_array([2]), 'shape': [1]}, + 'steps': {'value': int64_array([2]), 'shape': [1]}, + 'steps_d': {'value': int64_array([2]), 'shape': [1]} + }, + nodes_with_edges_only=True + ) + + ref_graph = build_graph( + nodes_attrs=nodes_attributes, + edges=pattern_ref_graph + [ + 
*connect('ss_begin_cast:0', '0:ss_begin_gather_0'), + *connect('ss_begin_gather_0:0', '2:ss_begin_concat'), + *connect('ss_begin_const_0:0', '0:ss_begin_concat'), + *connect('ss_begin_const_1:0', '1:ss_begin_concat'), + *connect('ss_begin_const_3:0', '3:ss_begin_concat'), + + *connect('ss_end_cast:0', '0:ss_end_gather_0'), + *connect('ss_end_gather_0:0', '2:ss_end_concat'), + *connect('ss_end_const_0:0', '0:ss_end_concat'), + *connect('ss_end_const_1:0', '1:ss_end_concat'), + *connect('ss_end_const_3:0', '3:ss_end_concat'), + ], + update_attributes={ + 'starts': {'value': int64_array([0]), 'shape': [1]}, + 'ends': {'value': int64_array([150]), 'shape': [1]}, + 'ss_strides': {'value': int64_array([1, 1, 2, 1]), 'shape': [4]}, + 'ss': {'begin_mask': int64_array([0, 0, 1, 0]), 'end_mask': int64_array([0, 0, 1, 0])} + } + ) + ConvertSlice().find_and_replace_pattern(graph) + (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) self.assertTrue(flag, resp) - def test_no_axes(self): - input_shape = int64_array([5, 10, 20]) - starts_value = int64_array([3, 2, 7]) - ends_value = int64_array([5, 8, 15]) - steps_value = int64_array([2, 3, 1]) - masks_value = np.zeros([len(input_shape)], dtype=np.int64) - graph = build_graph(self.nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'slice', {'in': 0}), - ('starts', 'starts_data'), - ('starts_data', 'slice', {'in': 1}), - ('ends', 'ends_data'), - ('ends_data', 'slice', {'in': 2}), - ('steps', 'steps_data'), - ('steps_data', 'slice', {'in': 4}), - ('slice', 'slice_data'), - ('slice_data', 'output_op'), - ('output_op', 'output_data'), - ('output_data', 'op_output') - ], - {'placeholder_1_data': {'shape': input_shape}, - 'starts': {'shape': starts_value.shape, 'value': starts_value}, - 'starts_data': {'shape': starts_value.shape, 'value': starts_value}, - 'ends': {'shape': ends_value.shape, 'value': ends_value}, - 'ends_data': {'shape': ends_value.shape, 'value': ends_value}, - 'steps': {'shape': steps_value.shape, 'value': steps_value}, - 'steps_data': {'shape': steps_value.shape, 'value': steps_value}, - }, nodes_with_edges_only=True - ) - slice_node = Node(graph, 'slice') - Slice.infer(slice_node) - - pattern = ConvertSlice() - pattern.find_and_replace_pattern(graph) - - ss_node = Node(graph, graph.get_node_id_by_name('slice_node')) - assert ss_node.type == 'StridedSlice', 'Something wrong with transformed Slice node' - - graph_ref = build_graph(self.nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'strided_slice', {'in': 0}), - ('starts', 'starts_data'), - ('starts_data', 'strided_slice', {'in': 1}), - ('ends', 'ends_data'), - ('ends_data', 'strided_slice', {'in': 2}), - ('strides', 'strides_data'), - ('strides_data', 'strided_slice', {'in': 3}), - ('strided_slice', 'slice_data'), - ('slice_data', 'output_op'), - ('output_op', 'output_data'), - ('output_data', 'op_output') - ], - {'placeholder_1_data': {'shape': input_shape}, - 'strided_slice': {'new_axis_mask': masks_value, 'shrink_axis_mask': masks_value, - 'ellipsis_mask': masks_value, 'begin_mask': np.ones([3]), - 'end_mask': np.ones([3])}, - 'slice_data': {'shape': int64_array([1, 2, 8])} - }, nodes_with_edges_only=True - ) - (flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True) + def test_convert_slice_to_strided_slice_two_axes(self): + graph = build_graph( + nodes_attrs=nodes_attributes, + edges=pattern_graph, + update_attributes={ + 'starts': {'value': int64_array([0, 
0]), 'shape': [2]}, + 'ends': {'value': int64_array([150, 150]), 'shape': [2]}, + 'axes': {'value': int64_array([2, 3]), 'shape': [2]}, + 'axes_d': {'value': int64_array([2, 3]), 'shape': [2]}, + 'steps': {'value': int64_array([1, 1]), 'shape': [2]}, + 'steps_d': {'value': int64_array([1, 1]), 'shape': [2]} + }, + nodes_with_edges_only=True + ) + + ref_graph = build_graph( + nodes_attrs=nodes_attributes, + edges=pattern_ref_graph + [ + *connect('ss_begin_cast:0', '0:ss_begin_gather_0'), + *connect('ss_begin_gather_0:0', '2:ss_begin_concat'), + *connect_data('ss_begin_cast:0', '0:ss_begin_gather_1'), + *connect('ss_begin_gather_1:0', '3:ss_begin_concat'), + *connect('ss_begin_const_0:0', '0:ss_begin_concat'), + *connect('ss_begin_const_1:0', '1:ss_begin_concat'), + + *connect('ss_end_cast:0', '0:ss_end_gather_0'), + *connect('ss_end_gather_0:0', '2:ss_end_concat'), + *connect_data('ss_end_cast:0', '0:ss_end_gather_1'), + *connect('ss_end_gather_1:0', '3:ss_end_concat'), + *connect('ss_end_const_0:0', '0:ss_end_concat'), + *connect('ss_end_const_1:0', '1:ss_end_concat'), + ], + update_attributes={ + 'starts': {'value': int64_array([0, 0]), 'shape': [2]}, + 'ends': {'value': int64_array([150, 150]), 'shape': [2]}, + 'ss_strides': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]}, + 'ss': {'begin_mask': int64_array([0, 0, 1, 1]), 'end_mask': int64_array([0, 0, 1, 1])} + } + ) + ConvertSlice().find_and_replace_pattern(graph) + (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) self.assertTrue(flag, resp) - def test_no_steps(self): - input_shape = int64_array([5, 10, 20]) - starts_value = int64_array([4, 2]) - ends_value = int64_array([15, 8]) - axes_value = int64_array([2, 1]) - masks_value = np.zeros([len(input_shape)], dtype=np.int64) - graph = build_graph(self.nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'slice', {'in': 0}), - ('starts', 'starts_data'), - ('starts_data', 'slice', {'in': 1}), - ('ends', 'ends_data'), - ('ends_data', 'slice', {'in': 2}), - ('axes', 'axes_data'), - ('axes_data', 'slice', {'in': 3}), - ('slice', 'slice_data'), - ('slice_data', 'output_op'), - ('output_op', 'output_data'), - ('output_data', 'op_output') - ], - {'placeholder_1_data': {'shape': input_shape}, - 'starts': {'shape': starts_value.shape, 'value': starts_value}, - 'starts_data': {'shape': starts_value.shape, 'value': starts_value}, - 'ends': {'shape': ends_value.shape, 'value': ends_value}, - 'ends_data': {'shape': ends_value.shape, 'value': ends_value}, - 'axes': {'shape': axes_value.shape, 'value': axes_value}, - 'axes_data': {'shape': axes_value.shape, 'value': axes_value}, - }, nodes_with_edges_only=True - ) - slice_node = Node(graph, 'slice') - Slice.infer(slice_node) - - pattern = ConvertSlice() - pattern.find_and_replace_pattern(graph) - - ss_node = Node(graph, graph.get_node_id_by_name('slice_node')) - assert ss_node.type == 'StridedSlice', 'Something wrong with transformed Slice node' - - graph_ref = build_graph(self.nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'strided_slice', {'in': 0}), - ('starts', 'starts_data'), - ('starts_data', 'strided_slice', {'in': 1}), - ('ends', 'ends_data'), - ('ends_data', 'strided_slice', {'in': 2}), - ('strides', 'strides_data'), - ('strides_data', 'strided_slice', {'in': 3}), - ('strided_slice', 'slice_data'), - ('slice_data', 'output_op'), - ('output_op', 'output_data'), - ('output_data', 'op_output') - ], - {'placeholder_1_data': {'shape': 
input_shape}, - 'strided_slice': {'new_axis_mask': masks_value, 'shrink_axis_mask': masks_value, - 'ellipsis_mask': masks_value, 'begin_mask': int64_array([0, 1, 1]), - 'end_mask': int64_array([0, 1, 1])}, - 'slice_data': {'shape': int64_array([5, 6, 11])} - }, nodes_with_edges_only=True - ) - (flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True) + def test_convert_slice_to_strided_slice_three_axes(self): + graph = build_graph( + nodes_attrs=nodes_attributes, + edges=pattern_graph, + update_attributes={ + 'starts': {'value': int64_array([0, 0, 0]), 'shape': [3]}, + 'ends': {'value': int64_array([2, 150, 150]), 'shape': [3]}, + 'axes': {'value': int64_array([1, 2, 3]), 'shape': [3]}, + 'axes_d': {'value': int64_array([1, 2, 3]), 'shape': [3]}, + 'steps': {'value': int64_array([1, 1, 1]), 'shape': [3]}, + 'steps_d': {'value': int64_array([1, 1, 1]), 'shape': [3]} + }, + nodes_with_edges_only=True + ) + + ref_graph = build_graph( + nodes_attrs=nodes_attributes, + edges=pattern_ref_graph + [ + *connect('ss_begin_cast:0', '0:ss_begin_gather_0'), + *connect('ss_begin_gather_0:0', '1:ss_begin_concat'), + *connect_data('ss_begin_cast:0', '0:ss_begin_gather_1'), + *connect('ss_begin_gather_1:0', '2:ss_begin_concat'), + *connect_data('ss_begin_cast:0', '0:ss_begin_gather_2'), + *connect('ss_begin_gather_2:0', '3:ss_begin_concat'), + *connect('ss_begin_const_0:0', '0:ss_begin_concat'), + + *connect('ss_end_cast:0', '0:ss_end_gather_0'), + *connect('ss_end_gather_0:0', '1:ss_end_concat'), + *connect_data('ss_end_cast:0', '0:ss_end_gather_1'), + *connect('ss_end_gather_1:0', '2:ss_end_concat'), + *connect_data('ss_end_cast:0', '0:ss_end_gather_2'), + *connect('ss_end_gather_2:0', '3:ss_end_concat'), + *connect('ss_end_const_0:0', '0:ss_end_concat'), + ], + update_attributes={ + 'starts': {'value': int64_array([0, 0, 0]), 'shape': [3]}, + 'ends': {'value': int64_array([2, 150, 150]), 'shape': [3]}, + 'ss_strides': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]}, + 'ss': {'begin_mask': int64_array([0, 1, 1, 1]), 'end_mask': int64_array([0, 1, 1, 1])} + } + ) + ConvertSlice().find_and_replace_pattern(graph) + (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) self.assertTrue(flag, resp) + + def test_convert_slice_to_strided_slice_not_sorted_axes(self): + graph = build_graph( + nodes_attrs=nodes_attributes, + edges=pattern_graph, + update_attributes={ + 'starts': {'value': int64_array([0, 1, 1, 0]), 'shape': [4]}, + 'ends': {'value': int64_array([1, 150, 150, 2]), 'shape': [4]}, + 'axes': {'value': int64_array([0, 2, 3, 1]), 'shape': [4]}, + 'axes_d': {'value': int64_array([0, 2, 3, 1]), 'shape': [4]}, + 'steps': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]}, + 'steps_d': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]} + }, + nodes_with_edges_only=True + ) + + ref_graph = build_graph( + nodes_attrs=nodes_attributes, + edges=pattern_ref_graph + [ + *connect('ss_begin_cast:0', '0:ss_begin_gather_0'), + *connect('ss_begin_gather_0:0', '0:ss_begin_concat'), + *connect_data('ss_begin_cast:0', '0:ss_begin_gather_1'), + *connect('ss_begin_gather_1:0', '2:ss_begin_concat'), + *connect_data('ss_begin_cast:0', '0:ss_begin_gather_2'), + *connect('ss_begin_gather_2:0', '3:ss_begin_concat'), + *connect_data('ss_begin_cast:0', '0:ss_begin_gather_3'), + *connect('ss_begin_gather_3:0', '1:ss_begin_concat'), + + *connect('ss_end_cast:0', '0:ss_end_gather_0'), + *connect('ss_end_gather_0:0', '0:ss_end_concat'), + *connect_data('ss_end_cast:0', 
'0:ss_end_gather_1'), + *connect('ss_end_gather_1:0', '2:ss_end_concat'), + *connect_data('ss_end_cast:0', '0:ss_end_gather_2'), + *connect('ss_end_gather_2:0', '3:ss_end_concat'), + *connect_data('ss_end_cast:0', '0:ss_end_gather_3'), + *connect('ss_end_gather_3:0', '1:ss_end_concat'), + ], + update_attributes={ + 'starts': {'value': int64_array([0, 1, 1, 0]), 'shape': [4]}, + 'ends': {'value': int64_array([1, 150, 150, 2]), 'shape': [4]}, + 'ss_strides': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]}, + 'ss': {'begin_mask': int64_array([1, 1, 1, 1]), 'end_mask': int64_array([1, 1, 1, 1])} + } + ) + ConvertSlice().find_and_replace_pattern(graph) + (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) + self.assertTrue(flag, resp) + + def test_convert_slice_to_strided_slice_without_axes_and_steps(self): + graph = build_graph( + nodes_attrs=nodes_attributes, + edges=[ + *connect('input:0', '0:slice'), + *connect('starts:0', '1:slice'), + *connect('ends:0', '2:slice'), + *connect('slice:0', '0:result') + ], + update_attributes={ + 'starts': {'value': int64_array([0, 0, 0, 0]), 'shape': [4]}, + 'ends': {'value': int64_array([1, 2, 150, 150]), 'shape': [4]}, + }, + nodes_with_edges_only=True + ) + + ref_graph = build_graph( + nodes_attrs=nodes_attributes, + edges=pattern_ref_graph + [ + *connect('ss_begin_cast:0', '0:ss_begin_gather_0'), + *connect('ss_begin_gather_0:0', '0:ss_begin_concat'), + *connect_data('ss_begin_cast:0', '0:ss_begin_gather_1'), + *connect('ss_begin_gather_1:0', '1:ss_begin_concat'), + *connect_data('ss_begin_cast:0', '0:ss_begin_gather_2'), + *connect('ss_begin_gather_2:0', '2:ss_begin_concat'), + *connect_data('ss_begin_cast:0', '0:ss_begin_gather_3'), + *connect('ss_begin_gather_3:0', '3:ss_begin_concat'), + + *connect('ss_end_cast:0', '0:ss_end_gather_0'), + *connect('ss_end_gather_0:0', '0:ss_end_concat'), + *connect_data('ss_end_cast:0', '0:ss_end_gather_1'), + *connect('ss_end_gather_1:0', '1:ss_end_concat'), + *connect_data('ss_end_cast:0', '0:ss_end_gather_2'), + *connect('ss_end_gather_2:0', '2:ss_end_concat'), + *connect_data('ss_end_cast:0', '0:ss_end_gather_3'), + *connect('ss_end_gather_3:0', '3:ss_end_concat'), + ], + update_attributes={ + 'starts': {'value': int64_array([0, 0, 0, 0]), 'shape': [4]}, + 'ends': {'value': int64_array([1, 2, 150, 150]), 'shape': [4]}, + 'ss_strides': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]}, + 'ss': {'begin_mask': int64_array([1, 1, 1, 1]), 'end_mask': int64_array([1, 1, 1, 1])} + } + ) + ConvertSlice().find_and_replace_pattern(graph) + (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) + self.assertTrue(flag, resp) \ No newline at end of file diff --git a/model-optimizer/extensions/ops/MatMul.py b/model-optimizer/extensions/ops/MatMul.py index d96e47c144ff30..4b0f1b4d1cde7e 100644 --- a/model-optimizer/extensions/ops/MatMul.py +++ b/model-optimizer/extensions/ops/MatMul.py @@ -25,43 +25,7 @@ class MatMul(Op): """ - MatMul operation takes two tensors and performs usual matrix-matrix multiplication, matrix-vector multiplication - or vector-matrix multiplication depending on argument shapes. - - Input tensors can have any rank >= 1. - - Two right-most axes in each tensor are interpreted as matrix rows and columns dimensions while - all left-most axes (if present) are interpreted as multi-dimensional batch: - - [BATCH_DIM_1, BATCH_DIM_2,..., BATCH_DIM_K, ROW_INDEX_DIM, COL_INDEX_DIM] - - The operation supports usual broadcast semantics for batch dimensions. 
- It enables multiplication of batch of pairs of matrices in a single shot. - - Before matrix multiplication, there is an implicit shape alignment for input arguments. - It consists of the following steps: - - 1. If rank of an input less than 2 it is unsqueezed to 2D tensor by adding axes with size 1 to the left of the shape - For example, if input has shape [S] it will be reshaped to [1, S]. It is applied for each input independently - 2. Applied transpositions specified by optional transpose_a and transpose_b attributes - 3. If ranks of input arguments are different after steps 1 and 2, each is unsqueezed from the left side of - the shape by necessary number of axes to make both shapes of the same rank - 4. Usual rules of the broadcasting are applied for batch dimensions - - Two attributes, transpose_a and transpose_b specifies embedded transposition for two right-most dimension for the - first and the second input tensors correspondingly. It implies swapping of ROW_INDEX_DIM and COL_INDEX_DIM in - the corresponding input tensor. Batch dimensions are not affected by these attributes. - - Shape inference mechanism: - 0-port aligned input shape: - [BATCH_DIM_1, BATCH_DIM_2,..., BATCH_DIM_K, A_ROW_INDEX_DIM, A_COL_INDEX_DIM] - 1-port aligned input shape: - [BATCH_DIM_1, BATCH_DIM_2,..., BATCH_DIM_K, B_ROW_INDEX_DIM, B_COL_INDEX_DIM] - where A_COL_INDEX_DIM == B_ROW_INDEX_DIM - - Output shape: - [BATCH_DIM_1, BATCH_DIM_2,..., BATCH_DIM_K, A_ROW_INDEX_DIM, B_COL_INDEX_DIM] - + Operation is specified at docs/ops/matrix/MatMul_1.md """ op = 'MatMul' @@ -72,7 +36,7 @@ def __init__(self, graph: Graph, attrs: dict): 'version': 'opset1', 'transpose_a': False, 'transpose_b': False, - 'infer': __class__.infer, + 'infer': self.infer, 'in_ports_count': 2, 'out_ports_count': 1, } @@ -88,8 +52,6 @@ def supported_attrs(self): def shape_alignment(node: Node): """ Specification of MatMul operation allows inputs to be aligned together before matrix multiplication. - Alignment steps described in MatMul operation doc-string upper in current file. - Current method raises an error if input shapes are not valid at any step of alignment process :return: aligned copies of both input shapes """ @@ -107,12 +69,12 @@ def shape_alignment(node: Node): "".format(i, node_name, input_shape) assert input_shape.size >= 1, "MatMul doesn't support inputs with rank lower than 1. 
{} input of `{}` " \ "node has shape {}".format(i, node_name, input_shape) - + rank = input_shape.size # shape alignment - if input_shape.size < 2: - input_shape = np.insert(input_shape, 0, 1) - if (i == 0 and transpose_a) or (i == 1 and transpose_b): + if rank != 1 and ((i == 0 and transpose_a) or (i == 1 and transpose_b)): input_shape[-2], input_shape[-1] = input_shape[-1], input_shape[-2] + if rank == 1: + input_shape = np.insert(input_shape, int(i == 1), 1) max_shape_length = max(input_shapes[0].size, input_shapes[1].size) input_shape = np.insert(input_shape, 0, [1] * (max_shape_length - input_shape.size)) @@ -176,6 +138,14 @@ def infer(node: Node): "".format(name, [A_shape, B_shape]) output_shape = np.concatenate((A_shape[:-1], B_shape[-1:])) + + if node.in_port(0).data.get_shape().size == 1: + assert output_shape[-2] == 1 + output_shape = np.delete(output_shape, -2, 0) + if node.in_port(1).data.get_shape().size == 1: + assert output_shape[-1] == 1 + output_shape = np.delete(output_shape, -1, 0) + node.out_port(0).data.set_shape(output_shape) in_ch = 0 if not node.transpose_b else 1 @@ -187,13 +157,10 @@ def infer(node: Node): def transpose(value): num_of_dims = value.ndim if num_of_dims == 1: - return np.reshape(value, (len(value), 1)) - elif num_of_dims == 2: - return np.transpose(value, [1, 0]) + return value else: return np.transpose(value, [*range(0, num_of_dims - 2), num_of_dims - 1, num_of_dims - 2]) - # MatMul-like operation from frameworks class GemmONNX(Op): diff --git a/model-optimizer/extensions/ops/MatMul_test.py b/model-optimizer/extensions/ops/MatMul_test.py index 361038d47190b5..7b4625921c6c5e 100644 --- a/model-optimizer/extensions/ops/MatMul_test.py +++ b/model-optimizer/extensions/ops/MatMul_test.py @@ -46,10 +46,14 @@ class TestMatMul(unittest.TestCase): ] @generate(*[ - ([1024], [1024, 1000], [1, 1000], False, False), + ([1024], [1024, 1000], [1000], False, False), + ([1024], [1024, 1000], [1000], True, False), + ([1024], [1000, 1024], [1000], True, True), ([1, 1024], [1024, 1000], [1, 1000], False, False), ([1, 1024], [1000, 1024], [1, 1000], False, True), - ([1024], [1024, 1000], [1, 1000], False, False), + ([1024, 1000], [1000], [1024], False, False), + ([1024, 1000], [1000], [1024], False, True), + ([1000, 1024], [1000], [1024], True, True), ([10, 1024], [1024, 1000], [10, 1000], False, False), ([5, 10, 1024], [1024, 1000], [5, 10, 1000], False, False), ([5, 10, 1024], [5, 1024, 1000], [5, 10, 1000], False, False), @@ -67,8 +71,8 @@ def test_positive_matmul_infer(self, A_shape, B_shape, C_shape, transpose_a, tra node = Node(graph, 'mat_mul') MatMul.infer(node) - msg = "MatMul infer failed for case: A_shape={}, B_shape={}, transpose_a={}, transpose_b={}" \ - "expexted_shape={}, actual_shape={}" + msg = "MatMul infer failed for case: A_shape={}, B_shape={}, transpose_a={}, transpose_b={} " \ + "expected_shape={}, actual_shape={}" self.assertTrue(np.array_equal(graph.node['mat_mul_d']['shape'], int64_array(C_shape)), msg.format(A_shape, B_shape, transpose_a, transpose_b, C_shape, diff --git a/model-optimizer/extensions/ops/MatMul_value_propagation_test.py b/model-optimizer/extensions/ops/MatMul_value_propagation_test.py index e0a98b5fd02e6a..9d5a267dddeddd 100644 --- a/model-optimizer/extensions/ops/MatMul_value_propagation_test.py +++ b/model-optimizer/extensions/ops/MatMul_value_propagation_test.py @@ -73,6 +73,10 @@ class TestMatMulValuePropagation(unittest.TestCase): ([1, 3, 5, 8, 8], np.arange(1, 1 + 8 * 8 * 3 * 5).reshape((1, 3, 5, 8, 8)), [4, 8], 
np.arange(-2, -2 + 4 * 8).reshape((4, 8)), True, True), + + ([2], np.zeros((2)), [2], np.zeros((2)), False, False), + ([2], np.zeros((2)), [1, 2, 3], np.zeros((1, 2, 3)), False, False), + ([1, 2, 3], np.zeros((1, 2, 3)), [3], np.zeros((3)), False, False), ]) def test_value_propagation(self, a_shape, a_value, b_shape, b_value, transpose_a, transpose_b): graph = build_graph( diff --git a/model-optimizer/install_prerequisites/install_prerequisites.sh b/model-optimizer/install_prerequisites/install_prerequisites.sh index 3d5d79213b2500..345dbe01cda489 100755 --- a/model-optimizer/install_prerequisites/install_prerequisites.sh +++ b/model-optimizer/install_prerequisites/install_prerequisites.sh @@ -55,7 +55,9 @@ elif [[ -f /etc/lsb-release ]]; then fi if [[ $DISTRO == "centos" ]]; then - if command -v python3.7 >/dev/null 2>&1; then + if command -v python3.8 >/dev/null 2>&1; then + python_binary=python3.8 + elif command -v python3.7 >/dev/null 2>&1; then python_binary=python3.7 elif command -v python3.6 >/dev/null 2>&1; then python_binary=python3.6 @@ -65,17 +67,20 @@ if [[ $DISTRO == "centos" ]]; then if [ -z "$python_binary" ]; then sudo -E yum install -y https://centos7.iuscommunity.org/ius-release.rpm - #sudo -E yum install -y python36u easy_install python36u-pip sudo -E yum install -y python36u python36u-pip sudo -E pip3.6 install virtualenv python_binary=python3.6 fi + # latest pip is needed to install tensorflow + sudo -E "$python_binary" -m pip install --upgrade pip elif [[ $DISTRO == "ubuntu" ]]; then sudo -E apt update sudo -E apt -y --no-install-recommends install python3-pip python3-venv python_binary=python3 + sudo -E "$python_binary" -m pip install --upgrade pip elif [[ "$OSTYPE" == "darwin"* ]]; then python_binary=python3 + python3 -m pip install --upgrade pip fi diff --git a/model-optimizer/mo/graph/perm_inputs.py b/model-optimizer/mo/graph/perm_inputs.py index 41d331006bd26d..87ab580cc4b033 100644 --- a/model-optimizer/mo/graph/perm_inputs.py +++ b/model-optimizer/mo/graph/perm_inputs.py @@ -169,6 +169,7 @@ def transpose(op_node: Node, port_info: str, input_port: int): transpose = create_op_with_const_inputs( graph, Transpose, {1: permutation.perm}, {'name': transpose_name, 'override_output_shape': True}) op_node.in_port(input_port).get_connection().insert_node(transpose) + transpose.infer(transpose) def transpose_nchw_to_nhwc(op_node: Node, port_info: str, input_port: int): @@ -186,6 +187,7 @@ def transpose_nchw_to_nhwc(op_node: Node, port_info: str, input_port: int): transpose = create_op_with_const_inputs( graph, Transpose, {1: perm}, {'name': transpose_name, 'override_output_shape': True}) op_node.in_port(input_port).get_connection().insert_node(transpose) + transpose.infer(transpose) class PermuteInputs: diff --git a/model-optimizer/mo/utils/broadcasting.py b/model-optimizer/mo/utils/broadcasting.py index c1d251a98c7356..d900514f033e0b 100644 --- a/model-optimizer/mo/utils/broadcasting.py +++ b/model-optimizer/mo/utils/broadcasting.py @@ -134,7 +134,10 @@ def explicit_broadcasting(input_value: np.array, target_shape: np.array, axes_ma :return: broadcasted value """ res_shape, normalized_axes_mapping = explicit_shape_broadcasting(input_value.shape, target_shape, axes_mapping) + # TODO: Replace the per-axis expansion below with a single 'numpy.expand_dims' call once the numpy version in requirements is >= 1.18.x (that release adds support for a tuple of axes). 
expand_dim_axis = set(np.arange(len(target_shape))) - set(normalized_axes_mapping) - - input_expanded = np.expand_dims(input_value.copy(), axis=list(expand_dim_axis)) + input_expanded = input_value.copy() + + for axis in sorted(list(expand_dim_axis)): + input_expanded = np.expand_dims(input_expanded, axis) return np.broadcast_to(input_expanded, res_shape) diff --git a/ngraph/changes.md b/ngraph/changes.md index bd5d3c281eff6d..e3e7dc43922c82 100644 --- a/ngraph/changes.md +++ b/ngraph/changes.md @@ -103,9 +103,9 @@ methods have been decorated with deprecated warnings which may be enabled by set To update, remove the passed argument. For example, ```C++ // Old -make_shared(make_shared(element::f32, Shape{2, 4})); +make_shared(make_shared(element::Type_t::f32, Shape{2, 4})); // New (remove TensorViewType) -make_shared(element::f32, Shape{2, 4}); +make_shared(element::Type_t::f32, Shape{2, 4}); // Old make_shared(results, result_type, parameters); diff --git a/ngraph/core/builder/include/ngraph/builder/autobroadcast.hpp b/ngraph/core/builder/include/ngraph/builder/autobroadcast.hpp index a4569cb1bf826f..c6b78ea4a93890 100644 --- a/ngraph/core/builder/include/ngraph/builder/autobroadcast.hpp +++ b/ngraph/core/builder/include/ngraph/builder/autobroadcast.hpp @@ -169,7 +169,7 @@ namespace ngraph std::size_t start_match_axis) { auto shape_const = - op::Constant::create(element::u64, Shape{new_shape.size()}, new_shape); + op::Constant::create(element::Type_t::u64, Shape{new_shape.size()}, new_shape); return std::make_shared( value, shape_const, diff --git a/ngraph/core/builder/src/builder/autobroadcast.cpp b/ngraph/core/builder/src/builder/autobroadcast.cpp index 9ac059f47c761f..129c5403357fc6 100644 --- a/ngraph/core/builder/src/builder/autobroadcast.cpp +++ b/ngraph/core/builder/src/builder/autobroadcast.cpp @@ -177,8 +177,8 @@ namespace ngraph if (!broadcast_axes.empty()) { - auto shape_const = - op::Constant::create(element::u64, Shape{output_shape.size()}, output_shape); + auto shape_const = op::Constant::create( + element::Type_t::u64, Shape{output_shape.size()}, output_shape); broadcasted_node = make_shared( broadcasted_node, shape_const, @@ -236,8 +236,8 @@ namespace ngraph trimmed_value = builder::opset1::reshape(value, trimmed_value_shape); } - auto shape_const = - op::Constant::create(element::u64, Shape{output_shape.size()}, output_shape); + auto shape_const = op::Constant::create( + element::Type_t::u64, Shape{output_shape.size()}, output_shape); auto value_bcast = make_shared( trimmed_value, shape_const, opset1::get_axes_mapping_output(output_shape, axes)); @@ -354,7 +354,8 @@ namespace ngraph iota(begin(axes) + start_match_axis, end(axes), start_match_axis + input_shape.size()); auto axes_mapping = opset1::get_axes_mapping(output_shape, axes); - return op::Constant::create(element::i64, Shape{axes_mapping.size()}, axes_mapping); + return op::Constant::create( + element::Type_t::i64, Shape{axes_mapping.size()}, axes_mapping); } namespace opset1 @@ -434,14 +435,15 @@ namespace ngraph vector mapping(input_shape.size()); iota(begin(mapping), end(mapping), start_match_axis); - return op::Constant::create(element::i64, Shape{mapping.size()}, mapping); + return op::Constant::create(element::Type_t::i64, Shape{mapping.size()}, mapping); } Output get_axes_mapping_output(const Shape& output_shape, const AxisSet& broadcast_axes) { vector axes_mapping{get_axes_mapping(output_shape, broadcast_axes)}; - return op::Constant::create(element::i64, Shape{axes_mapping.size()}, axes_mapping); + return 
op::Constant::create( + element::Type_t::i64, Shape{axes_mapping.size()}, axes_mapping); } Output make_broadcast(const Output& node, @@ -450,7 +452,8 @@ namespace ngraph { return make_shared( node, - op::Constant::create(element::i64, Shape{target_shape.size()}, target_shape), + op::Constant::create( + element::Type_t::i64, Shape{target_shape.size()}, target_shape), get_axes_mapping_output(target_shape, broadcast_axes)); } @@ -460,7 +463,8 @@ namespace ngraph { return make_shared( node, - op::Constant::create(element::i64, Shape{target_shape.size()}, target_shape), + op::Constant::create( + element::Type_t::i64, Shape{target_shape.size()}, target_shape), get_axes_mapping_output(target_shape, node.get_shape(), start_match_axis)); } diff --git a/ngraph/core/builder/src/builder/reduce_ops.cpp b/ngraph/core/builder/src/builder/reduce_ops.cpp index ede1e90bce0d3c..305171c2baf2bf 100644 --- a/ngraph/core/builder/src/builder/reduce_ops.cpp +++ b/ngraph/core/builder/src/builder/reduce_ops.cpp @@ -49,10 +49,10 @@ namespace ngraph const auto dim_values = std::make_shared( value_shape, reduction_axes, - ngraph::opset1::Constant::create(element::i64, {}, {0})); + ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0})); return std::make_shared( - dim_values, ngraph::opset1::Constant::create(element::i64, {}, {0})); + dim_values, ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0})); } std::shared_ptr builder::opset1::mean(const Output& value, @@ -62,7 +62,7 @@ namespace ngraph std::shared_ptr elems_number; const auto value_elem_type = value.get_element_type(); const auto reduction_axes_const = ngraph::opset1::Constant::create( - element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()); + element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()); const auto value_elems_sum = std::make_shared(value, reduction_axes_const, keep_dims); if (value.get_partial_shape().is_static()) @@ -109,7 +109,7 @@ namespace ngraph diff = std::make_shared( std::make_shared(diff, diff), ngraph::opset1::Constant::create( - element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()), + element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()), false); const auto& et = value.get_element_type(); diff --git a/ngraph/core/builder/src/builder/reshape.cpp b/ngraph/core/builder/src/builder/reshape.cpp index cc52942cea5e33..fe5500ad9ec170 100644 --- a/ngraph/core/builder/src/builder/reshape.cpp +++ b/ngraph/core/builder/src/builder/reshape.cpp @@ -47,13 +47,13 @@ shared_ptr builder::opset1::reshape(const Output& value, const Shape auto value_rank = value.get_shape().size(); AxisVector axes_vector(value_rank); std::iota(axes_vector.begin(), axes_vector.end(), 0); - auto axes = op::Constant::create(element::i64, Shape{value_rank}, axes_vector); + auto axes = op::Constant::create(element::Type_t::i64, Shape{value_rank}, axes_vector); return std::make_shared(value, axes); } else { auto out_pattern = op::Constant::create( - element::i64, Shape{shape.size()}, vector(shape.begin(), shape.end())); + element::Type_t::i64, Shape{shape.size()}, vector(shape.begin(), shape.end())); return make_shared(value, out_pattern, false) ->add_provenance_group_members_above({value}); @@ -63,7 +63,7 @@ shared_ptr builder::opset1::reshape(const Output& value, const Shape shared_ptr builder::opset1::reorder_axes(const Output& value, vector axes_order) { const auto axes_order_const = - op::Constant::create(element::i64, + op::Constant::create(element::Type_t::i64, 
Shape{axes_order.size()}, vector(axes_order.begin(), axes_order.end())); return make_shared(value, axes_order_const) @@ -83,7 +83,7 @@ shared_ptr builder::opset1::transpose(const Output& value) const auto input_rank = std::make_shared(std::make_shared(value)); - const auto neg_one = ngraph::opset1::Constant::create(element::i64, Shape{}, {-1}); + const auto neg_one = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{}, {-1}); const auto start_node = std::make_shared(input_rank, neg_one); const auto reverse_axes_order = std::make_shared(reshape(start_node, Shape{}), // start @@ -114,7 +114,7 @@ namespace ngraph get_normalized_axis_node(const std::shared_ptr node_rank, int64_t axis) { auto axis_node = - ngraph::opset1::Constant::create(element::i64, Shape{1}, {axis}); + ngraph::opset1::Constant::create(element::Type_t::i64, Shape{1}, {axis}); // shortcut for alredy positive value if (axis >= 0) { @@ -138,11 +138,11 @@ shared_ptr builder::opset1::flatten(const Output& value, int axis) shared_ptr output_shape; if (axis == 0) { - output_shape = ngraph::opset1::Constant::create(element::i64, Shape{2}, {1, -1}); + output_shape = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{2}, {1, -1}); } else if (axis == 1) { - output_shape = ngraph::opset1::Constant::create(element::i64, Shape{2}, {0, -1}); + output_shape = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{2}, {0, -1}); } else { @@ -152,15 +152,15 @@ shared_ptr builder::opset1::flatten(const Output& value, int axis) const auto first_part_dims = make_shared( value_shape, - ngraph::opset1::Constant::create(element::i64, {1}, {0}), + ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {0}), axis_node, vector{}, vector{}); const auto first_part_dims_length = make_shared( - first_part_dims, ngraph::opset1::Constant::create(element::i64, {}, {0}), true); + first_part_dims, ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0}), true); const auto remaining_part_length = - ngraph::opset1::Constant::create(element::i64, {1}, {-1}); + ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {-1}); output_shape = make_shared( OutputVector{first_part_dims_length, remaining_part_length}, 0); @@ -230,19 +230,21 @@ shared_ptr builder::opset1::collapse(const Output& value, const auto rank = make_shared(shape); // Split lengths used in VariadicSplit - const auto start_axis_node = ngraph::opset1::Constant::create(element::i64, {1}, {start_axis}); - const auto end_axis_node = ngraph::opset1::Constant::create(element::i64, {1}, {end_axis + 1}); + const auto start_axis_node = + ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {start_axis}); + const auto end_axis_node = + ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {end_axis + 1}); const auto collapsed_axis = make_shared(end_axis_node, start_axis_node); const auto post_axis = make_shared(rank, end_axis_node); const auto split_lengths = make_shared( OutputVector{start_axis_node, collapsed_axis, post_axis}, 0); - const auto split_axis = ngraph::opset1::Constant::create(element::i64, {}, {0}); + const auto split_axis = ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0}); const auto split_node = make_shared(shape, split_axis, split_lengths); - const auto reduced_axis = ngraph::opset1::Constant::create(element::i64, {1}, {0}); + const auto reduced_axis = ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {0}); const auto collapsed_axis_size = make_shared(split_node->output(1), reduced_axis, true); diff --git 
a/ngraph/core/builder/src/builder/split.cpp b/ngraph/core/builder/src/builder/split.cpp index 7b254d3f0759b4..3e47f07a2e5d12 100644 --- a/ngraph/core/builder/src/builder/split.cpp +++ b/ngraph/core/builder/src/builder/split.cpp @@ -25,9 +25,9 @@ OutputVector builder::opset1::split(const Output& value, const std::vector& split_lengths, int64_t axis) { - const auto axis_node = ngraph::opset1::Constant::create(element::i64, Shape{}, {axis}); - const auto split_lengths_node = - ngraph::opset1::Constant::create(element::u64, Shape{split_lengths.size()}, split_lengths); + const auto axis_node = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{}, {axis}); + const auto split_lengths_node = ngraph::opset1::Constant::create( + element::Type_t::u64, Shape{split_lengths.size()}, split_lengths); const auto variadic_split = std::make_shared(value, axis_node, split_lengths_node); @@ -36,7 +36,7 @@ OutputVector builder::opset1::split(const Output& value, OutputVector builder::opset1::split(const Output& value, size_t num_splits, int64_t axis) { - const auto axis_node = ngraph::opset1::Constant::create(element::i64, Shape{}, {axis}); + const auto axis_node = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{}, {axis}); const auto split = std::make_shared(value, axis_node, num_splits); return split->outputs(); diff --git a/ngraph/core/include/ngraph/node.hpp b/ngraph/core/include/ngraph/node.hpp index b2b77731c2ee80..31220dd9b6084f 100644 --- a/ngraph/core/include/ngraph/node.hpp +++ b/ngraph/core/include/ngraph/node.hpp @@ -33,6 +33,7 @@ #include "ngraph/attribute_visitor.hpp" #include "ngraph/check.hpp" #include "ngraph/coordinate.hpp" +#include "ngraph/coordinate_diff.hpp" #include "ngraph/deprecated.hpp" #include "ngraph/descriptor/input.hpp" #include "ngraph/descriptor/output.hpp" diff --git a/ngraph/core/include/ngraph/op/bucketize.hpp b/ngraph/core/include/ngraph/op/bucketize.hpp index 1d9452aeb4d3f1..5449da11a7900d 100644 --- a/ngraph/core/include/ngraph/op/bucketize.hpp +++ b/ngraph/core/include/ngraph/op/bucketize.hpp @@ -40,7 +40,7 @@ namespace ngraph /// edge of interval. default true = includes right edge Bucketize(const Output& data, const Output& buckets, - const element::Type output_type = element::i64, + const element::Type output_type = element::Type_t::i64, const bool with_right_bound = true); virtual void validate_and_infer_types() override; diff --git a/ngraph/core/include/ngraph/op/constant.hpp b/ngraph/core/include/ngraph/op/constant.hpp index f5e97b71f03c57..22c90b3e383f4c 100644 --- a/ngraph/core/include/ngraph/op/constant.hpp +++ b/ngraph/core/include/ngraph/op/constant.hpp @@ -273,31 +273,31 @@ namespace ngraph } /// \brief Returns the value of the constant node as a Shape object - /// Can only be used on element::i64 nodes and interprets + /// Can only be used on element::Type_t::i64 nodes and interprets /// negative values as zeros. Shape get_shape_val() const; /// \brief Returns the value of the constant node as a Strides /// object - /// Can only be used on element::i64 nodes and interprets + /// Can only be used on element::Type_t::i64 nodes and interprets /// negative values as zeros. Strides get_strides_val() const; /// \brief Returns the value of the constant node as a Coordinate /// object - /// Can only be used on element::i64 nodes and interprets + /// Can only be used on element::Type_t::i64 nodes and interprets /// negative values as zeros. 
Coordinate get_coordinate_val() const; /// \brief Returns the value of the constant node as a /// CoordinateDiff object - /// Can only be used on element::i64 nodes. + /// Can only be used on element::Type_t::i64 nodes. CoordinateDiff get_coordinate_diff_val() const; /// \brief Returns the value of the constant node as an AxisVector /// object - /// Can only be used on element::i64 nodes and interprets + /// Can only be used on element::Type_t::i64 nodes and interprets /// negative values as zeros. AxisVector get_axis_vector_val() const; /// \brief Returns the value of the constant node as an AxisSet /// object - /// Can only be used on element::i64 nodes and interprets + /// Can only be used on element::Type_t::i64 nodes and interprets /// negative values as zeros. /// Repeated values are allowed. AxisSet get_axis_set_val() const; diff --git a/ngraph/core/include/ngraph/op/fake_quantize.hpp b/ngraph/core/include/ngraph/op/fake_quantize.hpp index 081c9ab370cbe9..7b4c5a8b707850 100644 --- a/ngraph/core/include/ngraph/op/fake_quantize.hpp +++ b/ngraph/core/include/ngraph/op/fake_quantize.hpp @@ -85,7 +85,7 @@ namespace ngraph private: std::size_t m_levels; - AutoBroadcastSpec m_auto_broadcast; + AutoBroadcastSpec m_auto_broadcast = op::AutoBroadcastType::NUMPY; }; } using v0::FakeQuantize; diff --git a/ngraph/core/include/ngraph/op/lstm_sequence.hpp b/ngraph/core/include/ngraph/op/lstm_sequence.hpp index 81cf782ac40768..fd7a946c10304a 100644 --- a/ngraph/core/include/ngraph/op/lstm_sequence.hpp +++ b/ngraph/core/include/ngraph/op/lstm_sequence.hpp @@ -117,7 +117,7 @@ namespace ngraph R, B, Constant::create( - element::f32, + element::Type_t::f32, Shape{(lstm_direction == direction::BIDIRECTIONAL ? 2UL : 1UL), 3UL * static_cast(hidden_size)}, std::vector{0.f}), diff --git a/ngraph/core/include/ngraph/op/max_pool.hpp b/ngraph/core/include/ngraph/op/max_pool.hpp index 51fd44ad2750b7..ebb624fd266f0c 100644 --- a/ngraph/core/include/ngraph/op/max_pool.hpp +++ b/ngraph/core/include/ngraph/op/max_pool.hpp @@ -49,24 +49,8 @@ namespace ngraph const Shape& pads_begin, const Shape& pads_end, const Shape& kernel, - op::RoundingType rounding_mode, - const PadType& auto_pad); - - /// \brief Constructs a batched max pooling operation. - /// - /// \param arg The node producing the input data batch tensor. - /// \param strides The strides. - /// \param pads_begin The beginning of padding shape. - /// \param pads_end The end of padding shape. - /// \param kernel The kernel shape. - /// \param rounding_mode Whether to use ceiling or floor rounding type while - /// computing output shape. 
- MaxPool(const Output& arg, - const Strides& strides, - const Shape& pads_begin, - const Shape& pads_end, - const Shape& kernel, - op::RoundingType rounding_mode); + op::RoundingType rounding_mode = op::RoundingType::FLOOR, + const PadType& auto_pad = op::PadType::EXPLICIT); bool visit_attributes(AttributeVisitor& visitor) override; size_t get_version() const override { return 1; } @@ -108,7 +92,7 @@ namespace ngraph Shape m_pads_begin; Shape m_pads_end; PadType m_auto_pad; - op::RoundingType m_rounding_type{op::RoundingType::FLOOR}; + op::RoundingType m_rounding_type; private: bool update_auto_padding(const PartialShape& in_shape, diff --git a/ngraph/core/include/ngraph/op/non_max_suppression.hpp b/ngraph/core/include/ngraph/op/non_max_suppression.hpp index b6a93610f62a3d..0154cf3733f355 100644 --- a/ngraph/core/include/ngraph/op/non_max_suppression.hpp +++ b/ngraph/core/include/ngraph/op/non_max_suppression.hpp @@ -125,14 +125,15 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); /// \brief Constructs a NonMaxSuppression operation with default values for the last /// 3 inputs @@ -143,11 +144,12 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); bool visit_attributes(AttributeVisitor& visitor) override; void validate_and_infer_types() override; @@ -176,7 +178,7 @@ namespace ngraph protected: BoxEncodingType m_box_encoding = BoxEncodingType::CORNER; bool m_sort_result_descending = true; - ngraph::element::Type m_output_type = ngraph::element::i64; + ngraph::element::Type m_output_type = ngraph::element::Type_t::i64; void validate(); int64_t max_boxes_output_from_input() const; }; @@ -205,14 +207,15 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool 
sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); /// \brief Constructs a NonMaxSuppression operation with default values for the last /// 3 inputs @@ -223,11 +226,12 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); void validate_and_infer_types() override; @@ -261,11 +265,12 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); /// \brief Constructs a NonMaxSuppression operation with default values in the last. /// 3 inputs. @@ -278,12 +283,13 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); /// \brief Constructs a NonMaxSuppression operation with default values in the last. /// 2 inputs. 
@@ -297,13 +303,14 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); /// \brief Constructs a NonMaxSuppression operation with default value in the last. /// input. @@ -318,14 +325,15 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); /// \brief Constructs a NonMaxSuppression operation. 
/// @@ -340,15 +348,16 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const Output& soft_nms_sigma, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const Output& soft_nms_sigma, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); bool visit_attributes(AttributeVisitor& visitor) override; void validate_and_infer_types() override; @@ -382,7 +391,7 @@ namespace ngraph protected: BoxEncodingType m_box_encoding = BoxEncodingType::CORNER; bool m_sort_result_descending = true; - ngraph::element::Type m_output_type = ngraph::element::i64; + ngraph::element::Type m_output_type = ngraph::element::Type_t::i64; void validate(); }; } // namespace v5 diff --git a/ngraph/core/include/ngraph/op/non_zero.hpp b/ngraph/core/include/ngraph/op/non_zero.hpp index 9f7886c79c3cec..2f0053431f6502 100644 --- a/ngraph/core/include/ngraph/op/non_zero.hpp +++ b/ngraph/core/include/ngraph/op/non_zero.hpp @@ -74,7 +74,7 @@ namespace ngraph const HostTensorVector& inputs) const override; protected: - element::Type m_output_type = element::i64; + element::Type m_output_type = element::Type_t::i64; }; } using v3::NonZero; diff --git a/ngraph/core/include/ngraph/op/roi_pooling.hpp b/ngraph/core/include/ngraph/op/roi_pooling.hpp index e1d7073ea78950..0c45f2e4b7d54c 100644 --- a/ngraph/core/include/ngraph/op/roi_pooling.hpp +++ b/ngraph/core/include/ngraph/op/roi_pooling.hpp @@ -32,7 +32,7 @@ namespace ngraph ROIPooling() = default; /// \brief Constructs a ROIPooling operation /// - /// \param input Input feature map {N, C, ...} + /// \param input Input feature map {N, C, H, W} /// \param coords Coordinates of bounding boxes /// \param output_size Height/Width of ROI output features /// \param spatial_scale Ratio of input feature map over input image size @@ -41,7 +41,7 @@ namespace ngraph const Output& coords, const Shape& output_size, const float spatial_scale, - const std::string& method); + const std::string& method = "max"); void validate_and_infer_types() override; @@ -58,7 +58,10 @@ namespace ngraph float m_spatial_scale; std::string m_method; }; - } + + } // namespace v0 using v0::ROIPooling; - } -} + + } // namespace op + +} // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/scatter_nd_update.hpp b/ngraph/core/include/ngraph/op/scatter_nd_update.hpp index 1894704416007a..5df323666bee62 100644 --- a/ngraph/core/include/ngraph/op/scatter_nd_update.hpp +++ b/ngraph/core/include/ngraph/op/scatter_nd_update.hpp @@ -33,7 +33,8 @@ namespace ngraph const NodeTypeInfo& get_type_info() const override { return type_info; } ScatterNDUpdate() = default; /// \param inputs Tensor - /// \param indices Index tensor: Data type must be `element::i32` or `element::i64` + /// \param indices Index tensor: Data type must be `element::Type_t::i32` or + /// `element::Type_t::i64` /// \param 
updates Tensor: Must have same type as inputs ScatterNDUpdate(const Output& inputs, const Output& indices, diff --git a/ngraph/core/include/ngraph/op/shape_of.hpp b/ngraph/core/include/ngraph/op/shape_of.hpp index 38aa6d3b31ceb9..cc322eafb8db4c 100644 --- a/ngraph/core/include/ngraph/op/shape_of.hpp +++ b/ngraph/core/include/ngraph/op/shape_of.hpp @@ -32,7 +32,8 @@ namespace ngraph const NodeTypeInfo& get_type_info() const override { return type_info; } ShapeOf() = default; /// \brief Constructs a shape-of operation. - ShapeOf(const Output& arg, const element::Type output_type = element::i64); + ShapeOf(const Output& arg, + const element::Type output_type = element::Type_t::i64); bool visit_attributes(AttributeVisitor& visitor) override; virtual std::shared_ptr diff --git a/ngraph/core/include/ngraph/op/topk.hpp b/ngraph/core/include/ngraph/op/topk.hpp index 8a6b13da13de96..c35830b7e2553a 100644 --- a/ngraph/core/include/ngraph/op/topk.hpp +++ b/ngraph/core/include/ngraph/op/topk.hpp @@ -57,14 +57,14 @@ namespace ngraph const int64_t axis, const std::string& mode, const std::string& sort, - const element::Type& index_element_type = element::i32); + const element::Type& index_element_type = element::Type_t::i32); TopK(const Output& data, const Output& k, const int64_t axis, const Mode mode, const SortType sort, - const element::Type& index_element_type = element::i32); + const element::Type& index_element_type = element::Type_t::i32); bool visit_attributes(AttributeVisitor& visitor) override; void validate_and_infer_types() override; @@ -104,7 +104,7 @@ namespace ngraph uint64_t m_normalized_axis; Mode m_mode; SortType m_sort; - element::Type m_index_element_type{element::i32}; + element::Type m_index_element_type{element::Type_t::i32}; virtual size_t read_k_from_constant_node(const std::shared_ptr& node, const element::Type& k_element_type) const; @@ -146,14 +146,14 @@ namespace ngraph const int64_t axis, const std::string& mode, const std::string& sort, - const element::Type& index_element_type = element::i32); + const element::Type& index_element_type = element::Type_t::i32); TopK(const Output& data, const Output& k, const int64_t axis, const Mode mode, const SortType sort, - const element::Type& index_element_type = element::i32); + const element::Type& index_element_type = element::Type_t::i32); bool visit_attributes(AttributeVisitor& visitor) override; void validate_and_infer_types() override; virtual std::shared_ptr diff --git a/ngraph/core/include/ngraph/op/util/attr_types.hpp b/ngraph/core/include/ngraph/op/util/attr_types.hpp index 5456247bb836c8..48117bafe0dc7b 100644 --- a/ngraph/core/include/ngraph/op/util/attr_types.hpp +++ b/ngraph/core/include/ngraph/op/util/attr_types.hpp @@ -66,7 +66,9 @@ namespace ngraph /// Floor(num_dims/2) at the beginning and /// Ceil(num_dims/2) at the end /// VALID - No padding - /// + /// AUTO - Deprecated. User should not use it in the future + /// NOTSET - Deprecated. 
User should not use it in the future + enum class PadType { EXPLICIT = 0, diff --git a/ngraph/core/include/ngraph/pattern/op/branch.hpp b/ngraph/core/include/ngraph/pattern/op/branch.hpp index 4afcd128af8d07..d73f6baa0a7e18 100644 --- a/ngraph/core/include/ngraph/pattern/op/branch.hpp +++ b/ngraph/core/include/ngraph/pattern/op/branch.hpp @@ -44,7 +44,7 @@ namespace ngraph Branch() : Pattern(OutputVector{}) { - set_output_type(0, element::f32, Shape{}); + set_output_type(0, element::Type_t::f32, Shape{}); } void set_destination(const Output& destination) diff --git a/ngraph/core/include/ngraph/pattern/op/label.hpp b/ngraph/core/include/ngraph/pattern/op/label.hpp index e172f9702828fd..9ced55996a020a 100644 --- a/ngraph/core/include/ngraph/pattern/op/label.hpp +++ b/ngraph/core/include/ngraph/pattern/op/label.hpp @@ -47,7 +47,7 @@ namespace ngraph /// Example: /// \code{.cpp} /// auto add = a + b; // a and b are op::Parameter in this example - /// auto label = std::make_shared(element::f32, + /// auto label = std::make_shared(element::Type_t::f32, /// Shape{2,2}, /// nullptr, /// OutputVector{add}); @@ -61,7 +61,7 @@ namespace ngraph set_output_type(0, type, s); } - explicit Label(const element::Type& type = element::dynamic, + explicit Label(const element::Type& type = element::Type_t::dynamic, const PartialShape& s = PartialShape::dynamic()) : Label(type, s, [](const Output&) { return true; }, OutputVector()) { diff --git a/ngraph/core/include/ngraph/specialize_function.hpp b/ngraph/core/include/ngraph/specialize_function.hpp index 2270e132a8baed..820d6fc5a44c23 100644 --- a/ngraph/core/include/ngraph/specialize_function.hpp +++ b/ngraph/core/include/ngraph/specialize_function.hpp @@ -76,10 +76,12 @@ namespace ngraph /// because when we reconstruct the new x node, it will see that the shapes are inconsistent /// for elementwise add. /// - /// Specialization of element types is also possible: `element::dynamic` can be specialized + /// Specialization of element types is also possible: `element::Type_t::dynamic` can be + /// specialized /// to a concrete element type or left dynamic; but a concrete element type can only be - /// specialized to itself (e.g., specialization does not allow you to change `element::i32` - /// to `element::i64`). + /// specialized to itself (e.g., specialization does not allow you to change + /// `element::Type_t::i32` + /// to `element::Type_t::i64`). /// /// Finally, it is possible to specialize parameter values. If the ith element of /// `parameter_values` is not `nullptr`, and fully static element type and shape has been diff --git a/ngraph/core/include/ngraph/type/element_type.hpp b/ngraph/core/include/ngraph/type/element_type.hpp index 9bd5edbca84342..34ce17e48bea54 100644 --- a/ngraph/core/include/ngraph/type/element_type.hpp +++ b/ngraph/core/include/ngraph/type/element_type.hpp @@ -91,11 +91,12 @@ namespace ngraph size_t bitwidth() const; // The name of this type, the enum name of this type const std::string& get_type_name() const; + bool operator==(const Type_t& other) const; + bool operator!=(const Type_t& other) const { return !(*this == other); } bool operator==(const Type& other) const; bool operator!=(const Type& other) const { return !(*this == other); } bool operator<(const Type& other) const; friend NGRAPH_API std::ostream& operator<<(std::ostream&, const Type&); - static std::vector get_known_types(); /// \brief Checks whether this element type is merge-compatible with `t`. /// \param t The element type to compare this element type to. 
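The element_type.hpp hunk above adds `operator==` and `operator!=` overloads taking `Type_t`, so an `element::Type` can be compared against the enum directly once the global constants are deprecated in the hunk that follows. A short sketch, assuming only an `element::Type` value obtained elsewhere (illustrative, not part of the patch):

// Compare an element::Type against Type_t enum values via the new overloads.
ngraph::element::Type et = ngraph::element::Type_t::f32;
bool is_f32  = (et == ngraph::element::Type_t::f32);   // operator==(const Type_t&)
bool not_i64 = (et != ngraph::element::Type_t::i64);   // operator!=(const Type_t&)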
@@ -130,21 +131,50 @@ namespace ngraph typedef std::vector TypeVector; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::undefined instead.") extern NGRAPH_API const Type undefined; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::dynamic instead.") extern NGRAPH_API const Type dynamic; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::boolean instead.") extern NGRAPH_API const Type boolean; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::bf16 instead.") extern NGRAPH_API const Type bf16; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::f16 instead.") extern NGRAPH_API const Type f16; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::f32 instead.") extern NGRAPH_API const Type f32; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::f64 instead.") extern NGRAPH_API const Type f64; + NGRAPH_DEPRECATED("This global element type was deprecated. Please use Type_t::i8 instead.") extern NGRAPH_API const Type i8; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::i16 instead.") extern NGRAPH_API const Type i16; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::i32 instead.") extern NGRAPH_API const Type i32; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::i64 instead.") extern NGRAPH_API const Type i64; + NGRAPH_DEPRECATED("This global element type was deprecated. Please use Type_t::u1 instead.") extern NGRAPH_API const Type u1; + NGRAPH_DEPRECATED("This global element type was deprecated. Please use Type_t::u8 instead.") extern NGRAPH_API const Type u8; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::u16 instead.") extern NGRAPH_API const Type u16; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::u32 instead.") extern NGRAPH_API const Type u32; + NGRAPH_DEPRECATED( + "This global element type was deprecated. 
Please use Type_t::u64 instead.") extern NGRAPH_API const Type u64; template diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/gather.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/gather.hpp index 3dbfecf836bf7e..577f317b62bf5e 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/gather.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/gather.hpp @@ -51,13 +51,10 @@ namespace ngraph const Shape& out_shape, size_t axis) { - using namespace std; // prepare shape of params_prime (remove first "axis" dimensions) - Shape params_prime_shape(params_shape); - params_prime_shape.erase(params_prime_shape.begin(), - params_prime_shape.begin() + axis); + const Shape params_prime_shape(params_shape.begin() + axis, params_shape.end()); // prepare shape of indices_prime - size_t indices_ndim = static_cast(indices_shape.size()); + const size_t indices_ndim = indices_shape.size(); Shape indices_prime_shape; // prepare shape of out_prime (same as params_prime except for first dim) Shape out_prime_shape(params_prime_shape); @@ -73,8 +70,8 @@ namespace ngraph indices_prime_shape.emplace_back(1); // Create a CoordinateTransform for "out" that visits the outer "axis" dimensions - size_t out_ndim = static_cast(out_shape.size()); - Coordinate out_outer_start_corner(out_ndim, 0); + const size_t out_ndim = out_shape.size(); + const Coordinate out_outer_start_corner(out_ndim, 0); Coordinate out_outer_end_corner(out_shape); for (size_t i = axis; i < out_ndim; i++) { @@ -90,44 +87,43 @@ namespace ngraph out_outer_axis_order); // Create a CoordinateTransform for "params" that visits the outer "axis" dimensions - size_t params_ndim = static_cast(params_shape.size()); - Coordinate params_outer_start_corner(params_ndim, 0); + const size_t params_ndim = params_shape.size(); + const Coordinate params_outer_start_corner(params_ndim, 0); Coordinate params_outer_end_corner(params_shape); for (size_t i = axis; i < params_ndim; i++) { params_outer_end_corner[i] = 1; } - Strides params_outer_strides(params_ndim, 1); + const Strides params_outer_strides(params_ndim, 1); AxisVector params_outer_axis_order(params_ndim); std::iota(params_outer_axis_order.begin(), params_outer_axis_order.end(), 0); - CoordinateTransform params_outer_transform(params_shape, - params_outer_start_corner, - params_outer_end_corner, - params_outer_strides, - params_outer_axis_order); + const CoordinateTransform params_outer_transform(params_shape, + params_outer_start_corner, + params_outer_end_corner, + params_outer_strides, + params_outer_axis_order); // Create a CoordinateTransform for "indices" that visits only the first element // along inner most axis - Coordinate indices_outer_start_corner(indices_ndim, 0); + const Coordinate indices_outer_start_corner(indices_ndim, 0); Coordinate indices_outer_end_corner(indices_shape); if (indices_ndim > 0) { indices_outer_end_corner[indices_ndim - 1] = 1; } - Strides indices_outer_strides(indices_ndim, 1); + const Strides indices_outer_strides(indices_ndim, 1); AxisVector indices_outer_axis_order(indices_ndim); std::iota(indices_outer_axis_order.begin(), indices_outer_axis_order.end(), 0); - CoordinateTransform indices_outer_transform(indices_shape, - indices_outer_start_corner, - indices_outer_end_corner, - indices_outer_strides, - indices_outer_axis_order); + const CoordinateTransform indices_outer_transform(indices_shape, + indices_outer_start_corner, + indices_outer_end_corner, + indices_outer_strides, + indices_outer_axis_order); // Create an 
inner CoordinateTransfrom for "out" - size_t out_inner_ndim = out_ndim - axis; - Shape out_inner_shape(out_shape); - out_inner_shape.erase(out_inner_shape.begin(), out_inner_shape.begin() + axis); - Coordinate out_inner_start_corner(out_inner_ndim, 0); + const size_t out_inner_ndim = out_ndim - axis; + const Shape out_inner_shape(out_shape.begin() + axis, out_shape.end()); + const Coordinate out_inner_start_corner(out_inner_ndim, 0); Coordinate out_inner_end_corner(out_inner_shape); if (indices_ndim > 0) { @@ -137,14 +133,14 @@ namespace ngraph { out_inner_end_corner[i] = 1; } - Strides out_inner_strides(out_inner_ndim, 1); + const Strides out_inner_strides(out_inner_ndim, 1); AxisVector out_inner_axis_order(out_inner_ndim); std::iota(out_inner_axis_order.begin(), out_inner_axis_order.end(), 0); - CoordinateTransform out_inner_transform(out_inner_shape, - out_inner_start_corner, - out_inner_end_corner, - out_inner_strides, - out_inner_axis_order); + const CoordinateTransform out_inner_transform(out_inner_shape, + out_inner_start_corner, + out_inner_end_corner, + out_inner_strides, + out_inner_axis_order); auto out_outer_coord_iter = out_outer_transform.begin(); for (const Coordinate& params_outer_coord : params_outer_transform) @@ -169,11 +165,11 @@ namespace ngraph params_prime_shape, indices_prime_shape, out_prime_shape); - out_inner_coord_iter++; + ++out_inner_coord_iter; } - out_outer_coord_iter++; + ++out_outer_coord_iter; } } - } - } -} + } // namespace reference + } // namespace runtime +} // namespace ngraph diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/gather_nd.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/gather_nd.hpp index 7857aa06e72809..805c035c5a61b9 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/gather_nd.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/gather_nd.hpp @@ -16,6 +16,8 @@ #pragma once +#include +#include #include #include "ngraph/coordinate_transform.hpp" @@ -26,171 +28,169 @@ namespace ngraph { namespace reference { - // foreach leaf_vector_index in indices.shape[:-1] - // vector = indices[leaf_vector_index] - // out[leaf_vector_index:] = params[vector] - template - void gather_nd_batch(const T* params, - const U* indices, - T* out, - const Shape& params_shape, - const Shape& indices_shape, - const Shape& out_shape) + namespace { - using namespace std; - // Create a CoordinateTransform for "indices" that visits only the first element - // along inner most axis - size_t indices_ndim = static_cast(indices_shape.size()); - Coordinate indices_outer_start_corner(indices_ndim, 0); - Coordinate indices_outer_end_corner(indices_shape); - size_t slice_rank = indices_shape[indices_ndim - 1]; - indices_outer_end_corner[indices_ndim - 1] = 1; - Strides indices_strides(indices_ndim, 1); - AxisVector indices_axis_order(indices_ndim); - std::iota(indices_axis_order.begin(), indices_axis_order.end(), 0); - CoordinateTransform indices_outer_transform(indices_shape, - indices_outer_start_corner, - indices_outer_end_corner, - indices_strides, - indices_axis_order); - - // Create a matching CoordinateTransform for "out" that visits the same outer - // coordinates - size_t out_ndim = static_cast(out_shape.size()); - Coordinate out_start_corner(out_ndim, 0); - Coordinate out_end_corner(out_shape); - for (size_t i = indices_ndim - 1; i < out_ndim; i++) + template + using Required = typename std::enable_if::type; + + template + struct IsRandomAccessIt { - out_end_corner[i] = 1; - } - Strides 
out_strides(out_ndim, 1); - AxisVector out_axis_order(out_ndim); - std::iota(out_axis_order.begin(), out_axis_order.end(), 0); - CoordinateTransform out_transform( - out_shape, out_start_corner, out_end_corner, out_strides, out_axis_order); - size_t params_ndim = static_cast(params_shape.size()); - Strides params_strides(params_ndim, 1); - AxisVector params_axis_order(params_ndim); - std::iota(params_axis_order.begin(), params_axis_order.end(), 0); - - // Gather slices from "params" and copy to "out" - auto out_coord_iter = out_transform.begin(); - for (const Coordinate& indices_coord : indices_outer_transform) + static constexpr bool value = + std::is_same::value; + }; + + template ::value> = true> + class Span { - Coordinate params_start_corner(params_ndim, 0); - Coordinate params_end_corner(params_shape); - auto indices_index = indices_outer_transform.index(indices_coord); - for (size_t i = 0; i < slice_rank; i++) + public: + Span(Iterator begin, Iterator end) + : m_begin{begin} + , m_end{end} + { + } + + Iterator begin() const { return m_begin; } + Iterator end() const { return m_end; }; + typename Iterator::value_type operator[](size_t idx) const + { + return *next(m_begin, idx); + } + + typename Iterator::difference_type size() const { - U index = indices[indices_index]; - // take care of negative indices - index = index >= 0 ? index : index + params_shape[i]; - params_start_corner[i] = index; - params_end_corner[i] = index + 1; - indices_index++; + return std::distance(m_begin, m_end); } - CoordinateTransform params_transform(params_shape, - params_start_corner, - params_end_corner, - params_strides, - params_axis_order); - if (out_coord_iter == out_transform.end()) - break; - auto out_index = out_transform.index(*out_coord_iter); - for (const Coordinate& params_coord : params_transform) + + private: + Iterator m_begin; + Iterator m_end; + }; + + template + Span span(Iterator begin, Iterator end) + { + return Span{begin, end}; + }; + + template + std::vector get_indices_offsets(const Iterator beg, + const Iterator end, + size_t last_slice_size) + { + auto next_e = beg; + auto i = std::distance(beg, end); + std::vector offsets(i + 1, last_slice_size); + while (i-- > 0) { - out[out_index] = params[params_transform.index(params_coord)]; - out_index++; + offsets[i] = *next_e * offsets[i + 1]; + ++next_e; } - out_coord_iter++; + + return offsets; } - } + } // namespace + /// + /// Implementation find maximum length of *slice* of input *params* which might be + /// copied to *out* index by index. 
+ /// +-------+--------------+-------+ + /// | batch | indices[:-1] | slice | + /// | shape | shape | shape | + /// +-------+--------------+-------+ + /// template - void gather_nd(const T* params, - const U* indices, - T* out, + void gather_nd(const T* const params, + const U* const indices, + T* const out, const Shape& params_shape, const Shape& indices_shape, const Shape& out_shape, - int batch_dims = 0) + const int batch_dims = 0) { - using namespace std; - if (batch_dims == 0) - { - gather_nd_batch(params, indices, out, params_shape, indices_shape, out_shape); - return; - } + using std::begin; + using std::end; + using std::next; + using std::prev; + const auto rbegin = [](const Shape& s) { // generic since C++14 + return s.rbegin(); + }; + + const Shape batch_shape(begin(params_shape), next(begin(params_shape), batch_dims)); + const auto batch_size = shape_size(batch_shape); - size_t indices_ndim = static_cast(indices_shape.size()); - Coordinate indices_outer_start_corner(indices_ndim, 0); - Coordinate indices_outer_end_corner(indices_shape); - for (size_t i = batch_dims; i < indices_ndim; i++) + if (batch_dims && batch_size != out_shape.front()) { - indices_outer_end_corner[i] = 1; + throw std::domain_error{ + "out_shape should have on first dim multiplication of batch number of first" + "dimensions of shape "}; } - Strides indices_strides(indices_ndim, 1); - AxisVector indices_axis_order(indices_ndim); - std::iota(indices_axis_order.begin(), indices_axis_order.end(), 0); - CoordinateTransform indices_outer_transform(indices_shape, - indices_outer_start_corner, - indices_outer_end_corner, - indices_strides, - indices_axis_order); - - size_t params_ndim = static_cast(params_shape.size()); - Coordinate params_outer_start_corner(params_ndim, 0); - Coordinate params_outer_end_corner(params_shape); - for (size_t i = batch_dims; i < params_ndim; i++) + + if (!std::equal(begin(params_shape), + next(begin(params_shape), batch_dims), + begin(indices_shape))) { - params_outer_end_corner[i] = 1; + throw std::domain_error{ + "dimensions in params and indices have to be equal on batch dimensions"}; } - Strides params_strides(params_ndim, 1); - AxisVector params_axis_order(params_ndim); - std::iota(params_axis_order.begin(), params_axis_order.end(), 0); - CoordinateTransform params_outer_transform(params_shape, - params_outer_start_corner, - params_outer_end_corner, - params_strides, - params_axis_order); - - size_t out_ndim = static_cast(out_shape.size()); - Coordinate out_start_corner(out_ndim, 0); - Coordinate out_end_corner(out_shape); - for (size_t i = 1; i < out_ndim; i++) + + const auto first_slice_index_in_params = batch_dims + indices_shape.back(); + + if (!(first_slice_index_in_params <= params_shape.size())) { - out_end_corner[i] = 1; + throw std::domain_error{ + "params_shape should have enough rank to be index by indices"}; } - Strides out_strides(out_ndim, 1); - AxisVector out_axis_order(out_ndim); - std::iota(out_axis_order.begin(), out_axis_order.end(), 0); - CoordinateTransform out_transform( - out_shape, out_start_corner, out_end_corner, out_strides, out_axis_order); - - Shape indices_shape_batch(indices_shape.begin() + batch_dims, indices_shape.end()); - Shape params_shape_batch(params_shape.begin() + batch_dims, params_shape.end()); - Shape output_shape_batch(out_shape.begin() + 1, out_shape.end()); - auto out_coord_iter = out_transform.begin(); - auto params_coord_iter = params_outer_transform.begin(); - for (const Coordinate& indices_coord : indices_outer_transform) + + 
const auto slice_shape = + span(next(begin(params_shape), first_slice_index_in_params), end(params_shape)); + const auto slice_size = shape_size(slice_shape); + + const auto dims_begin = next(rbegin(params_shape), slice_shape.size()); + const auto dims_end = next(dims_begin, indices_shape.back() - 1); + + const auto indices_offsets = get_indices_offsets(dims_begin, dims_end, slice_size); + + const auto batch_offset = indices_offsets.front() * params_shape[batch_dims]; + + const auto k_1_indices = + span(next(begin(indices_shape), batch_dims), prev(end(indices_shape))); + + const auto k_1_params = + span(next(begin(params_shape), batch_dims), prev(end(params_shape))); + + const auto number_of_slices_to_copy_in_one_batch = shape_size(k_1_indices); + + const auto coordinates_size = indices_shape.back(); + + for (size_t batch = 0; batch != batch_size; ++batch) { - if (params_coord_iter == params_outer_transform.end() || - out_coord_iter == out_transform.end()) - break; - auto indices_index = indices_outer_transform.index(indices_coord); - auto params_index = params_outer_transform.index(*params_coord_iter); - auto output_index = out_transform.index(*out_coord_iter); - gather_nd_batch(params + params_index, - indices + indices_index, - out + output_index, - params_shape_batch, - indices_shape_batch, - output_shape_batch); - - out_coord_iter++; - params_coord_iter++; + const auto input_batch_offset = batch * batch_offset; + const auto output_batch_offset = + batch * number_of_slices_to_copy_in_one_batch * slice_size; + const auto coordinates_batch_offset = + batch * number_of_slices_to_copy_in_one_batch * coordinates_size; + for (size_t slice = 0; slice != number_of_slices_to_copy_in_one_batch; ++slice) + { + const auto slice_coordinates = + next(indices, coordinates_batch_offset + slice * coordinates_size); + + size_t input_slice_offset = input_batch_offset; + for (size_t c = 0; c != coordinates_size; ++c) + { + const auto i_c = slice_coordinates[c]; + const auto index = i_c < 0 ? k_1_params[c] + i_c : i_c; + input_slice_offset += index * indices_offsets[c]; + } + const auto output_slice_offset = output_batch_offset + slice * slice_size; + std::copy(next(params, input_slice_offset), + next(params, input_slice_offset + slice_size), + next(out, output_slice_offset)); + } } } - } - } -} + + } // namespace reference + } // namespace runtime +} // namespace ngraph diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/roi_pooling.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/roi_pooling.hpp new file mode 100644 index 00000000000000..8ea19700e4d526 --- /dev/null +++ b/ngraph/core/reference/include/ngraph/runtime/reference/roi_pooling.hpp @@ -0,0 +1,231 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#pragma once + +#include "ngraph/shape.hpp" + +namespace ngraph +{ + namespace runtime + { + namespace reference + { + template + void roi_pooling(const T* feature_maps, + const T* rois, + T* output, + const Shape& feature_maps_shape, + const Shape& rois_shape, + const Shape& output_shape, + const float spatial_scale, + const std::string& pooling_method) + { + // Feature maps input shape: {N, C, H, W} + const int batches = feature_maps_shape[0]; + const int channels = feature_maps_shape[1]; + const int height = feature_maps_shape[2]; + const int width = feature_maps_shape[3]; + + // Output shape: {NUM_ROIS, C, pooled_h, pooled_w} + const int pooled_h = output_shape[2]; + const int pooled_w = output_shape[3]; + + // ROIs shape: {NUM_ROIS, 5} + const int num_rois = rois_shape[0]; + + for (unsigned int roi_num = 0; roi_num < num_rois; roi_num++) + { + // ROI tuple: [roi_batch_id, roi_w_start, roi_h_start, roi_w_end, roi_h_end] + // ROI index + int roi_idx = rois_shape[1] * roi_num; + + // ROI batch id + int roi_batch_id = rois[roi_idx + 0]; + + // ROI batch id must be in the range of [0, N-1] + NGRAPH_CHECK(0 <= roi_batch_id && roi_batch_id < batches, + "ROI batch id must be in the range of [0, N-1]"); + + if (pooling_method == "max") + { + // ROI coordinates scaled to input feature maps + int roi_w_start = std::round(rois[roi_idx + 1] * spatial_scale); + int roi_h_start = std::round(rois[roi_idx + 2] * spatial_scale); + int roi_w_end = std::round(rois[roi_idx + 3] * spatial_scale); + int roi_h_end = std::round(rois[roi_idx + 4] * spatial_scale); + + // Force malformed ROIs to be 1x1 + int roi_height = std::max(roi_h_end - roi_h_start + 1, 1); + int roi_width = std::max(roi_w_end - roi_w_start + 1, 1); + + // Divide ROIs into sub-regions for max pooling + T bin_size_h = static_cast(roi_height) / pooled_h; + T bin_size_w = static_cast(roi_width) / pooled_w; + + const T* batch_data = + feature_maps + roi_batch_id * channels * height * width; + + for (unsigned int c = 0; c < channels; c++) + { + for (unsigned int ph = 0; ph < pooled_h; ph++) + { + for (unsigned int pw = 0; pw < pooled_w; pw++) + { + // Compute pooling region for this output unit: + // start (included) = floor(ph * roi_height / pooled_h) + // end (excluded) = ceil((ph + 1) * roi_height / pooled_h) + int h_start = static_cast( + std::floor(static_cast(ph) * bin_size_h)); + int w_start = static_cast( + std::floor(static_cast(pw) * bin_size_w)); + int h_end = static_cast( + std::ceil(static_cast(ph + 1) * bin_size_h)); + int w_end = static_cast( + std::ceil(static_cast(pw + 1) * bin_size_w)); + + // Add ROI offsets and clip to input boundaries + h_start = std::min(std::max(h_start + roi_h_start, 0), height); + w_start = std::min(std::max(w_start + roi_w_start, 0), width); + h_end = std::min(std::max(h_end + roi_h_start, 0), height); + w_end = std::min(std::max(w_end + roi_w_start, 0), width); + + const size_t pool_index = + roi_num * channels * pooled_h * pooled_w + + c * pooled_h * pooled_w + ph * pooled_w + pw; + + // Define an empty pooling region to be zero + bool is_empty = (h_end <= h_start) || (w_end <= w_start); + output[pool_index] = + is_empty ? 
0 : std::numeric_limits::lowest(); + + for (unsigned int h = h_start; h < h_end; h++) + { + for (unsigned int w = w_start; w < w_end; w++) + { + const size_t index = h * width + w; + output[pool_index] = + std::max(batch_data[index], output[pool_index]); + } + } + } + } + // Increment batch data pointer by one channel + batch_data += height * width; + } + } + else if (pooling_method == "bilinear") + { + // ROI coordinates, normalized + T roi_w_start = rois[roi_idx + 1]; + T roi_h_start = rois[roi_idx + 2]; + T roi_w_end = rois[roi_idx + 3]; + T roi_h_end = rois[roi_idx + 4]; + + T roi_height = (roi_h_end - roi_h_start) * (height - 1); + T roi_width = (roi_w_end - roi_w_start) * (width - 1); + + T roi_height_scale = (pooled_h > 1) ? roi_height / (pooled_h - 1) : 0; + T roi_width_scale = (pooled_w > 1) ? roi_width / (pooled_w - 1) : 0; + + for (unsigned int c = 0; c < channels; c++) + { + for (unsigned int ph = 0; ph < pooled_h; ph++) + { + for (unsigned int pw = 0; pw < pooled_w; pw++) + { + T in_y = + (pooled_h > 1) + ? (ph * roi_height_scale + roi_h_start * (height - 1)) + : 0.5 * (roi_h_start + roi_h_end) * (height - 1); + T in_x = + (pooled_w > 1) + ? (pw * roi_width_scale + roi_w_start * (width - 1)) + : 0.5 * (roi_w_end + roi_w_start) * (width - 1); + + const size_t pool_index = + roi_num * channels * pooled_h * pooled_w + + c * pooled_h * pooled_w + ph * pooled_w + pw; + // Define invalid pooling region to be zero + if (in_y < 0 || in_y > height - 1 || in_x < 0 || + in_x > width - 1) + { + output[pool_index] = 0; + } + else + { + int top_y_index = static_cast(std::floor(in_y)); + int bottom_y_index = static_cast(std::ceil(in_y)); + int left_x_index = static_cast(std::floor(in_x)); + int right_x_index = static_cast(std::ceil(in_x)); + + // Clip to input width boundaries + if (right_x_index > width - 1) + { + right_x_index = width - 1; + } + + // Clip to input height boundaries + if (bottom_y_index > height - 1) + { + bottom_y_index = height - 1; + } + + size_t top_left_idx = + roi_batch_id * channels * height * width + + c * height * width + top_y_index * width + left_x_index; + + size_t top_right_idx = + roi_batch_id * channels * height * width + + c * height * width + top_y_index * width + + right_x_index; + + size_t bottom_left_idx = + roi_batch_id * channels * height * width + + c * height * width + bottom_y_index * width + + left_x_index; + + size_t bottom_right_idx = + roi_batch_id * channels * height * width + + c * height * width + bottom_y_index * width + + right_x_index; + + const T top_left = feature_maps[top_left_idx]; + const T top_right = feature_maps[top_right_idx]; + const T bottom_left = feature_maps[bottom_left_idx]; + const T bottom_right = feature_maps[bottom_right_idx]; + + const T top = + top_left + + (top_right - top_left) * (in_x - left_x_index); + const T bottom = + bottom_left + + (bottom_right - bottom_left) * (in_x - left_x_index); + + output[pool_index] = + top + (bottom - top) * (in_y - top_y_index); + } + } + } + } + } + } + } + } // namespace reference + + } // namespace runtime + +} // namespace ngraph diff --git a/ngraph/core/reference/src/runtime/reference/loop.cpp b/ngraph/core/reference/src/runtime/reference/loop.cpp index 9731e8a659ad1a..d520387838a715 100644 --- a/ngraph/core/reference/src/runtime/reference/loop.cpp +++ b/ngraph/core/reference/src/runtime/reference/loop.cpp @@ -49,8 +49,8 @@ namespace ngraph input_descs.size() + (cur_iter_idx >= 0 ? 
!cur_iter_initial_value_exist : 0); HostTensorVector inputs_to_body; for (int64_t i = 0; i < inputs_count; ++i) - inputs_to_body.push_back( - std::make_shared(element::dynamic, PartialShape::dynamic())); + inputs_to_body.push_back(std::make_shared(element::Type_t::dynamic, + PartialShape::dynamic())); if (cur_iter_idx >= 0 && !cur_iter_initial_value_exist) { const auto& cur_iter = func->get_parameters().at(cur_iter_idx); @@ -90,12 +90,12 @@ namespace ngraph // Get TripCount int64_t trip_count = 0; - if (args[0]->get_element_type() == ngraph::element::i32) + if (args[0]->get_element_type() == ngraph::element::Type_t::i32) { auto* trip_count_p = args[0]->get_data_ptr(); trip_count = trip_count_p[0]; } - else if (args[0]->get_element_type() == ngraph::element::i64) + else if (args[0]->get_element_type() == ngraph::element::Type_t::i64) { auto* trip_count_p = args[0]->get_data_ptr(); trip_count = trip_count_p[0]; @@ -204,10 +204,10 @@ namespace ngraph { const auto& cur_iter_param = func->get_parameters().at(cur_iter_idx); int64_t iter_num = cur_iter + 1; - if (cur_iter_param->get_element_type() == element::i64) + if (cur_iter_param->get_element_type() == element::Type_t::i64) inputs_to_body.at(cur_iter_idx) ->write(&iter_num, cur_iter_param->get_element_type().size()); - else if (cur_iter_param->get_element_type() == element::i32) + else if (cur_iter_param->get_element_type() == element::Type_t::i32) { int32_t iter_num_i32 = static_cast(iter_num); inputs_to_body.at(cur_iter_idx) diff --git a/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp b/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp index 55719a597ccc65..8c950c0b807215 100644 --- a/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp +++ b/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp @@ -326,7 +326,7 @@ namespace ngraph size_t selected_size = valid_outputs * 3; - if (output_type == ngraph::element::i64) + if (output_type == ngraph::element::Type_t::i64) { int64_t* indices_ptr = outputs[0]->get_data_ptr(); memcpy(indices_ptr, selected_indices.data(), selected_size * sizeof(int64_t)); @@ -381,7 +381,7 @@ namespace ngraph return; } - if (output_type == ngraph::element::i64) + if (output_type == ngraph::element::Type_t::i64) { int64_t* valid_outputs_ptr = outputs[2]->get_data_ptr(); *valid_outputs_ptr = valid_outputs; diff --git a/ngraph/core/reference/src/runtime/reference/tensor_iterator.cpp b/ngraph/core/reference/src/runtime/reference/tensor_iterator.cpp index 08f80cd70f6aed..c6e12f562b8e87 100644 --- a/ngraph/core/reference/src/runtime/reference/tensor_iterator.cpp +++ b/ngraph/core/reference/src/runtime/reference/tensor_iterator.cpp @@ -35,8 +35,8 @@ namespace ngraph { HostTensorVector inputs_to_body; for (int64_t i = 0; i < input_descs.size(); ++i) - inputs_to_body.push_back( - std::make_shared(element::dynamic, PartialShape::dynamic())); + inputs_to_body.push_back(std::make_shared(element::Type_t::dynamic, + PartialShape::dynamic())); // Port map processing: inputs and back edges struct BackEdge diff --git a/ngraph/core/src/graph_util.cpp b/ngraph/core/src/graph_util.cpp index c0f0461686e2f9..a7c10582a3e2b6 100644 --- a/ngraph/core/src/graph_util.cpp +++ b/ngraph/core/src/graph_util.cpp @@ -586,7 +586,7 @@ std::shared_ptr ngraph::make_zero(const element::Type& element_type, const if (shape.size() > 0) { return std::make_shared( - zero, op::Constant::create(element::u64, Shape{shape.size()}, shape)); + zero, op::Constant::create(element::Type_t::u64, 
Shape{shape.size()}, shape)); } return zero; } diff --git a/ngraph/core/src/node.cpp b/ngraph/core/src/node.cpp index 489df366205126..3913daa5a7f36e 100644 --- a/ngraph/core/src/node.cpp +++ b/ngraph/core/src/node.cpp @@ -213,8 +213,8 @@ descriptor::Output& Node::get_output_descriptor(size_t position) while (m_outputs.size() <= position) { size_t i = m_outputs.size(); - auto tensor_descriptor = - make_shared(element::dynamic, PartialShape::dynamic(), this, i); + auto tensor_descriptor = make_shared( + element::Type_t::dynamic, PartialShape::dynamic(), this, i); m_outputs.emplace_back(this, i, tensor_descriptor); } return m_outputs.at(position); diff --git a/ngraph/core/src/op/abs.cpp b/ngraph/core/src/op/abs.cpp index 071b10c724f6e5..d22a372c425480 100644 --- a/ngraph/core/src/op/abs.cpp +++ b/ngraph/core/src/op/abs.cpp @@ -71,6 +71,8 @@ namespace absop break; TYPE_CASE(f32)(arg0, out, count); break; + TYPE_CASE(bf16)(arg0, out, count); + break; default: rc = false; break; } return rc; diff --git a/ngraph/core/src/op/broadcast.cpp b/ngraph/core/src/op/broadcast.cpp index 4f91709a84692b..71db716778d24d 100644 --- a/ngraph/core/src/op/broadcast.cpp +++ b/ngraph/core/src/op/broadcast.cpp @@ -260,7 +260,7 @@ op::v1::Broadcast::Broadcast(const Output& arg, const AutoBroadcastSpec& broadcast_spec) : util::BroadcastBase{arg, target_shape, - op::v0::Constant::create(element::u8, Shape{}, {0})->output(0), + op::v0::Constant::create(element::Type_t::u8, Shape{}, {0})->output(0), to_broadcast_mode(broadcast_spec)} , m_broadcast_spec{broadcast_spec} { diff --git a/ngraph/core/src/op/bucketize.cpp b/ngraph/core/src/op/bucketize.cpp index fb1bd237fea5d0..38ae363ce1c918 100644 --- a/ngraph/core/src/op/bucketize.cpp +++ b/ngraph/core/src/op/bucketize.cpp @@ -45,7 +45,8 @@ void op::v3::Bucketize::validate_and_infer_types() const PartialShape& buckets_pshape = get_input_partial_shape(1); NODE_VALIDATION_CHECK(this, - m_output_type == element::i64 || m_output_type == element::i32, + m_output_type == element::Type_t::i64 || + m_output_type == element::Type_t::i32, "Output type must be i32 or i64. 
Default is i64"); if (buckets_pshape.is_static()) diff --git a/ngraph/core/src/op/concat.cpp b/ngraph/core/src/op/concat.cpp index aa993f2377bb6c..e6bfad1d0bc666 100644 --- a/ngraph/core/src/op/concat.cpp +++ b/ngraph/core/src/op/concat.cpp @@ -50,7 +50,7 @@ void op::Concat::validate_and_infer_types() NODE_VALIDATION_CHECK(this, get_input_size() >= 1, "At least one argument required."); PartialShape inputs_shape_scheme{PartialShape::dynamic()}; - element::Type inputs_et{element::dynamic}; + element::Type inputs_et{element::Type_t::dynamic}; Dimension concatenation_axis_output_dim{0}; for (uint64_t i = 0; i < get_input_size(); i++) diff --git a/ngraph/core/src/op/constant.cpp b/ngraph/core/src/op/constant.cpp index b3026a388e5268..133fcb3fc27fcd 100644 --- a/ngraph/core/src/op/constant.cpp +++ b/ngraph/core/src/op/constant.cpp @@ -482,7 +482,7 @@ Shape op::Constant::get_shape_val() const Strides op::Constant::get_strides_val() const { - NGRAPH_CHECK(m_element_type == element::i64); + NGRAPH_CHECK(m_element_type == element::Type_t::i64); std::vector out_strides = cast_vector(); Strides output_strides(shape_size(m_shape)); std::transform(out_strides.begin(), @@ -494,7 +494,7 @@ Strides op::Constant::get_strides_val() const Coordinate op::Constant::get_coordinate_val() const { - NGRAPH_CHECK(m_element_type == element::i64); + NGRAPH_CHECK(m_element_type == element::Type_t::i64); std::vector out_coordinate = cast_vector(); Coordinate output_coordinate(shape_size(m_shape)); std::transform(out_coordinate.begin(), @@ -506,7 +506,7 @@ Coordinate op::Constant::get_coordinate_val() const CoordinateDiff op::Constant::get_coordinate_diff_val() const { - NGRAPH_CHECK(m_element_type == element::i64); + NGRAPH_CHECK(m_element_type == element::Type_t::i64); std::vector out_coordinate_diff = cast_vector(); CoordinateDiff output_coordinate_diff(shape_size(m_shape)); std::transform(out_coordinate_diff.begin(), diff --git a/ngraph/core/src/op/convert.cpp b/ngraph/core/src/op/convert.cpp index d03e77e0177be9..6992b1611f5dfe 100644 --- a/ngraph/core/src/op/convert.cpp +++ b/ngraph/core/src/op/convert.cpp @@ -113,6 +113,8 @@ namespace convert break; TYPE_CASE(i32)(arg, out); break; + TYPE_CASE(i16)(arg, out); + break; TYPE_CASE(i64)(arg, out); break; TYPE_CASE(u32)(arg, out); diff --git a/ngraph/core/src/op/cum_sum.cpp b/ngraph/core/src/op/cum_sum.cpp index c00b80766e3b0a..86fc0085e3624b 100644 --- a/ngraph/core/src/op/cum_sum.cpp +++ b/ngraph/core/src/op/cum_sum.cpp @@ -37,7 +37,7 @@ op::v0::CumSum::CumSum(const Output& arg, } op::v0::CumSum::CumSum(const Output& arg, const bool exclusive, const bool reverse) - : Op({arg, op::Constant::create(element::i32, Shape{}, {0})}) + : Op({arg, op::Constant::create(element::Type_t::i32, Shape{}, {0})}) , m_exclusive(exclusive) , m_reverse(reverse) { @@ -65,7 +65,7 @@ void op::v0::CumSum::validate_and_infer_types() const auto& axis_type = get_input_element_type(1); NODE_VALIDATION_CHECK(this, - axis_type == element::i32 || axis_type == element::i64, + axis_type == element::Type_t::i32 || axis_type == element::Type_t::i64, "axis element type must be either int64_t or int32_t but got (", axis_type, ")."); diff --git a/ngraph/core/src/op/detection_output.cpp b/ngraph/core/src/op/detection_output.cpp index 86a107deb5d92f..41c04467255969 100644 --- a/ngraph/core/src/op/detection_output.cpp +++ b/ngraph/core/src/op/detection_output.cpp @@ -49,11 +49,11 @@ void op::DetectionOutput::validate_and_infer_types() { auto box_logits_shape = get_input_partial_shape(0).to_shape(); 
set_output_type( - 0, element::f32, Shape{1, 1, m_attrs.keep_top_k[0] * box_logits_shape[0], 7}); + 0, element::Type_t::f32, Shape{1, 1, m_attrs.keep_top_k[0] * box_logits_shape[0], 7}); } else { - set_output_type(0, element::f32, PartialShape::dynamic()); + set_output_type(0, element::Type_t::f32, PartialShape::dynamic()); } } diff --git a/ngraph/core/src/op/divide.cpp b/ngraph/core/src/op/divide.cpp index f93912db65b696..b69c51d9588ff8 100644 --- a/ngraph/core/src/op/divide.cpp +++ b/ngraph/core/src/op/divide.cpp @@ -108,6 +108,8 @@ namespace divide break; TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec, pythondiv); break; + TYPE_CASE(bf16)(arg0, arg1, out, broadcast_spec, pythondiv); + break; default: rc = false; break; } return rc; diff --git a/ngraph/core/src/op/embedding_segments_sum.cpp b/ngraph/core/src/op/embedding_segments_sum.cpp index 6a2eca7a92b483..528b49b1e97936 100644 --- a/ngraph/core/src/op/embedding_segments_sum.cpp +++ b/ngraph/core/src/op/embedding_segments_sum.cpp @@ -56,18 +56,18 @@ op::v3::EmbeddingSegmentsSum::EmbeddingSegmentsSum(const Output& emb_table void op::v3::EmbeddingSegmentsSum::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, - get_input_element_type(SEGMENT_IDS) == element::i64 || - get_input_element_type(SEGMENT_IDS) == element::i32, + get_input_element_type(SEGMENT_IDS) == element::Type_t::i64 || + get_input_element_type(SEGMENT_IDS) == element::Type_t::i32, "SEGMENT_IDS type must be i32 or i64"); NODE_VALIDATION_CHECK(this, - get_input_element_type(INDICES) == element::i64 || - get_input_element_type(INDICES) == element::i32, + get_input_element_type(INDICES) == element::Type_t::i64 || + get_input_element_type(INDICES) == element::Type_t::i32, "INDICES type must be i32 or i64"); NODE_VALIDATION_CHECK(this, - get_input_element_type(NUM_SEGMENTS) == element::i64 || - get_input_element_type(NUM_SEGMENTS) == element::i32, + get_input_element_type(NUM_SEGMENTS) == element::Type_t::i64 || + get_input_element_type(NUM_SEGMENTS) == element::Type_t::i32, "NUM_SEGMENTS type must be i32 or i64"); NODE_VALIDATION_CHECK( @@ -110,8 +110,8 @@ void op::v3::EmbeddingSegmentsSum::validate_and_infer_types() if (get_input_size() >= 5) { NODE_VALIDATION_CHECK(this, - get_input_element_type(DEFAULT_INDEX) == element::i64 || - get_input_element_type(DEFAULT_INDEX) == element::i32, + get_input_element_type(DEFAULT_INDEX) == element::Type_t::i64 || + get_input_element_type(DEFAULT_INDEX) == element::Type_t::i32, "DEFAULT_INDEX type must be i32 or i64"); NODE_VALIDATION_CHECK( diff --git a/ngraph/core/src/op/equal.cpp b/ngraph/core/src/op/equal.cpp index 1627e5f05f6ff7..bb93b8fb1e69c4 100644 --- a/ngraph/core/src/op/equal.cpp +++ b/ngraph/core/src/op/equal.cpp @@ -65,7 +65,7 @@ namespace equal const op::AutoBroadcastSpec& broadcast_spec) { bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); + out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean); switch (arg0->get_element_type()) { TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec); diff --git a/ngraph/core/src/op/fake_quantize.cpp b/ngraph/core/src/op/fake_quantize.cpp index fec59b9fd92cc4..5ed3f6fd7a9704 100644 --- a/ngraph/core/src/op/fake_quantize.cpp +++ b/ngraph/core/src/op/fake_quantize.cpp @@ -134,7 +134,7 @@ OutputVector op::FakeQuantize::decompose_op() const const auto dequant_scale = (output_high - output_low) / levels_minus_one; // zero_point type needs to match the quantization output type - const auto zero_point = Constant::create(element::i32, 
data.get_shape(), {0.0}); + const auto zero_point = Constant::create(element::Type_t::i32, data.get_shape(), {0.0}); const auto axes = get_default_order(input_data_shape); // clip the input data to the range @@ -148,7 +148,7 @@ OutputVector op::FakeQuantize::decompose_op() const make_shared(data, quant_scale, zero_point, - element::i32, + element::Type_t::i32, axes, op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_EVEN); diff --git a/ngraph/core/src/op/gather.cpp b/ngraph/core/src/op/gather.cpp index 45c971797bfb0e..82e7b6ec405ce3 100644 --- a/ngraph/core/src/op/gather.cpp +++ b/ngraph/core/src/op/gather.cpp @@ -167,7 +167,7 @@ namespace gather out->set_shape(out_shape); - if (arg1->get_element_type() == element::i64) + if (arg1->get_element_type() == element::Type_t::i64) { runtime::reference::gather(arg0->get_data_ptr(), arg1->get_data_ptr(), @@ -177,7 +177,7 @@ namespace gather out->get_shape(), axis); } - else if (arg1->get_element_type() == element::i32) + else if (arg1->get_element_type() == element::Type_t::i32) { runtime::reference::gather(arg0->get_data_ptr(), arg1->get_data_ptr(), @@ -280,7 +280,7 @@ namespace gather if (indices_shape.empty()) { // gathering a scalar - const auto axes = op::Constant::create(element::i64, Shape{1}, {0}); + const auto axes = op::Constant::create(element::Type_t::i64, Shape{1}, {0}); gathered = make_shared(gathered_concat_input, axes); } diff --git a/ngraph/core/src/op/greater.cpp b/ngraph/core/src/op/greater.cpp index e84dd2ea474148..ae7a0afeaa7ce3 100644 --- a/ngraph/core/src/op/greater.cpp +++ b/ngraph/core/src/op/greater.cpp @@ -65,7 +65,7 @@ namespace greaterop const op::AutoBroadcastSpec& broadcast_spec) { bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); + out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean); switch (arg0->get_element_type()) { TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec); diff --git a/ngraph/core/src/op/greater_eq.cpp b/ngraph/core/src/op/greater_eq.cpp index 97dc3caf21ac00..f3ce8cbb1801da 100644 --- a/ngraph/core/src/op/greater_eq.cpp +++ b/ngraph/core/src/op/greater_eq.cpp @@ -65,7 +65,7 @@ namespace greater_equalop const op::AutoBroadcastSpec& broadcast_spec) { bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); + out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean); switch (arg0->get_element_type()) { TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec); diff --git a/ngraph/core/src/op/grn.cpp b/ngraph/core/src/op/grn.cpp index 3668d227238ae0..3710b2bb6c60d6 100644 --- a/ngraph/core/src/op/grn.cpp +++ b/ngraph/core/src/op/grn.cpp @@ -78,7 +78,7 @@ OutputVector op::GRN::decompose_op() const data = builder::opset1::reshape(data, data_shape); } - const auto axis_set_const = op::Constant::create(element::i64, {}, {1}); + const auto axis_set_const = op::Constant::create(element::Type_t::i64, {}, {1}); // Calculate l2 norm across channels. shared_ptr norm = builder::opset1::l2_norm(data, axis_set_const, m_bias); // Get back reduced axis. 
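The hunks above consistently replace the global element-type constants with `Type_t` values in `op::Constant::create` calls and validation checks. A minimal sketch of that creation pattern, mirroring the calls visible in the patch (illustrative only; the variable names are not from the source):

// Scalar and 1-D constants created with enum-based element types.
auto axis = ngraph::op::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape{1}, {1});
auto zero = ngraph::op::Constant::create(ngraph::element::Type_t::f32, ngraph::Shape{}, {0.0f});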
diff --git a/ngraph/core/src/op/gru_cell.cpp b/ngraph/core/src/op/gru_cell.cpp index f84c4dee2ae34b..d70e115c7db4c0 100644 --- a/ngraph/core/src/op/gru_cell.cpp +++ b/ngraph/core/src/op/gru_cell.cpp @@ -119,7 +119,7 @@ void op::v3::GRUCell::validate_and_infer_types() } auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; // Get input partial shape for all inputs const auto& x_pshape = get_input_partial_shape(0); diff --git a/ngraph/core/src/op/gru_sequence.cpp b/ngraph/core/src/op/gru_sequence.cpp index fc7cb620d3dd73..4446c3fb7fcfa2 100644 --- a/ngraph/core/src/op/gru_sequence.cpp +++ b/ngraph/core/src/op/gru_sequence.cpp @@ -74,7 +74,7 @@ void op::v5::GRUSequence::validate_and_infer_types() auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); auto merged_num_directions = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; auto x_pshape = get_input_partial_shape(0); auto ht_pshape = get_input_partial_shape(1); diff --git a/ngraph/core/src/op/interpolate.cpp b/ngraph/core/src/op/interpolate.cpp index 3cffb5e3e26ac3..14b58b9381bd64 100644 --- a/ngraph/core/src/op/interpolate.cpp +++ b/ngraph/core/src/op/interpolate.cpp @@ -221,8 +221,8 @@ void op::v4::Interpolate::validate_and_infer_types() { element::Type input_et = get_input_element_type(0); NODE_VALIDATION_CHECK(this, - input_et == element::f32 || input_et == element::f16 || - input_et == element::i8, + input_et == element::Type_t::f32 || input_et == element::Type_t::f16 || + input_et == element::Type_t::i8, "Input element type must be f32, f16, or i8"); PartialShape input_shape = PartialShape(get_input_partial_shape(0)); diff --git a/ngraph/core/src/op/less.cpp b/ngraph/core/src/op/less.cpp index 468a78c7e879ba..61ac88cba1cf96 100644 --- a/ngraph/core/src/op/less.cpp +++ b/ngraph/core/src/op/less.cpp @@ -65,7 +65,7 @@ namespace lessop const op::AutoBroadcastSpec& broadcast_spec) { bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); + out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean); switch (arg0->get_element_type()) { TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec); diff --git a/ngraph/core/src/op/less_eq.cpp b/ngraph/core/src/op/less_eq.cpp index 7ec143ee4d007a..5aa4acf11d6ae7 100644 --- a/ngraph/core/src/op/less_eq.cpp +++ b/ngraph/core/src/op/less_eq.cpp @@ -65,7 +65,7 @@ namespace less_equalop const op::AutoBroadcastSpec& broadcast_spec) { bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); + out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean); switch (arg0->get_element_type()) { TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec); diff --git a/ngraph/core/src/op/lrn.cpp b/ngraph/core/src/op/lrn.cpp index 0ebe097acded45..a28694ffc14f9a 100644 --- a/ngraph/core/src/op/lrn.cpp +++ b/ngraph/core/src/op/lrn.cpp @@ -25,7 +25,7 @@ using namespace ngraph; constexpr NodeTypeInfo op::LRN::type_info; op::LRN::LRN(const Output& arg, double alpha, double beta, double bias, size_t size) - : LRN(arg, op::Constant::create(element::i64, Shape{1}, {1}), alpha, beta, bias, size) + : LRN(arg, op::Constant::create(element::Type_t::i64, Shape{1}, {1}), alpha, beta, bias, size) { add_provenance_group_member(input_value(1).get_node_shared_ptr()); } diff --git a/ngraph/core/src/op/lstm_cell.cpp 
b/ngraph/core/src/op/lstm_cell.cpp index 0d2b24d53eae9a..235763125f11e2 100644 --- a/ngraph/core/src/op/lstm_cell.cpp +++ b/ngraph/core/src/op/lstm_cell.cpp @@ -156,7 +156,7 @@ void op::v0::LSTMCell::validate_and_infer_types() auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; // Copy all inputs without peephole (7th input) and initial_cell_state (2nd input) information // for further validation @@ -457,7 +457,7 @@ void op::v4::LSTMCell::validate_and_infer_types() } auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; // Get input partial shape for all inputs const auto& x_pshape = get_input_partial_shape(0); diff --git a/ngraph/core/src/op/lstm_sequence.cpp b/ngraph/core/src/op/lstm_sequence.cpp index ab3607c425eacf..7994cae95da506 100644 --- a/ngraph/core/src/op/lstm_sequence.cpp +++ b/ngraph/core/src/op/lstm_sequence.cpp @@ -131,8 +131,10 @@ shared_ptr op::v0::LSTMSequence::get_masked_node(const Output& data, // Create predicate nodes. The condition is whether current time step value // is greater than sequence length for respective batch inputs. - shared_ptr curr_time_step_node = opset1::Constant::create( - element::i32, data.get_shape(), vector(shape_size(data.get_shape()), time_step)); + shared_ptr curr_time_step_node = + opset1::Constant::create(element::Type_t::i32, + data.get_shape(), + vector(shape_size(data.get_shape()), time_step)); Output batch_seq_length = builder::opset1::legacy_broadcast_for_binary_operation( curr_time_step_node, input_value(3).get_node_shared_ptr(), batch_axis); @@ -270,7 +272,7 @@ void op::v0::LSTMSequence::validate_and_infer_types() auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); auto merged_num_directions = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; // Copy all inputs without peephole and initial_cell_state information for further validation for (size_t i = 0; i < get_input_size() - 1; i++) @@ -468,7 +470,7 @@ void op::v5::LSTMSequence::validate_and_infer_types() auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); auto merged_num_directions = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; // Copy all inputs without initial_cell_state information for further validation for (size_t i = 0; i < get_input_size(); i++) diff --git a/ngraph/core/src/op/max_pool.cpp b/ngraph/core/src/op/max_pool.cpp index ba7c0b29c5b65e..c1b34159f9c02a 100644 --- a/ngraph/core/src/op/max_pool.cpp +++ b/ngraph/core/src/op/max_pool.cpp @@ -68,16 +68,6 @@ op::v1::MaxPool::MaxPool(const Output& arg, constructor_validate_and_infer_types(); } -op::v1::MaxPool::MaxPool(const Output& arg, - const Strides& strides, - const Shape& pads_begin, - const Shape& pads_end, - const Shape& kernel, - op::RoundingType rounding_type) - : v1::MaxPool(arg, strides, pads_begin, pads_end, kernel, rounding_type, op::PadType::EXPLICIT) -{ -} - bool ngraph::op::v1::MaxPool::visit_attributes(AttributeVisitor& visitor) { visitor.on_attribute("strides", m_strides); diff --git a/ngraph/core/src/op/mod.cpp b/ngraph/core/src/op/mod.cpp index 30284534137be8..ff573124da89f2 100644 --- a/ngraph/core/src/op/mod.cpp +++ 
b/ngraph/core/src/op/mod.cpp @@ -52,8 +52,9 @@ OutputVector op::v1::Mod::decompose_op() const const auto divisor = make_shared(input_value(1)); // truncated(a / b) - auto division = make_shared( - make_shared(dividend, divisor, m_auto_broadcast), ngraph::element::i64); + auto division = + make_shared(make_shared(dividend, divisor, m_auto_broadcast), + ngraph::element::Type_t::i64); division = make_shared(division, dividend_et); // truncated(a / b) * b const auto multiplication = make_shared(division, divisor, m_auto_broadcast); diff --git a/ngraph/core/src/op/multiply.cpp b/ngraph/core/src/op/multiply.cpp index c80763ce5144e2..4c8b4be21e8092 100644 --- a/ngraph/core/src/op/multiply.cpp +++ b/ngraph/core/src/op/multiply.cpp @@ -80,6 +80,8 @@ namespace multiplyop break; TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec); break; + TYPE_CASE(bf16)(arg0, arg1, out, broadcast_spec); + break; default: rc = false; break; } return rc; diff --git a/ngraph/core/src/op/non_max_suppression.cpp b/ngraph/core/src/op/non_max_suppression.cpp index d5e715b6865696..2e158f30b208f5 100644 --- a/ngraph/core/src/op/non_max_suppression.cpp +++ b/ngraph/core/src/op/non_max_suppression.cpp @@ -52,9 +52,9 @@ op::v1::NonMaxSuppression::NonMaxSuppression( const bool sort_result_descending) : Op({boxes, scores, - op::Constant::create(element::i64, Shape{}, {0}), - op::Constant::create(element::f32, Shape{}, {.0f}), - op::Constant::create(element::f32, Shape{}, {.0f})}) + op::Constant::create(element::Type_t::i64, Shape{}, {0}), + op::Constant::create(element::Type_t::f32, Shape{}, {.0f}), + op::Constant::create(element::Type_t::f32, Shape{}, {.0f})}) , m_box_encoding{box_encoding} , m_sort_result_descending{sort_result_descending} { @@ -71,13 +71,13 @@ std::shared_ptr const auto& arg2 = new_args.size() > 2 ? new_args.at(2) - : ngraph::op::Constant::create(element::i32, Shape{}, {0}); + : ngraph::op::Constant::create(element::Type_t::i32, Shape{}, {0}); const auto& arg3 = new_args.size() > 3 ? new_args.at(3) - : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f}); const auto& arg4 = new_args.size() > 4 ? 
new_args.at(4) - : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f}); return std::make_shared( new_args.at(0), new_args.at(1), arg2, arg3, arg4, m_box_encoding, m_sort_result_descending); @@ -98,7 +98,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types() // the spec doesn't say what exact type should be used for the output of this op // that's why we're setting it to 64-bit integer to provide the maximum range of values support // this will be changed (configurable) in the next version of this op - const auto& output_element_type = element::i64; + const auto& output_element_type = element::Type_t::i64; // NonMaxSuppression produces triplets // that have the following format: [batch_index, class_index, box_index] @@ -249,9 +249,9 @@ op::v3::NonMaxSuppression::NonMaxSuppression( const element::Type& output_type) : Op({boxes, scores, - op::Constant::create(element::i64, Shape{}, {0}), - op::Constant::create(element::f32, Shape{}, {.0f}), - op::Constant::create(element::f32, Shape{}, {.0f})}) + op::Constant::create(element::Type_t::i64, Shape{}, {0}), + op::Constant::create(element::Type_t::f32, Shape{}, {.0f}), + op::Constant::create(element::Type_t::f32, Shape{}, {.0f})}) , m_box_encoding{box_encoding} , m_sort_result_descending{sort_result_descending} , m_output_type{output_type} @@ -269,13 +269,13 @@ std::shared_ptr const auto& arg2 = new_args.size() > 2 ? new_args.at(2) - : ngraph::op::Constant::create(element::i32, Shape{}, {0}); + : ngraph::op::Constant::create(element::Type_t::i32, Shape{}, {0}); const auto& arg3 = new_args.size() > 3 ? new_args.at(3) - : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f}); const auto& arg4 = new_args.size() > 4 ? new_args.at(4) - : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f}); return std::make_shared(new_args.at(0), new_args.at(1), @@ -301,7 +301,8 @@ void op::v3::NonMaxSuppression::validate() const auto scores_ps = get_input_partial_shape(1); NODE_VALIDATION_CHECK(this, - m_output_type == element::i64 || m_output_type == element::i32, + m_output_type == element::Type_t::i64 || + m_output_type == element::Type_t::i32, "Output type must be i32 or i64"); if (boxes_ps.is_dynamic() || scores_ps.is_dynamic()) @@ -468,9 +469,9 @@ op::v4::NonMaxSuppression::NonMaxSuppression( const element::Type& output_type) : op::v3::NonMaxSuppression(boxes, scores, - op::Constant::create(element::i64, Shape{}, {0}), - op::Constant::create(element::f32, Shape{}, {.0f}), - op::Constant::create(element::f32, Shape{}, {.0f}), + op::Constant::create(element::Type_t::i64, Shape{}, {0}), + op::Constant::create(element::Type_t::f32, Shape{}, {.0f}), + op::Constant::create(element::Type_t::f32, Shape{}, {.0f}), box_encoding, sort_result_descending, output_type) @@ -488,13 +489,13 @@ std::shared_ptr const auto& arg2 = new_args.size() > 2 ? new_args.at(2) - : ngraph::op::Constant::create(element::i32, Shape{}, {0}); + : ngraph::op::Constant::create(element::Type_t::i32, Shape{}, {0}); const auto& arg3 = new_args.size() > 3 ? new_args.at(3) - : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f}); const auto& arg4 = new_args.size() > 4 ? 
new_args.at(4) - : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f}); return std::make_shared(new_args.at(0), new_args.at(1), @@ -693,7 +694,7 @@ namespace inline bool is_float_type_admissible(const element::Type& t) { - return t == element::f32 || t == element::f16 || t == element::bf16; + return t == element::Type_t::f32 || t == element::Type_t::f16 || t == element::Type_t::bf16; } inline bool is_scalar_or_1d_tensor_with_1_element(const PartialShape& p) @@ -715,7 +716,8 @@ void op::v5::NonMaxSuppression::validate() const auto scores_ps = get_input_partial_shape(1); NODE_VALIDATION_CHECK(this, - m_output_type == element::i64 || m_output_type == element::i32, + m_output_type == element::Type_t::i64 || + m_output_type == element::Type_t::i32, "Output type must be i32 or i64"); if (boxes_ps.is_dynamic() || scores_ps.is_dynamic()) @@ -920,7 +922,7 @@ void op::v5::NonMaxSuppression::validate_and_infer_types() } set_output_type(0, m_output_type, out_shape); - set_output_type(1, element::f32, out_shape); + set_output_type(1, element::Type_t::f32, out_shape); set_output_type(2, m_output_type, Shape{1}); } diff --git a/ngraph/core/src/op/non_zero.cpp b/ngraph/core/src/op/non_zero.cpp index 9e544abc0136ba..55831236118599 100644 --- a/ngraph/core/src/op/non_zero.cpp +++ b/ngraph/core/src/op/non_zero.cpp @@ -62,7 +62,8 @@ void op::v3::NonZero::validate_and_infer_types() "NonZero input data type needs to be a numeric type. Got: ", input_et); NODE_VALIDATION_CHECK(this, - m_output_type == element::i64 || m_output_type == element::i32, + m_output_type == element::Type_t::i64 || + m_output_type == element::Type_t::i32, "Output type must be i32 or i64"); // For scalar non-zero value case, onnx test case expects output shape {1, 1} diff --git a/ngraph/core/src/op/not_equal.cpp b/ngraph/core/src/op/not_equal.cpp index 643e92bbe0fced..44dae5c95cc765 100644 --- a/ngraph/core/src/op/not_equal.cpp +++ b/ngraph/core/src/op/not_equal.cpp @@ -65,7 +65,7 @@ namespace not_equalop const op::AutoBroadcastSpec& broadcast_spec) { bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); + out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean); switch (arg0->get_element_type()) { TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec); diff --git a/ngraph/core/src/op/power.cpp b/ngraph/core/src/op/power.cpp index 9403df667fae92..193c6ded5edf20 100644 --- a/ngraph/core/src/op/power.cpp +++ b/ngraph/core/src/op/power.cpp @@ -83,6 +83,8 @@ namespace power break; TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec); break; + TYPE_CASE(bf16)(arg0, arg1, out, broadcast_spec); + break; default: rc = false; break; } return rc; diff --git a/ngraph/core/src/op/prior_box.cpp b/ngraph/core/src/op/prior_box.cpp index 437678880c9d33..5e0a5580070c6c 100644 --- a/ngraph/core/src/op/prior_box.cpp +++ b/ngraph/core/src/op/prior_box.cpp @@ -72,14 +72,14 @@ void op::PriorBox::validate_and_infer_types() auto layer_shape = const_shape->get_shape_val(); set_output_type(0, - element::f32, + element::Type_t::f32, Shape{2, 4 * layer_shape[0] * layer_shape[1] * static_cast(number_of_priors(m_attrs))}); } else { - set_output_type(0, element::f32, PartialShape::dynamic()); + set_output_type(0, element::Type_t::f32, PartialShape::dynamic()); } } diff --git a/ngraph/core/src/op/prior_box_clustered.cpp b/ngraph/core/src/op/prior_box_clustered.cpp index 4b173c6a007774..ec41d3b074d2c4 100644 --- a/ngraph/core/src/op/prior_box_clustered.cpp 
+++ b/ngraph/core/src/op/prior_box_clustered.cpp @@ -80,11 +80,11 @@ void op::PriorBoxClustered::validate_and_infer_types() // {Prior boxes, variances-adjusted prior boxes} const auto num_priors = m_attrs.widths.size(); set_output_type( - 0, element::f32, Shape{2, 4 * layer_shape[0] * layer_shape[1] * num_priors}); + 0, element::Type_t::f32, Shape{2, 4 * layer_shape[0] * layer_shape[1] * num_priors}); } else { - set_output_type(0, element::f32, PartialShape::dynamic()); + set_output_type(0, element::Type_t::f32, PartialShape::dynamic()); } } diff --git a/ngraph/core/src/op/range.cpp b/ngraph/core/src/op/range.cpp index b8083cafff022c..0da2373ef95f04 100644 --- a/ngraph/core/src/op/range.cpp +++ b/ngraph/core/src/op/range.cpp @@ -363,7 +363,7 @@ void op::v0::Range::validate_and_infer_types() set_input_is_relevant_to_shape(1); set_input_is_relevant_to_shape(2); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; NODE_VALIDATION_CHECK( this, @@ -373,7 +373,7 @@ void op::v0::Range::validate_and_infer_types() "Element types for start, stop, and step do not match."); NODE_VALIDATION_CHECK(this, - result_et != element::boolean, + result_et != element::Type_t::boolean, "Element type for start, stop, and step, must not be boolean."); NODE_VALIDATION_CHECK( diff --git a/ngraph/core/src/op/reduce_logical_and.cpp b/ngraph/core/src/op/reduce_logical_and.cpp index a83d94200bb3b1..666b818efb7488 100644 --- a/ngraph/core/src/op/reduce_logical_and.cpp +++ b/ngraph/core/src/op/reduce_logical_and.cpp @@ -76,7 +76,7 @@ bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs, const auto& axes = inputs[1]; const auto& out = outputs[0]; - if (data->get_element_type() != element::boolean || + if (data->get_element_type() != element::Type_t::boolean || !axes->get_element_type().is_integral_number()) { return false; diff --git a/ngraph/core/src/op/reduce_logical_or.cpp b/ngraph/core/src/op/reduce_logical_or.cpp index ba3efba782f0a1..f1c731cc24997d 100644 --- a/ngraph/core/src/op/reduce_logical_or.cpp +++ b/ngraph/core/src/op/reduce_logical_or.cpp @@ -76,7 +76,7 @@ bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs, const auto& axes = inputs[1]; const auto& out = outputs[0]; - if (data->get_element_type() != element::boolean || + if (data->get_element_type() != element::Type_t::boolean || !axes->get_element_type().is_integral_number()) { return false; diff --git a/ngraph/core/src/op/reverse.cpp b/ngraph/core/src/op/reverse.cpp index fe929235617550..212a6befe041a0 100644 --- a/ngraph/core/src/op/reverse.cpp +++ b/ngraph/core/src/op/reverse.cpp @@ -59,7 +59,7 @@ void op::v1::Reverse::validate_and_infer_types() if (m_mode == Mode::MASK) { NODE_VALIDATION_CHECK(this, - get_input_element_type(1) == element::boolean, + get_input_element_type(1) == element::Type_t::boolean, "In 'mask' mode the second input must contain boolean values."); } diff --git a/ngraph/core/src/op/rnn_cell.cpp b/ngraph/core/src/op/rnn_cell.cpp index 80dba75a894307..482929fd03e2e5 100644 --- a/ngraph/core/src/op/rnn_cell.cpp +++ b/ngraph/core/src/op/rnn_cell.cpp @@ -92,7 +92,7 @@ void op::v0::RNNCell::validate_and_infer_types() } auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; // Get input partial shape for all inputs const auto& x_pshape = get_input_partial_shape(0); diff --git a/ngraph/core/src/op/rnn_sequence.cpp 
b/ngraph/core/src/op/rnn_sequence.cpp index 5087b631d1e1d1..cfbbb1d7f95519 100644 --- a/ngraph/core/src/op/rnn_sequence.cpp +++ b/ngraph/core/src/op/rnn_sequence.cpp @@ -71,7 +71,7 @@ void op::v5::RNNSequence::validate_and_infer_types() auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); auto merged_num_directions = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; auto x_pshape = get_input_partial_shape(0); auto ht_pshape = get_input_partial_shape(1); diff --git a/ngraph/core/src/op/roi_pooling.cpp b/ngraph/core/src/op/roi_pooling.cpp index 92cc49bf1e7d60..31dc072ea091bd 100644 --- a/ngraph/core/src/op/roi_pooling.cpp +++ b/ngraph/core/src/op/roi_pooling.cpp @@ -36,32 +36,104 @@ op::ROIPooling::ROIPooling(const Output<Node>& input, void op::ROIPooling::validate_and_infer_types() { - auto input_et = get_input_element_type(0); - if (get_input_partial_shape(0).is_static() && get_input_partial_shape(1).is_static()) + auto feat_maps_et = get_input_element_type(0); + auto coords_et = get_input_element_type(1); + NODE_VALIDATION_CHECK( + this, + feat_maps_et.is_real() && coords_et.is_real(), + "The data type for input and ROIs is expected to be a floating point type. Got: ", + feat_maps_et, + " and: ", + coords_et); + + NODE_VALIDATION_CHECK( + this, + feat_maps_et == coords_et, + "Type of feature maps (inputs) and ROIs is expected to be the same. Got: ", + feat_maps_et, + " and: ", + coords_et); + + NODE_VALIDATION_CHECK(this, + m_output_size.size() == 2, + "The dimension of pooled size is expected to be equal to 2. Got: ", + m_output_size.size()); + + NODE_VALIDATION_CHECK(this, + m_output_size[0] > 0 && m_output_size[1] > 0, + "Pooled size attributes pooled_h and pooled_w should be " + "positive integers. Got: ", + m_output_size[0], + " and: ", + m_output_size[1], + " respectively"); + + NODE_VALIDATION_CHECK( + this, + m_spatial_scale > 0, + "The spatial scale attribute should be a positive floating point number. Got: ", + m_spatial_scale); + + NODE_VALIDATION_CHECK( + this, + m_method == "max" || m_method == "bilinear", + "Pooling method attribute should be either \'max\' or \'bilinear\'. Got: ", + m_method); + + const auto& feat_maps_ps = get_input_partial_shape(0); + NODE_VALIDATION_CHECK(this, + feat_maps_ps.rank().compatible(4), + "Expected a 4D tensor for the feature maps input. Got: ", + feat_maps_ps); + + const auto& coords_ps = get_input_partial_shape(1); + NODE_VALIDATION_CHECK(this, + coords_ps.rank().compatible(2), + "Expected a 2D tensor for the ROIs input with box coordinates. Got: ", + coords_ps); + + if (coords_ps.rank().is_static()) + { + const auto coords_second_dim = coords_ps[1]; + NODE_VALIDATION_CHECK( + this, + coords_second_dim.compatible(5), + "The second dimension of ROIs input should contain batch id and box coordinates. ", + "This dimension is expected to be equal to 5. 
Got: ", + coords_second_dim); + } + + // output shape should be {NUM_ROIS, C, pooled_h, pooled_w} + auto output_shape = PartialShape{{Dimension::dynamic(), + Dimension::dynamic(), + Dimension{static_cast(m_output_size[0])}, + Dimension{static_cast(m_output_size[1])}}}; + + if (coords_ps.rank().is_static() && coords_ps[0].is_static()) + { + output_shape[0] = coords_ps[0]; + } + + if (feat_maps_ps.rank().is_static() && feat_maps_ps[1].is_static()) { - Shape input_shape = get_input_partial_shape(0).to_shape(); - Shape coords_shape = get_input_partial_shape(1).to_shape(); - NODE_VALIDATION_CHECK(this, - input_shape.size() >= 3, - "ROIPooling expects 3 or higher dimensions for input. Got ", - input_shape.size()); - NODE_VALIDATION_CHECK(this, - coords_shape.size() == 2, - "ROIPooling expects 2 dimensions for box coordinates. Got ", - coords_shape.size()); - NODE_VALIDATION_CHECK(this, - input_shape.size() - 2 == m_output_size.size(), - "Spatial dimensions on input: ", - input_shape.size() - 2, - " doesn't match dimensions on requested output_size: ", - m_output_size.size()); - Shape output_shape{coords_shape[0], input_shape[1]}; - output_shape.insert(output_shape.end(), m_output_size.begin(), m_output_size.end()); - set_output_type(0, input_et, output_shape); + output_shape[1] = feat_maps_ps[1]; } - else + + set_output_size(1); + set_output_type(0, feat_maps_et, output_shape); + + // if channel dimension, C, not known + // feature maps input is used by shape specialization pass + if (feat_maps_ps.rank().is_static() && feat_maps_ps[1].is_dynamic()) + { + set_input_is_relevant_to_shape(0); + } + + // if number of ROIs, NUM_ROIS, not known + // coordinate input is used by shape specialization pass + if (coords_ps.rank().is_static() && coords_ps[0].is_dynamic()) { - set_output_type(0, input_et, PartialShape::dynamic()); + set_input_is_relevant_to_shape(1); } } diff --git a/ngraph/core/src/op/select.cpp b/ngraph/core/src/op/select.cpp index e8f0c0a4407962..7352ec5be7bbc9 100644 --- a/ngraph/core/src/op/select.cpp +++ b/ngraph/core/src/op/select.cpp @@ -46,7 +46,7 @@ void op::v1::Select::validate_and_infer_types() // Condition element type check NODE_VALIDATION_CHECK(this, get_input_element_type(0).is_dynamic() || - get_input_element_type(0) == element::boolean, + get_input_element_type(0) == element::Type_t::boolean, "Argument 0 must have boolean element type (element type: ", get_input_element_type(0), ")."); @@ -184,7 +184,7 @@ void op::v0::Select::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, get_input_element_type(0).is_dynamic() || - get_input_element_type(0) == element::boolean, + get_input_element_type(0) == element::Type_t::boolean, "Argument 0 must have boolean element type (element type: ", get_input_element_type(0), ")."); diff --git a/ngraph/core/src/op/shape_of.cpp b/ngraph/core/src/op/shape_of.cpp index 78923352831d98..84134080bfbe25 100644 --- a/ngraph/core/src/op/shape_of.cpp +++ b/ngraph/core/src/op/shape_of.cpp @@ -42,7 +42,8 @@ op::v3::ShapeOf::ShapeOf(const Output& arg, element::Type output_type) void op::v3::ShapeOf::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, - m_output_type == element::i64 || m_output_type == element::i32, + m_output_type == element::Type_t::i64 || + m_output_type == element::Type_t::i32, "Output type must be i32 or i64"); set_input_is_relevant_to_value(0, false); set_output_type(0, m_output_type, PartialShape{get_input_partial_shape(0).rank()}); @@ -141,7 +142,7 @@ namespace shape_of auto index = std::make_shared( output_type, 
Shape{1}, std::vector{i}); auto axis = std::make_shared( - element::i64, Shape{}, std::vector{0}); + element::Type_t::i64, Shape{}, std::vector{0}); auto temp = make_shared(shape_of, index, axis); temp->set_friendly_name("DynDim/" + temp->get_name()); dimensions.push_back(temp); @@ -182,7 +183,7 @@ op::v0::ShapeOf::ShapeOf(const Output& arg) void op::v0::ShapeOf::validate_and_infer_types() { set_input_is_relevant_to_value(0, false); - set_output_type(0, element::i64, PartialShape{get_input_partial_shape(0).rank()}); + set_output_type(0, element::Type_t::i64, PartialShape{get_input_partial_shape(0).rank()}); } bool ngraph::op::v0::ShapeOf::visit_attributes(AttributeVisitor& visitor) diff --git a/ngraph/core/src/op/squeeze.cpp b/ngraph/core/src/op/squeeze.cpp index 7adfe93ce7ad5f..5cf640d2932d82 100644 --- a/ngraph/core/src/op/squeeze.cpp +++ b/ngraph/core/src/op/squeeze.cpp @@ -126,7 +126,7 @@ OutputVector op::Squeeze::decompose_op() const auto output_data_shape = get_output_shape(0); return {make_shared( data, - op::Constant::create(element::u64, {output_data_shape.size()}, output_data_shape), + op::Constant::create(element::Type_t::u64, {output_data_shape.size()}, output_data_shape), false)}; } diff --git a/ngraph/core/src/op/strided_slice.cpp b/ngraph/core/src/op/strided_slice.cpp index 8dc5ca05b976a5..b4af01c84c4b24 100644 --- a/ngraph/core/src/op/strided_slice.cpp +++ b/ngraph/core/src/op/strided_slice.cpp @@ -77,12 +77,13 @@ namespace { NGRAPH_CHECK(begin_pshape.rank().is_static() && begin_pshape.rank().get_length() == 1, "Begin input must be 1D"); - return std::make_shared(op::Constant::create(element::i64, {}, {1}), - std::make_shared(begin)); + return std::make_shared( + op::Constant::create(element::Type_t::i64, {}, {1}), + std::make_shared(begin)); } return op::Constant::create( - element::i64, Shape{strides_length}, vector(strides_length, 1)); + element::Type_t::i64, Shape{strides_length}, vector(strides_length, 1)); } } diff --git a/ngraph/core/src/op/subtract.cpp b/ngraph/core/src/op/subtract.cpp index 79ccaaa4c8a1b8..3c100f2b23efe0 100644 --- a/ngraph/core/src/op/subtract.cpp +++ b/ngraph/core/src/op/subtract.cpp @@ -86,6 +86,8 @@ namespace subtract break; TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec); break; + TYPE_CASE(bf16)(arg0, arg1, out, broadcast_spec); + break; default: rc = false; break; } return rc; diff --git a/ngraph/core/src/op/topk.cpp b/ngraph/core/src/op/topk.cpp index 9a47674e57d31f..e6b3bab597756d 100644 --- a/ngraph/core/src/op/topk.cpp +++ b/ngraph/core/src/op/topk.cpp @@ -320,8 +320,9 @@ size_t op::v1::TopK::read_k_from_constant_node(const shared_ptr& node, const element::Type& k_element_type) const { NODE_VALIDATION_CHECK(this, - k_element_type == element::i8 || k_element_type == element::i32 || - k_element_type == element::i64, + k_element_type == element::Type_t::i8 || + k_element_type == element::Type_t::i32 || + k_element_type == element::Type_t::i64, "K input element type must be i8, i32 or i64 (got ", k_element_type, ")."); @@ -400,7 +401,7 @@ size_t op::v1::TopK::get_k() const void op::v1::TopK::set_k(size_t k) { this->input(1).replace_source_output( - op::Constant::create(element::i64, Shape{}, {k})->output(0)); + op::Constant::create(element::Type_t::i64, Shape{}, {k})->output(0)); } bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const diff --git a/ngraph/core/src/op/util/arithmetic_reduction.cpp b/ngraph/core/src/op/util/arithmetic_reduction.cpp index 09b17f952978c4..dac51ff772434c 100644 --- 
a/ngraph/core/src/op/util/arithmetic_reduction.cpp +++ b/ngraph/core/src/op/util/arithmetic_reduction.cpp @@ -29,7 +29,7 @@ op::util::ArithmeticReduction::ArithmeticReduction(const Output& arg, const AxisSet& reduction_axes) : Op({arg, op::Constant::create( - element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) + element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) ->output(0)}) { add_provenance_group_member(input_value(1).get_node_shared_ptr()); @@ -62,9 +62,10 @@ const AxisSet op::util::ArithmeticReduction::get_reduction_axes() const void op::util::ArithmeticReduction::set_reduction_axes(const AxisSet& reduction_axes) { - this->input(1).replace_source_output( - op::Constant::create(element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) - ->output(0)); + this->input(1).replace_source_output(op::Constant::create(element::Type_t::i64, + Shape{reduction_axes.size()}, + reduction_axes.to_vector()) + ->output(0)); } void op::util::ArithmeticReduction::validate_and_infer_types() diff --git a/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp b/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp index 7f9b4afbeec0c7..18af758956f394 100644 --- a/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp +++ b/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp @@ -44,7 +44,7 @@ void op::util::BinaryElementwiseArithmetic::validate_and_infer_elementwise_arith PartialShape& args_pshape = std::get<1>(args_et_pshape); NODE_VALIDATION_CHECK(this, - args_et.is_dynamic() || args_et != element::boolean, + args_et.is_dynamic() || args_et != element::Type_t::boolean, "Arguments cannot have boolean element type (argument element type: ", args_et, ")."); diff --git a/ngraph/core/src/op/util/binary_elementwise_comparison.cpp b/ngraph/core/src/op/util/binary_elementwise_comparison.cpp index f8f35d99721b3f..74c4e239dfb3f1 100644 --- a/ngraph/core/src/op/util/binary_elementwise_comparison.cpp +++ b/ngraph/core/src/op/util/binary_elementwise_comparison.cpp @@ -39,7 +39,7 @@ void op::util::BinaryElementwiseComparison::validate_and_infer_types() auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this, m_autob); PartialShape& args_pshape = std::get<1>(args_et_pshape); - set_output_type(0, element::boolean, args_pshape); + set_output_type(0, element::Type_t::boolean, args_pshape); } bool op::util::BinaryElementwiseComparison::visit_attributes(AttributeVisitor& visitor) diff --git a/ngraph/core/src/op/util/binary_elementwise_logical.cpp b/ngraph/core/src/op/util/binary_elementwise_logical.cpp index 6c7dc0bf51fce5..666b8c1daa8c0f 100644 --- a/ngraph/core/src/op/util/binary_elementwise_logical.cpp +++ b/ngraph/core/src/op/util/binary_elementwise_logical.cpp @@ -44,12 +44,12 @@ void op::util::BinaryElementwiseLogical::validate_and_infer_elementwise_logical( NODE_VALIDATION_CHECK( this, - args_et.is_dynamic() || args_et == element::boolean, + args_et.is_dynamic() || args_et == element::Type_t::boolean, "Operands for logical operators must have boolean element type but have element type ", args_et, "."); - set_output_type(0, element::boolean, args_pshape); + set_output_type(0, element::Type_t::boolean, args_pshape); } void op::util::BinaryElementwiseLogical::validate_and_infer_types() diff --git a/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp b/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp index 3fa1b09ba78364..8834496a2cba1c 100644 --- a/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp +++ 
b/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp @@ -52,13 +52,13 @@ op::util::EmbeddingBagOffsetsBase::EmbeddingBagOffsetsBase(const Output& e void op::util::EmbeddingBagOffsetsBase::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, - get_input_element_type(OFFSETS) == element::i64 || - get_input_element_type(OFFSETS) == element::i32, + get_input_element_type(OFFSETS) == element::Type_t::i64 || + get_input_element_type(OFFSETS) == element::Type_t::i32, "OFFSETS type must be i32 or i64"); NODE_VALIDATION_CHECK(this, - get_input_element_type(INDICES) == element::i64 || - get_input_element_type(INDICES) == element::i32, + get_input_element_type(INDICES) == element::Type_t::i64 || + get_input_element_type(INDICES) == element::Type_t::i32, "INDICES type must be i32 or i64"); NODE_VALIDATION_CHECK( @@ -83,8 +83,8 @@ void op::util::EmbeddingBagOffsetsBase::validate_and_infer_types() if (get_input_size() >= 4) { NODE_VALIDATION_CHECK(this, - get_input_element_type(DEFAULT_INDEX) == element::i64 || - get_input_element_type(DEFAULT_INDEX) == element::i32, + get_input_element_type(DEFAULT_INDEX) == element::Type_t::i64 || + get_input_element_type(DEFAULT_INDEX) == element::Type_t::i32, "DEFAULT_INDEX type must be i32 or i64"); NODE_VALIDATION_CHECK( diff --git a/ngraph/core/src/op/util/embeddingbag_packed_base.cpp b/ngraph/core/src/op/util/embeddingbag_packed_base.cpp index 7b9afd0f7add74..48d7e5d196372d 100644 --- a/ngraph/core/src/op/util/embeddingbag_packed_base.cpp +++ b/ngraph/core/src/op/util/embeddingbag_packed_base.cpp @@ -40,8 +40,8 @@ op::util::EmbeddingBagPackedBase::EmbeddingBagPackedBase(const Output& emb void op::util::EmbeddingBagPackedBase::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, - get_input_element_type(INDICES) == element::i64 || - get_input_element_type(INDICES) == element::i32, + get_input_element_type(INDICES) == element::Type_t::i64 || + get_input_element_type(INDICES) == element::Type_t::i32, "INDICES type must be i32 or i64"); NODE_VALIDATION_CHECK(this, diff --git a/ngraph/core/src/op/util/index_reduction.cpp b/ngraph/core/src/op/util/index_reduction.cpp index f4fd0ab5dc10ff..f0e11361c7c5aa 100644 --- a/ngraph/core/src/op/util/index_reduction.cpp +++ b/ngraph/core/src/op/util/index_reduction.cpp @@ -68,8 +68,8 @@ void op::util::IndexReduction::validate_and_infer_types() rank, ")."); NODE_VALIDATION_CHECK(this, - m_index_element_type == element::i32 || - m_index_element_type == element::i64, + m_index_element_type == element::Type_t::i32 || + m_index_element_type == element::Type_t::i64, "Index element is neither i64 or i32."); PartialShape output_shape{PartialShape::dynamic()}; diff --git a/ngraph/core/src/op/util/logical_reduction.cpp b/ngraph/core/src/op/util/logical_reduction.cpp index dbb12c3e025bfc..c53f68be53bc97 100644 --- a/ngraph/core/src/op/util/logical_reduction.cpp +++ b/ngraph/core/src/op/util/logical_reduction.cpp @@ -28,7 +28,7 @@ op::util::LogicalReduction::LogicalReduction() op::util::LogicalReduction::LogicalReduction(const Output& arg, const AxisSet& reduction_axes) : Op({arg, op::Constant::create( - element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) + element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) ->output(0)}) { add_provenance_group_member(input_value(1).get_node_shared_ptr()); @@ -57,9 +57,10 @@ const AxisSet op::util::LogicalReduction::get_reduction_axes() const void op::util::LogicalReduction::set_reduction_axes(const AxisSet& reduction_axes) { - 
this->input(1).replace_source_output( - op::Constant::create(element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) - ->output(0)); + this->input(1).replace_source_output(op::Constant::create(element::Type_t::i64, + Shape{reduction_axes.size()}, + reduction_axes.to_vector()) + ->output(0)); } void op::util::LogicalReduction::validate_and_infer_types() @@ -111,8 +112,8 @@ void op::util::LogicalReduction::validate_and_infer_types() set_input_is_relevant_to_shape(1); NODE_VALIDATION_CHECK(this, - get_input_element_type(0).compatible(element::boolean), + get_input_element_type(0).compatible(element::Type_t::boolean), "Input element type must be boolean."); - set_output_type(0, element::boolean, result_shape); + set_output_type(0, element::Type_t::boolean, result_shape); } diff --git a/ngraph/core/src/op/util/rnn_cell_base.cpp b/ngraph/core/src/op/util/rnn_cell_base.cpp index 9a9c56e018d8cb..12ae26565aaf31 100644 --- a/ngraph/core/src/op/util/rnn_cell_base.cpp +++ b/ngraph/core/src/op/util/rnn_cell_base.cpp @@ -46,7 +46,7 @@ std::shared_ptr ngraph::op::util::convert_lstm_node_format(const Output(element::i64, Shape{}, axis); + auto axis_const = std::make_shared(element::Type_t::i64, Shape{}, axis); OutputVector splitted_node = std::make_shared(node, axis_const, num_gates)->outputs(); OutputVector nodes_in_new_format(num_gates); diff --git a/ngraph/core/src/op/util/scatter_nd_base.cpp b/ngraph/core/src/op/util/scatter_nd_base.cpp index 2bb6b9cb8af3ce..7a95b0a35fad79 100644 --- a/ngraph/core/src/op/util/scatter_nd_base.cpp +++ b/ngraph/core/src/op/util/scatter_nd_base.cpp @@ -50,7 +50,7 @@ void op::util::ScatterNDBase::validate_and_infer_types() const PartialShape& updates_shape = get_input_partial_shape(UPDATES); NODE_VALIDATION_CHECK(this, - indices_et == element::i32 || indices_et == element::i64, + indices_et == element::Type_t::i32 || indices_et == element::Type_t::i64, "Indices element type must be i64 or i32"); NODE_VALIDATION_CHECK( diff --git a/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp b/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp index 6ececc9b273ce7..1c79c1e76576d8 100644 --- a/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp +++ b/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp @@ -36,7 +36,7 @@ void op::util::UnaryElementwiseArithmetic::validate_and_infer_elementwise_arithm PartialShape& args_pshape = std::get<1>(args_et_pshape); NODE_VALIDATION_CHECK(this, - args_et.is_dynamic() || args_et != element::boolean, + args_et.is_dynamic() || args_et != element::Type_t::boolean, "Arguments cannot have boolean element type (argument element type: ", args_et, ")."); diff --git a/ngraph/core/src/pass/convert_fp32_to_fp16.cpp b/ngraph/core/src/pass/convert_fp32_to_fp16.cpp index 60d87ed5c1d5e2..8a908bb3cb3a42 100644 --- a/ngraph/core/src/pass/convert_fp32_to_fp16.cpp +++ b/ngraph/core/src/pass/convert_fp32_to_fp16.cpp @@ -25,8 +25,8 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvertFP32ToFP16, "ConvertFP32ToFP16", 0); void pass::ConvertFP32ToFP16::convert_constants_precision() { - auto constant = - std::make_shared(element::f32, Shape{1}, std::vector{0}); + auto constant = std::make_shared( + element::Type_t::f32, Shape{1}, std::vector{0}); ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) { auto constant = std::dynamic_pointer_cast(m.get_match_root()); @@ -35,7 +35,7 @@ void pass::ConvertFP32ToFP16::convert_constants_precision() return false; } - if (constant->get_element_type() == element::f32) + if 
(constant->get_element_type() == element::Type_t::f32) { auto data = constant->get_vector(); std::vector new_data(data.size()); @@ -44,7 +44,7 @@ void pass::ConvertFP32ToFP16::convert_constants_precision() new_data[i] = ngraph::float16(data[i]); } auto new_const = std::make_shared( - element::f16, constant->get_shape(), new_data); + element::Type_t::f16, constant->get_shape(), new_data); new_const->set_friendly_name(constant->get_friendly_name()); ngraph::replace_node(constant, new_const); return true; @@ -60,13 +60,13 @@ void pass::ConvertFP32ToFP16::convert_constants_precision() void pass::ConvertFP32ToFP16::convert_parameters_precision() { - auto constant = std::make_shared(element::f32, Shape{1}); + auto constant = std::make_shared(element::Type_t::f32, Shape{1}); ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) { auto parameter = std::dynamic_pointer_cast(m.get_match_root()); - if (parameter && parameter->get_element_type() == element::f32) + if (parameter && parameter->get_element_type() == element::Type_t::f32) { - parameter->set_element_type(element::f16); + parameter->set_element_type(element::Type_t::f16); return true; } return false; diff --git a/ngraph/core/src/pattern/op/label.cpp b/ngraph/core/src/pattern/op/label.cpp index 52d807afa74f47..129e9d5c57a551 100644 --- a/ngraph/core/src/pattern/op/label.cpp +++ b/ngraph/core/src/pattern/op/label.cpp @@ -68,5 +68,6 @@ std::shared_ptr pattern::any_input() std::shared_ptr pattern::any_input(const pattern::op::ValuePredicate& pred) { - return std::make_shared(element::dynamic, PartialShape::dynamic(), pred); + return std::make_shared( + element::Type_t::dynamic, PartialShape::dynamic(), pred); } diff --git a/ngraph/core/src/runtime/host_tensor.cpp b/ngraph/core/src/runtime/host_tensor.cpp index 5a8c7fe8505693..2af9bcd4b39b77 100644 --- a/ngraph/core/src/runtime/host_tensor.cpp +++ b/ngraph/core/src/runtime/host_tensor.cpp @@ -62,7 +62,7 @@ runtime::HostTensor::HostTensor(const element::Type& element_type, } runtime::HostTensor::HostTensor(const std::string& name) - : HostTensor(element::dynamic, PartialShape::dynamic()) + : HostTensor(element::Type_t::dynamic, PartialShape::dynamic()) { } diff --git a/ngraph/core/src/type/element_type.cpp b/ngraph/core/src/type/element_type.cpp index 828c3b7c787760..81a3d01345c1ae 100644 --- a/ngraph/core/src/type/element_type.cpp +++ b/ngraph/core/src/type/element_type.cpp @@ -26,6 +26,7 @@ using namespace ngraph; using namespace std; +NGRAPH_SUPPRESS_DEPRECATED_START const element::Type element::undefined(element::Type_t::undefined); const element::Type element::dynamic(element::Type_t::dynamic); const element::Type element::boolean(element::Type_t::boolean); @@ -42,6 +43,7 @@ const element::Type element::u8(element::Type_t::u8); const element::Type element::u16(element::Type_t::u16); const element::Type element::u32(element::Type_t::u32); const element::Type element::u64(element::Type_t::u64); +NGRAPH_SUPPRESS_DEPRECATED_END constexpr DiscreteTypeInfo AttributeAdapter::type_info; @@ -102,26 +104,6 @@ static const element_types_map_t& get_type_info_map() return s_type_info_map; }; -std::vector element::Type::get_known_types() -{ - std::vector rc = {&element::dynamic, - &element::boolean, - &element::bf16, - &element::f16, - &element::f32, - &element::f64, - &element::i8, - &element::i16, - &element::i32, - &element::i64, - &element::u1, - &element::u8, - &element::u16, - &element::u32, - &element::u64}; - return rc; -} - element::Type::Type(size_t bitwidth, bool is_real, bool 
is_signed, @@ -145,6 +127,11 @@ const std::string& element::Type::c_type_string() const return get_type_info_map().at(m_type).m_cname; } +bool element::Type::operator==(const element::Type_t& other) const +{ + return m_type == other; +} + bool element::Type::operator==(const element::Type& other) const { return m_type == other.m_type; @@ -292,7 +279,7 @@ bool element::Type::is_real() const bool element::Type::is_integral_number() const { - return is_integral() && (m_type != element::boolean); + return is_integral() && (m_type != element::Type_t::boolean); } bool element::Type::is_signed() const diff --git a/ngraph/core/src/util.cpp b/ngraph/core/src/util.cpp index 5cedea190ac94e..6ab6f7aef6484f 100644 --- a/ngraph/core/src/util.cpp +++ b/ngraph/core/src/util.cpp @@ -481,7 +481,7 @@ vector read_float_vector(shared_ptr tv) vector float_vec; element::Type element_type = tv->get_element_type(); - if (element_type == element::boolean) + if (element_type == element::Type_t::boolean) { vector vec = read_vector(tv); // Changed from vector ctor to explicit for loop to add static_cast @@ -491,12 +491,12 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::bf16) + else if (element_type == element::Type_t::bf16) { vector vec = read_vector(tv); float_vec = bfloat16::to_float_vector(vec); } - else if (element_type == element::f16) + else if (element_type == element::Type_t::f16) { vector vec = read_vector(tv); for (float16 value : vec) @@ -504,7 +504,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::f32) + else if (element_type == element::Type_t::f32) { vector vec = read_vector(tv); for (float value : vec) @@ -512,7 +512,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::f64) + else if (element_type == element::Type_t::f64) { vector vec = read_vector(tv); for (double value : vec) @@ -520,7 +520,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::i8) + else if (element_type == element::Type_t::i8) { vector vec = read_vector(tv); for (int8_t value : vec) @@ -528,7 +528,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::i16) + else if (element_type == element::Type_t::i16) { vector vec = read_vector(tv); for (int16_t value : vec) @@ -536,7 +536,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::i32) + else if (element_type == element::Type_t::i32) { vector vec = read_vector(tv); for (int32_t value : vec) @@ -544,7 +544,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::i64) + else if (element_type == element::Type_t::i64) { vector vec = read_vector(tv); for (int64_t value : vec) @@ -552,7 +552,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::u8) + else if (element_type == element::Type_t::u8) { vector vec = read_vector(tv); for (uint8_t value : vec) @@ -560,7 +560,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::u16) + else if (element_type == element::Type_t::u16) { vector vec = read_vector(tv); for (uint16_t value : vec) @@ -568,7 +568,7 @@ 
vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::u32) + else if (element_type == element::Type_t::u32) { vector vec = read_vector(tv); for (uint32_t value : vec) @@ -576,7 +576,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::u64) + else if (element_type == element::Type_t::u64) { vector vec = read_vector(tv); for (uint64_t value : vec) @@ -597,7 +597,7 @@ vector read_index_vector(shared_ptr tv) vector index_vec; element::Type element_type = tv->get_element_type(); - if (element_type == element::boolean) + if (element_type == element::Type_t::boolean) { vector vec = read_vector(tv); // Changed from vector ctor to explicit for loop to add static_cast @@ -607,7 +607,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::bf16) + else if (element_type == element::Type_t::bf16) { vector vec = read_vector(tv); vector float_vec = bfloat16::to_float_vector(vec); @@ -616,7 +616,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::f16) + else if (element_type == element::Type_t::f16) { vector vec = read_vector(tv); for (float16 value : vec) @@ -624,7 +624,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(static_cast(value))); } } - else if (element_type == element::f32) + else if (element_type == element::Type_t::f32) { vector vec = read_vector(tv); for (float value : vec) @@ -632,7 +632,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::f64) + else if (element_type == element::Type_t::f64) { vector vec = read_vector(tv); for (double value : vec) @@ -640,7 +640,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::i8) + else if (element_type == element::Type_t::i8) { vector vec = read_vector(tv); for (int8_t value : vec) @@ -648,7 +648,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::i16) + else if (element_type == element::Type_t::i16) { vector vec = read_vector(tv); for (int16_t value : vec) @@ -656,7 +656,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::i32) + else if (element_type == element::Type_t::i32) { vector vec = read_vector(tv); for (int32_t value : vec) @@ -664,11 +664,11 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::i64) + else if (element_type == element::Type_t::i64) { index_vec = read_vector(tv); } - else if (element_type == element::u8) + else if (element_type == element::Type_t::u8) { vector vec = read_vector(tv); for (uint8_t value : vec) @@ -676,7 +676,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::u16) + else if (element_type == element::Type_t::u16) { vector vec = read_vector(tv); for (uint16_t value : vec) @@ -684,7 +684,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::u32) + else if (element_type == element::Type_t::u32) { vector vec = read_vector(tv); for (uint32_t value : vec) @@ -692,7 +692,7 @@ vector read_index_vector(shared_ptr 
tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::u64) + else if (element_type == element::Type_t::u64) { vector vec = read_vector(tv); for (uint64_t value : vec) diff --git a/ngraph/frontend/onnx_import/include/onnx_import/core/tensor.hpp b/ngraph/frontend/onnx_import/include/onnx_import/core/tensor.hpp index 67890b719b5e28..d8415d54319d2d 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/core/tensor.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/core/tensor.hpp @@ -531,7 +531,7 @@ namespace ngraph return static_cast(m_tensor_proto->data_type()); } - const element::Type& get_ng_type() const + element::Type get_ng_type() const { if (!m_tensor_proto->has_data_type()) { @@ -540,29 +540,29 @@ namespace ngraph switch (m_tensor_proto->data_type()) { case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BOOL: - return element::boolean; + return element::Type_t::boolean; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT: - return element::f32; + return element::Type_t::f32; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16: - return element::f16; + return element::Type_t::f16; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_DOUBLE: - return element::f64; + return element::Type_t::f64; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8: - return element::i8; + return element::Type_t::i8; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT16: - return element::i16; + return element::Type_t::i16; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32: - return element::i32; + return element::Type_t::i32; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64: - return element::i64; + return element::Type_t::i64; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8: - return element::u8; + return element::Type_t::u8; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT16: - return element::u16; + return element::Type_t::u16; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT32: - return element::u32; + return element::Type_t::u32; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT64: - return element::u64; + return element::Type_t::u64; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UNDEFINED: throw error::tensor::data_type_undefined{}; default: throw error::tensor::unsupported_data_type{m_tensor_proto->data_type()}; @@ -575,29 +575,29 @@ namespace ngraph switch (m_tensor_proto->data_type()) { case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BOOL: - return make_ng_constant(element::boolean); + return make_ng_constant(element::Type_t::boolean); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT: - return make_ng_constant(element::f32); + return make_ng_constant(element::Type_t::f32); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16: - return make_ng_constant(element::f16); + return make_ng_constant(element::Type_t::f16); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_DOUBLE: - return make_ng_constant(element::f64); + return make_ng_constant(element::Type_t::f64); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8: - return make_ng_constant(element::i8); + return make_ng_constant(element::Type_t::i8); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT16: - return make_ng_constant(element::i16); + return 
make_ng_constant(element::Type_t::i16); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32: - return make_ng_constant(element::i32); + return make_ng_constant(element::Type_t::i32); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64: - return make_ng_constant(element::i64); + return make_ng_constant(element::Type_t::i64); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8: - return make_ng_constant(element::u8); + return make_ng_constant(element::Type_t::u8); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT16: - return make_ng_constant(element::u16); + return make_ng_constant(element::Type_t::u16); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT32: - return make_ng_constant(element::u32); + return make_ng_constant(element::Type_t::u32); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT64: - return make_ng_constant(element::u64); + return make_ng_constant(element::Type_t::u64); default: throw error::tensor::unsupported_data_type{m_tensor_proto->data_type()}; } } diff --git a/ngraph/frontend/onnx_import/include/onnx_import/core/transform.hpp b/ngraph/frontend/onnx_import/include/onnx_import/core/transform.hpp index 7c77dbdca8882a..d40c8dda94e909 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/core/transform.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/core/transform.hpp @@ -54,7 +54,7 @@ namespace ngraph void expand_onnx_functions(ONNX_NAMESPACE::ModelProto& model_proto); static const std::vector legacy_ops_to_fixup = { - "DetectionOutput", "FakeQuantize", "GroupNorm", "Normalize", "PriorBox"}; + "DetectionOutput", "FakeQuantize", "GroupNorm", "Normalize", "PriorBox", "Swish"}; /// \brief Add support for models with custom operators mistakenly registered in /// "ai.onnx" domain. 
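// Illustrative sketch only, not part of this patch (function names below are made up):
// why accessors such as Tensor::get_ng_type() above and ValueInfo::get_element_type() below
// now return element::Type by value instead of const element::Type&. With the switch to the
// element::Type_t enumerators, "return element::Type_t::f32;" constructs a temporary
// element::Type (the enum converts implicitly, as the rest of the patch assumes), and a
// reference return would bind to that temporary and dangle.
#include "ngraph/type/element_type.hpp"

using namespace ngraph;

// Dangling: the element::Type temporary created from the enumerator is destroyed at return,
// so an accessor must not be written this way once it returns enum values.
// const element::Type& element_type_by_reference() { return element::Type_t::f32; }

// Safe: the temporary is copied into the returned value.
element::Type element_type_by_value()
{
    return element::Type_t::f32;
}

// The comparison relies on the operator==(const element::Type_t&) overload added to
// element::Type in element_type.cpp in this patch.
int main()
{
    return element_type_by_value() == element::Type_t::f32 ? 0 : 1;
}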
diff --git a/ngraph/frontend/onnx_import/include/onnx_import/core/value_info.hpp b/ngraph/frontend/onnx_import/include/onnx_import/core/value_info.hpp index 1d98c16d364128..c45287d988b6c4 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/core/value_info.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/core/value_info.hpp @@ -75,7 +75,7 @@ namespace ngraph const std::string& get_name() const { return m_value_info_proto->name(); } const PartialShape& get_shape() const { return m_partial_shape; } - const element::Type& get_element_type() const + element::Type get_element_type() const { if (!m_value_info_proto->type().tensor_type().has_elem_type()) { diff --git a/ngraph/frontend/onnx_import/include/onnx_import/op/gather.hpp b/ngraph/frontend/onnx_import/include/onnx_import/op/gather.hpp index 762d3b6c91686d..6556db9864c25b 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/op/gather.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/op/gather.hpp @@ -43,7 +43,8 @@ namespace ngraph return {std::make_shared( data, indices, - default_opset::Constant::create(element::i64, Shape{}, {valid_axis}))}; + default_opset::Constant::create( + element::Type_t::i64, Shape{}, {valid_axis}))}; } } // namespace set_1 diff --git a/ngraph/frontend/onnx_import/include/onnx_import/op/identity.hpp b/ngraph/frontend/onnx_import/include/onnx_import/op/identity.hpp index 079148225dcc0a..3a4ab0174a0cb1 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/op/identity.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/op/identity.hpp @@ -33,10 +33,10 @@ namespace ngraph inline OutputVector identity(const Node& node) { auto input = node.get_ng_inputs().at(0); - if (input.get_element_type() == ngraph::element::boolean) + if (input.get_element_type() == ngraph::element::Type_t::boolean) { - const auto logic_zero = - default_opset::Constant::create(ngraph::element::boolean, {}, {false}); + const auto logic_zero = default_opset::Constant::create( + ngraph::element::Type_t::boolean, {}, {false}); return {std::make_shared(input, logic_zero)}; } const auto zero = diff --git a/ngraph/frontend/onnx_import/include/onnx_import/op/org.openvinotoolkit/swish.hpp b/ngraph/frontend/onnx_import/include/onnx_import/op/org.openvinotoolkit/swish.hpp new file mode 100644 index 00000000000000..dafd6eb6eeb468 --- /dev/null +++ b/ngraph/frontend/onnx_import/include/onnx_import/op/org.openvinotoolkit/swish.hpp @@ -0,0 +1,39 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#pragma once + +#include + +#include "ngraph/node.hpp" +#include "onnx_import/core/node.hpp" + +namespace ngraph +{ + namespace onnx_import + { + namespace op + { + namespace set_1 + { + OutputVector swish(const Node& node); + } // namespace set_1 + + } // namespace op + + } // namespace onnx_import + +} // namespace ngraph diff --git a/ngraph/frontend/onnx_import/include/onnx_import/utils/common.hpp b/ngraph/frontend/onnx_import/include/onnx_import/utils/common.hpp index a0157a2c0f92ae..45ef95a7328c8d 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/utils/common.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/utils/common.hpp @@ -37,7 +37,7 @@ namespace ngraph { namespace common { - const ngraph::element::Type& get_ngraph_element_type(std::int64_t onnx_type); + const ngraph::element::Type get_ngraph_element_type(std::int64_t onnx_type); /// \brief Return a monotonic sequence. /// diff --git a/ngraph/frontend/onnx_import/src/core/graph.cpp b/ngraph/frontend/onnx_import/src/core/graph.cpp index 7647917ebc4356..30c4f56541e5d0 100644 --- a/ngraph/frontend/onnx_import/src/core/graph.cpp +++ b/ngraph/frontend/onnx_import/src/core/graph.cpp @@ -95,9 +95,15 @@ namespace ngraph } catch (const ngraph::ngraph_error& exc) { - NGRAPH_WARN << "Could not create an nGraph Constant for initializer '" - << initializer_tensor.name() << "'. Detailed error:\n" - << exc.what(); + NGRAPH_WARN + << "\nCould not create an nGraph Constant for initializer '" + << initializer_tensor.name() << "'. \n" + << "Constant with a 0 value was created, make sure connected input is " + "optional.\n" + << "Otherwise verify if the initializer contains a correct number of " + "elements matching the initializer's shape. 
\n" + << "Detailed error:\n" + << exc.what(); ng_constant = default_opset::Constant::create(tensor.get_ng_type(), Shape{}, {0}); } diff --git a/ngraph/frontend/onnx_import/src/op/constant.cpp b/ngraph/frontend/onnx_import/src/op/constant.cpp index 3a1718f6154fe2..2a33e52232a48d 100644 --- a/ngraph/frontend/onnx_import/src/op/constant.cpp +++ b/ngraph/frontend/onnx_import/src/op/constant.cpp @@ -62,84 +62,84 @@ namespace ngraph inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::f16, tensor); + return __make_ng_constant(element::Type_t::f16, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::f32, tensor); + return __make_ng_constant(element::Type_t::f32, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::f64, tensor); + return __make_ng_constant(element::Type_t::f64, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::i8, tensor); + return __make_ng_constant(element::Type_t::i8, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::i16, tensor); + return __make_ng_constant(element::Type_t::i16, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::i32, tensor); + return __make_ng_constant(element::Type_t::i32, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::i64, tensor); + return __make_ng_constant(element::Type_t::i64, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::u8, tensor); + return __make_ng_constant(element::Type_t::u8, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::u16, tensor); + return __make_ng_constant(element::Type_t::u16, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::u32, tensor); + return __make_ng_constant(element::Type_t::u32, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::u64, tensor); + return __make_ng_constant(element::Type_t::u64, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::boolean, tensor); + return __make_ng_constant(element::Type_t::boolean, tensor); } inline std::shared_ptr diff --git a/ngraph/frontend/onnx_import/src/op/constant_of_shape.cpp b/ngraph/frontend/onnx_import/src/op/constant_of_shape.cpp index cf6b91f10978f1..8b33b027fd9251 100644 --- a/ngraph/frontend/onnx_import/src/op/constant_of_shape.cpp +++ b/ngraph/frontend/onnx_import/src/op/constant_of_shape.cpp @@ -39,7 +39,8 @@ namespace ngraph } else { - constant_value = default_opset::Constant::create(element::f32, {}, {0}); + constant_value = + default_opset::Constant::create(element::Type_t::f32, {}, {0}); } return {std::make_shared(constant_value, node.get_ng_inputs().at(0))}; diff --git a/ngraph/frontend/onnx_import/src/op/conv_integer.cpp b/ngraph/frontend/onnx_import/src/op/conv_integer.cpp index 76d55a15618769..e6b52ea5acb11c 100644 --- a/ngraph/frontend/onnx_import/src/op/conv_integer.cpp +++ 
b/ngraph/frontend/onnx_import/src/op/conv_integer.cpp @@ -63,10 +63,11 @@ namespace ngraph padding_above); const Strides default_data_dilation_strides(input.get_shape().size() - 2, 1); - auto scale_one = make_constant(ngraph::element::f32, Shape{}, 1); + auto scale_one = make_constant(ngraph::element::Type_t::f32, Shape{}, 1); auto input_zero_point = make_constant(input.get_element_type(), Shape{}, 0); auto filters_zero_point = make_constant(filters.get_element_type(), Shape{}, 0); - auto output_zero_point = make_constant(ngraph::element::i32, Shape{}, 0); + auto output_zero_point = + make_constant(ngraph::element::Type_t::i32, Shape{}, 0); if (num_inputs == 2) { @@ -84,7 +85,7 @@ namespace ngraph filters_zero_point, scale_one, output_zero_point, - ngraph::element::i32, + ngraph::element::Type_t::i32, ngraph::AxisSet{}, ngraph::AxisSet{}, ngraph::AxisSet{})}; @@ -110,7 +111,7 @@ namespace ngraph filters_zero_point, scale_one, output_zero_point, - ngraph::element::i32, + ngraph::element::Type_t::i32, ngraph::AxisSet{}, ngraph::AxisSet{}, ngraph::AxisSet{})}; diff --git a/ngraph/frontend/onnx_import/src/op/conv_transpose.cpp b/ngraph/frontend/onnx_import/src/op/conv_transpose.cpp index 8b7b2ea7516095..3bc7974a1a6e75 100644 --- a/ngraph/frontend/onnx_import/src/op/conv_transpose.cpp +++ b/ngraph/frontend/onnx_import/src/op/conv_transpose.cpp @@ -74,7 +74,7 @@ namespace ngraph data, filters, default_opset::Constant::create( - element::i64, Shape{output_shape.size()}, output_shape), + element::Type_t::i64, Shape{output_shape.size()}, output_shape), strides, dilations, auto_pad_type, @@ -113,7 +113,7 @@ namespace ngraph data, filters, default_opset::Constant::create( - element::i64, Shape{output_shape.size()}, output_shape), + element::Type_t::i64, Shape{output_shape.size()}, output_shape), strides, pads_begin, pads_end, @@ -144,10 +144,10 @@ namespace ngraph std::make_shared(filters); const auto filters_rank = std::make_shared(filters_shape); - const auto one_node = - default_opset::Constant::create(element::i64, Shape{1}, {1}); - const auto zero_node = - default_opset::Constant::create(element::i64, Shape{1}, {0}); + const auto one_node = default_opset::Constant::create( + element::Type_t::i64, Shape{1}, {1}); + const auto zero_node = default_opset::Constant::create( + element::Type_t::i64, Shape{1}, {0}); std::shared_ptr in_c_dim = std::make_shared( @@ -166,8 +166,8 @@ namespace ngraph std::vector{0}); // end mask // Apply shape layout transformation: - const auto groups_node = - default_opset::Constant::create(element::i64, Shape{1}, {groups}); + const auto groups_node = default_opset::Constant::create( + element::Type_t::i64, Shape{1}, {groups}); in_c_dim = std::make_shared(in_c_dim, groups_node); @@ -192,7 +192,7 @@ namespace ngraph new_bias_shape[1] = conv_pshape[1].get_length(); bias_shape_node = default_opset::Constant::create( - element::i64, Shape{new_bias_shape.size()}, new_bias_shape); + element::Type_t::i64, Shape{new_bias_shape.size()}, new_bias_shape); } else { @@ -201,10 +201,10 @@ namespace ngraph std::make_shared(conv_shape); // Prepare new bias shape base: [1, 1, 1, 1, ... 
] - const auto one_node = - default_opset::Constant::create(element::i64, Shape{1}, {1}); - const auto two_node = - default_opset::Constant::create(element::i64, Shape{1}, {2}); + const auto one_node = default_opset::Constant::create( + element::Type_t::i64, Shape{1}, {1}); + const auto two_node = default_opset::Constant::create( + element::Type_t::i64, Shape{1}, {2}); const auto remaining_shape_length = std::make_shared(conv_rank, two_node); const auto remaining_bias_shape_ones = diff --git a/ngraph/frontend/onnx_import/src/op/cum_sum.cpp b/ngraph/frontend/onnx_import/src/op/cum_sum.cpp index 3397f666b96a53..06337928d74467 100644 --- a/ngraph/frontend/onnx_import/src/op/cum_sum.cpp +++ b/ngraph/frontend/onnx_import/src/op/cum_sum.cpp @@ -41,8 +41,8 @@ namespace ngraph } else { - axis = - default_opset::Constant::create(element::i64, Shape{}, {0}); // default + axis = default_opset::Constant::create( + element::Type_t::i64, Shape{}, {0}); // default } return OutputVector{ std::make_shared(data, axis, exclusive, reverse)}; diff --git a/ngraph/frontend/onnx_import/src/op/dequantize_linear.cpp b/ngraph/frontend/onnx_import/src/op/dequantize_linear.cpp index cbe0c49e529f93..9ea2340ba0334e 100644 --- a/ngraph/frontend/onnx_import/src/op/dequantize_linear.cpp +++ b/ngraph/frontend/onnx_import/src/op/dequantize_linear.cpp @@ -41,17 +41,17 @@ namespace ngraph { auto zero_point = inputs[2]; - if (zero_point.get_element_type() != element::f32) + if (zero_point.get_element_type() != element::Type_t::f32) { - zero_point = - std::make_shared(zero_point, element::f32); + zero_point = std::make_shared( + zero_point, element::Type_t::f32); } return zero_point; } else { - return default_opset::Constant::create(element::f32, Shape{}, {0}); + return default_opset::Constant::create(element::Type_t::f32, Shape{}, {0}); } } } @@ -70,12 +70,13 @@ namespace ngraph const auto scale = inputs[1]; const auto zero_point = get_zero_point(inputs); - common::validate_scalar_input( - "Dequantization scale", scale.get_node_shared_ptr(), {element::f32}); + common::validate_scalar_input("Dequantization scale", + scale.get_node_shared_ptr(), + {element::Type_t::f32}); common::validate_scalar_input("Zero point", zero_point.get_node_shared_ptr()); const auto converted_x = - std::make_shared(x, element::f32); + std::make_shared(x, element::Type_t::f32); return {std::make_shared( std::make_shared(converted_x, zero_point), scale)}; @@ -163,7 +164,7 @@ namespace ngraph } const auto target_shape = default_opset::Constant::create( - element::i64, Shape{target_dims.size()}, target_dims); + element::Type_t::i64, Shape{target_dims.size()}, target_dims); return std::make_shared(input, target_shape, true); } @@ -198,7 +199,7 @@ namespace ngraph zero_point = reshape_input(zero_point, axis, x_shape); const auto converted_x = - std::make_shared(x, element::f32); + std::make_shared(x, element::Type_t::f32); return {std::make_shared( std::make_shared(converted_x, zero_point), scale)}; diff --git a/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp b/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp index 30b6d4b317f6b0..8b68552de159e2 100644 --- a/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp +++ b/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp @@ -57,7 +57,7 @@ namespace ngraph auto reduce_axes_vector = std::vector(data_spatial_rank); std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2); auto reduce_axes = default_opset::Constant::create( - element::i64, Shape{data_spatial_rank}, 
reduce_axes_vector); + element::Type_t::i64, Shape{data_spatial_rank}, reduce_axes_vector); return {std::make_shared(data, reduce_axes, true)}; } diff --git a/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp b/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp index 53af9d601142c3..9b92ee22eb9758 100644 --- a/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp +++ b/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp @@ -57,7 +57,7 @@ namespace ngraph auto reduce_axes_vector = std::vector(data_spatial_rank); std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2); auto reduce_axes = default_opset::Constant::create( - element::i64, Shape{data_spatial_rank}, reduce_axes_vector); + element::Type_t::i64, Shape{data_spatial_rank}, reduce_axes_vector); return {std::make_shared(data, reduce_axes, true)}; } diff --git a/ngraph/frontend/onnx_import/src/op/hardmax.cpp b/ngraph/frontend/onnx_import/src/op/hardmax.cpp index 0f4ea157b5875d..9baf0dcfe76c8f 100644 --- a/ngraph/frontend/onnx_import/src/op/hardmax.cpp +++ b/ngraph/frontend/onnx_import/src/op/hardmax.cpp @@ -50,22 +50,22 @@ namespace ngraph std::make_shared(coerced_tensor); Output row_size = std::make_shared( coerced_tensor_shape, - default_opset::Constant::create(element::i64, {1}, {1}), - default_opset::Constant::create(element::i64, {}, {0})); + default_opset::Constant::create(element::Type_t::i64, {1}, {1}), + default_opset::Constant::create(element::Type_t::i64, {}, {0})); row_size = ngraph::onnx_import::reshape::interpret_as_scalar(row_size); const auto indices_axis = 1; const auto topk = std::make_shared( coerced_tensor, - default_opset::Constant::create(ngraph::element::i64, Shape{}, {1}), + default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {1}), indices_axis, default_opset::TopK::Mode::MAX, default_opset::TopK::SortType::NONE); const auto on_value = - default_opset::Constant::create(ngraph::element::i64, Shape{}, {1}); + default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {1}); const auto off_value = - default_opset::Constant::create(ngraph::element::i64, Shape{}, {0}); + default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {0}); const auto results = std::make_shared( topk->output(1), row_size, on_value, off_value, indices_axis); diff --git a/ngraph/frontend/onnx_import/src/op/instance_norm.cpp b/ngraph/frontend/onnx_import/src/op/instance_norm.cpp index 9516ea52a9ae5e..4a1ca7aba6ac1a 100644 --- a/ngraph/frontend/onnx_import/src/op/instance_norm.cpp +++ b/ngraph/frontend/onnx_import/src/op/instance_norm.cpp @@ -99,7 +99,7 @@ namespace ngraph if (data_pshape.is_static()) { data_shape_node = std::make_shared( - element::i64, + element::Type_t::i64, Shape{static_cast(data_pshape.rank().get_length())}, data_pshape.to_shape()); } @@ -112,11 +112,13 @@ namespace ngraph scale = std::make_shared( scale, data_shape_node, - std::make_shared(element::i64, Shape{1}, 1)); + std::make_shared( + element::Type_t::i64, Shape{1}, 1)); bias = std::make_shared( bias, data_shape_node, - std::make_shared(element::i64, Shape{1}, 1)); + std::make_shared( + element::Type_t::i64, Shape{1}, 1)); // scale * mvn + bias std::shared_ptr result = diff --git a/ngraph/frontend/onnx_import/src/op/log_softmax.cpp b/ngraph/frontend/onnx_import/src/op/log_softmax.cpp index c19ca2b86c0d4e..a083613e2be5fd 100644 --- a/ngraph/frontend/onnx_import/src/op/log_softmax.cpp +++ b/ngraph/frontend/onnx_import/src/op/log_softmax.cpp @@ -32,7 +32,8 @@ namespace ngraph { const auto coerced_data = 
ngraph::builder::opset1::flatten(data, axis); - const auto axis_1 = default_opset::Constant::create(element::i64, Shape{1}, {1}); + const auto axis_1 = + default_opset::Constant::create(element::Type_t::i64, Shape{1}, {1}); const auto max = std::make_shared(coerced_data, axis_1, true); diff --git a/ngraph/frontend/onnx_import/src/op/loop.cpp b/ngraph/frontend/onnx_import/src/op/loop.cpp index 2039b12b46ff65..519b28c6318efe 100644 --- a/ngraph/frontend/onnx_import/src/op/loop.cpp +++ b/ngraph/frontend/onnx_import/src/op/loop.cpp @@ -62,7 +62,7 @@ namespace ngraph ->input_value(1) .get_node_shared_ptr(); if (ngraph::op::is_constant(second_input) && - second_input->get_element_type() == element::boolean && + second_input->get_element_type() == element::Type_t::boolean && as_type_ptr(second_input) ->cast_vector() .at(0) == false) @@ -90,7 +90,8 @@ namespace ngraph if (ngraph::op::is_null(ng_inputs.at(0))) // trip count skipped { // -1 means infinite Loop - trip_count = ngraph::op::Constant::create(ngraph::element::i64, {1}, {-1}); + trip_count = + ngraph::op::Constant::create(ngraph::element::Type_t::i64, {1}, {-1}); } else { @@ -102,8 +103,8 @@ namespace ngraph if (ngraph::op::is_null( ng_inputs.at(1).get_node_shared_ptr())) // termination condition skipped { - termination_cond = - ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true}); + termination_cond = ngraph::op::Constant::create( + ngraph::element::Type_t::boolean, {1}, {true}); } else if (ngraph::op::is_constant(ng_inputs.at(1).get_node_shared_ptr()) && as_type_ptr( @@ -130,8 +131,8 @@ namespace ngraph } const int64_t concat_axis = 0; - const auto concat_axis_const = - ngraph::op::Constant::create(ngraph::element::i64, {1}, {concat_axis}); + const auto concat_axis_const = ngraph::op::Constant::create( + ngraph::element::Type_t::i64, {1}, {concat_axis}); // provide scalar handing for scan outputs for (size_t i = loop_carried_dependencies.size() + 1; i < body_outputs.size(); ++i) @@ -149,8 +150,8 @@ namespace ngraph // optimization allow to improve nG Loop shape inference if (is_termination_condition_always_true(body_loop_out_cond)) { - body_outputs[0] = - ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true}); + body_outputs[0] = ngraph::op::Constant::create( + ngraph::element::Type_t::boolean, {1}, {true}); } CHECK_VALID_NODE(node, diff --git a/ngraph/frontend/onnx_import/src/op/lp_norm.cpp b/ngraph/frontend/onnx_import/src/op/lp_norm.cpp index 3bdd9a71a673fa..11f941fcf136c0 100644 --- a/ngraph/frontend/onnx_import/src/op/lp_norm.cpp +++ b/ngraph/frontend/onnx_import/src/op/lp_norm.cpp @@ -58,12 +58,14 @@ namespace ngraph "Only normalization of 1st or 2nd order is supported."); const auto normalize_axis_const = - default_opset::Constant::create(element::i64, {}, {normalize_axis}); + default_opset::Constant::create(element::Type_t::i64, {}, {normalize_axis}); std::shared_ptr norm = ngraph::builder::opset1::lp_norm( data, normalize_axis_const, static_cast(p_norm)); - const auto target_shape = default_opset::Constant::create( - element::i64, Shape{size_t(data_rank_value)}, data_shape.to_shape()); + const auto target_shape = + default_opset::Constant::create(element::Type_t::i64, + Shape{size_t(data_rank_value)}, + data_shape.to_shape()); // Create a default axes order matching the data tensor rank and erase the // element at the 'normalize_axis' position. 
The erased element indicates the @@ -74,7 +76,7 @@ namespace ngraph axes_values.erase(axes_values.begin() + normalize_axis); const auto axes_mapping = default_opset::Constant::create( - element::i64, Shape{axes_values.size()}, axes_values); + element::Type_t::i64, Shape{axes_values.size()}, axes_values); norm = std::make_shared( norm, target_shape, axes_mapping); diff --git a/ngraph/frontend/onnx_import/src/op/lp_pool.cpp b/ngraph/frontend/onnx_import/src/op/lp_pool.cpp index aa7337572d0dd4..65ab066240cc24 100644 --- a/ngraph/frontend/onnx_import/src/op/lp_pool.cpp +++ b/ngraph/frontend/onnx_import/src/op/lp_pool.cpp @@ -75,7 +75,7 @@ namespace ngraph output_shape.at(0) = data_shape[0].get_length(); const auto reshape_pattern = default_opset::Constant::create( - element::i64, Shape{output_shape.size()}, output_shape); + element::Type_t::i64, Shape{output_shape.size()}, output_shape); slice = std::make_shared(slice, reshape_pattern, false); diff --git a/ngraph/frontend/onnx_import/src/op/lstm.cpp b/ngraph/frontend/onnx_import/src/op/lstm.cpp index c67b260e78c16e..e53963ba0b5e89 100644 --- a/ngraph/frontend/onnx_import/src/op/lstm.cpp +++ b/ngraph/frontend/onnx_import/src/op/lstm.cpp @@ -211,7 +211,7 @@ namespace ngraph m_input_map[LSTMInput::LSTM_INPUT_SEQ_LENGTHS] = default_opset::Constant::create( - element::i32, + element::Type_t::i32, Shape{m_dim_map[LSTMInputDimension::BATCH_SIZE]}, std::vector( m_dim_map[LSTMInputDimension::BATCH_SIZE], diff --git a/ngraph/frontend/onnx_import/src/op/non_max_suppression.cpp b/ngraph/frontend/onnx_import/src/op/non_max_suppression.cpp index b41b409a13677f..3a3383913283ca 100644 --- a/ngraph/frontend/onnx_import/src/op/non_max_suppression.cpp +++ b/ngraph/frontend/onnx_import/src/op/non_max_suppression.cpp @@ -49,7 +49,7 @@ namespace ngraph else { max_output_boxes_per_class = - default_opset::Constant::create(element::i64, Shape{}, {0}); + default_opset::Constant::create(element::Type_t::i64, Shape{}, {0}); } Output iou_threshold; @@ -61,7 +61,7 @@ namespace ngraph else { iou_threshold = - default_opset::Constant::create(element::f32, Shape{}, {.0f}); + default_opset::Constant::create(element::Type_t::f32, Shape{}, {.0f}); } Output score_threshold; @@ -73,7 +73,7 @@ namespace ngraph else { score_threshold = - default_opset::Constant::create(element::f32, Shape{}, {.0f}); + default_opset::Constant::create(element::Type_t::f32, Shape{}, {.0f}); } const auto center_point_box = diff --git a/ngraph/frontend/onnx_import/src/op/non_zero.cpp b/ngraph/frontend/onnx_import/src/op/non_zero.cpp index 2c96ec1c106326..e72b5da9208c0e 100644 --- a/ngraph/frontend/onnx_import/src/op/non_zero.cpp +++ b/ngraph/frontend/onnx_import/src/op/non_zero.cpp @@ -30,7 +30,7 @@ namespace ngraph OutputVector non_zero(const Node& node) { const auto data = node.get_ng_inputs().at(0); - return {std::make_shared(data, element::i64)}; + return {std::make_shared(data, element::Type_t::i64)}; } } // namespace set_1 diff --git a/ngraph/frontend/onnx_import/src/op/onehot.cpp b/ngraph/frontend/onnx_import/src/op/onehot.cpp index 229b4ed6d90485..018b4d99fbe060 100644 --- a/ngraph/frontend/onnx_import/src/op/onehot.cpp +++ b/ngraph/frontend/onnx_import/src/op/onehot.cpp @@ -32,13 +32,14 @@ namespace ngraph OutputVector onehot(const Node& node) { OutputVector inputs{node.get_ng_inputs()}; - auto indices = - std::make_shared(inputs.at(0), element::i64); + auto indices = std::make_shared(inputs.at(0), + element::Type_t::i64); auto depth = reshape::interpret_as_scalar(inputs.at(1)); // Rank 1 
tensor containing exactly two elements: [off_value, on_value] auto values = inputs.at(2); - auto split_axis = default_opset::Constant::create(element::i64, {}, {0}); + auto split_axis = + default_opset::Constant::create(element::Type_t::i64, {}, {0}); auto off_on_values = std::make_shared(values, split_axis, 2); auto off_value = reshape::interpret_as_scalar(off_on_values->output(0)); diff --git a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/group_norm.cpp b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/group_norm.cpp index bdc0294d92fa86..225c6ddc1f52f1 100644 --- a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/group_norm.cpp +++ b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/group_norm.cpp @@ -55,13 +55,13 @@ namespace ngraph new_shape.push_back(shape[i]); } return default_opset::Constant::create( - element::i64, Shape{new_shape.size()}, new_shape); + element::Type_t::i64, Shape{new_shape.size()}, new_shape); } auto shape = std::make_shared(data); auto splits = builder::opset1::split(shape, rank_size); - auto num_groups_const = - default_opset::Constant::create(element::i64, Shape{1}, {num_groups}); + auto num_groups_const = default_opset::Constant::create( + element::Type_t::i64, Shape{1}, {num_groups}); NodeVector new_shape{ splits[0].get_node_shared_ptr(), num_groups_const, @@ -98,7 +98,7 @@ namespace ngraph { auto shape = data_pshape.to_shape(); data_shape_node = default_opset::Constant::create( - element::u64, Shape{shape.size()}, shape); + element::Type_t::u64, Shape{shape.size()}, shape); } else { diff --git a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/normalize.cpp b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/normalize.cpp index 226658d7f55e82..ffec771b1421fc 100644 --- a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/normalize.cpp +++ b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/normalize.cpp @@ -66,7 +66,7 @@ namespace ngraph weights_shape.push_back(1); } auto new_shape = std::make_shared( - element::i64, Shape{weights_shape.size()}, weights_shape); + element::Type_t::i64, Shape{weights_shape.size()}, weights_shape); weights = std::make_shared(inputs[1], new_shape, true); } @@ -75,7 +75,7 @@ namespace ngraph if (!across_spatial) { axes = std::make_shared( - element::i64, Shape{1}, std::vector{1}); + element::Type_t::i64, Shape{1}, std::vector{1}); } else { diff --git a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/prior_box.cpp b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/prior_box.cpp index 33ae3dc25a4b97..222e84cf598b36 100644 --- a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/prior_box.cpp +++ b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/prior_box.cpp @@ -36,9 +36,9 @@ namespace ngraph return std::make_shared( node, default_opset::Constant::create( - element::i64, Shape{1}, std::vector{start}), + element::Type_t::i64, Shape{1}, std::vector{start}), default_opset::Constant::create( - element::i64, Shape{1}, std::vector{end}), + element::Type_t::i64, Shape{1}, std::vector{end}), std::vector{0}, // begin mask std::vector{0}); // end mask } @@ -75,7 +75,7 @@ namespace ngraph attrs.density = node.get_attribute_value>("density", {}); auto axes = default_opset::Constant::create( - element::i64, Shape{1}, std::vector{0}); + element::Type_t::i64, Shape{1}, std::vector{0}); return {std::make_shared( std::make_shared( diff --git a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/swish.cpp b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/swish.cpp new 
file mode 100644 index 00000000000000..0856d25b223314 --- /dev/null +++ b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/swish.cpp @@ -0,0 +1,55 @@ +//***************************************************************************** +// Copyright 2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#include "ngraph/op/normalize_l2.hpp" +#include "onnx_import/default_opset.hpp" +#include "onnx_import/default_opset.hpp" +#include "onnx_import/op/org.openvinotoolkit/normalize.hpp" +#include "onnx_import/utils/common.hpp" +#include "onnx_import/utils/reshape.hpp" + +namespace ngraph +{ + namespace onnx_import + { + namespace op + { + namespace set_1 + { + OutputVector swish(const Node& node) + { + OutputVector ng_inputs{node.get_ng_inputs()}; + + Output beta; + if (ng_inputs.size() > 1) + { + beta = ngraph::onnx_import::reshape::interpret_as_scalar(ng_inputs.at(1)); + } + else + { + beta = + default_opset::Constant::create(element::Type_t::f32, Shape{}, {1.0}); + } + + return {std::make_shared(ng_inputs.at(0), beta)}; + } + + } // namespace set_1 + } // namespace op + + } // namespace onnx_import + +} // namespace ngraph diff --git a/ngraph/frontend/onnx_import/src/op/pad.cpp b/ngraph/frontend/onnx_import/src/op/pad.cpp index 2143d194f4af2e..73b264893171fe 100644 --- a/ngraph/frontend/onnx_import/src/op/pad.cpp +++ b/ngraph/frontend/onnx_import/src/op/pad.cpp @@ -83,9 +83,13 @@ namespace ngraph return {std::make_shared( data, std::make_shared( - element::i64, ngraph::Shape{padding_below.size()}, padding_below), + element::Type_t::i64, + ngraph::Shape{padding_below.size()}, + padding_below), std::make_shared( - element::i64, ngraph::Shape{padding_above.size()}, padding_above), + element::Type_t::i64, + ngraph::Shape{padding_above.size()}, + padding_above), std::make_shared( data.get_element_type(), ngraph::Shape{}, std::vector{value}), pad_mode)}; @@ -125,20 +129,20 @@ namespace ngraph pads_vector.begin() + half_size, pads_vector.end()); padding_begin = default_opset::Constant::create( - element::i64, ngraph::Shape{half_size}, padding_begin_values); + element::Type_t::i64, ngraph::Shape{half_size}, padding_begin_values); padding_end = default_opset::Constant::create( - element::i64, ngraph::Shape{half_size}, padding_end_values); + element::Type_t::i64, ngraph::Shape{half_size}, padding_end_values); } else { - auto axis = - default_opset::Constant::create(element::i64, ngraph::Shape{}, {0}); + auto axis = default_opset::Constant::create( + element::Type_t::i64, ngraph::Shape{}, {0}); OutputVector padding = builder::opset1::split(pads, 2, 0); - padding_begin = - std::make_shared(padding.at(0), element::i64); - padding_end = - std::make_shared(padding.at(1), element::i64); + padding_begin = std::make_shared( + padding.at(0), element::Type_t::i64); + padding_end = std::make_shared( + padding.at(1), element::Type_t::i64); } const std::string mode = diff --git 
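For reference, the Swish importer added above produces a node computing y = x * sigmoid(beta * x), where beta comes from the optional second input and otherwise defaults to the f32 scalar constant 1.0 created in the else branch. A minimal scalar sketch of that behaviour, for illustration only (the helper name is invented for this note):

    #include <cmath>

    // y = x * sigmoid(beta * x); beta defaults to 1.0f when the optional second input is absent.
    inline float swish_reference(float x, float beta = 1.0f)
    {
        return x / (1.0f + std::exp(-beta * x));
    }
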
a/ngraph/frontend/onnx_import/src/op/quant_conv.cpp b/ngraph/frontend/onnx_import/src/op/quant_conv.cpp index a7478b9a8dc658..042679e0c21bf6 100644 --- a/ngraph/frontend/onnx_import/src/op/quant_conv.cpp +++ b/ngraph/frontend/onnx_import/src/op/quant_conv.cpp @@ -69,15 +69,15 @@ namespace ngraph const Output& bias = nullptr) { ngraph::element::Type output_type; - if (data.get_element_type() == ngraph::element::u8 && - filters.get_element_type() == ngraph::element::i8) + if (data.get_element_type() == ngraph::element::Type_t::u8 && + filters.get_element_type() == ngraph::element::Type_t::i8) { - output_type = ngraph::element::i8; + output_type = ngraph::element::Type_t::i8; } - else if (data.get_element_type() == ngraph::element::u8 && - filters.get_element_type() == ngraph::element::u8) + else if (data.get_element_type() == ngraph::element::Type_t::u8 && + filters.get_element_type() == ngraph::element::Type_t::u8) { - output_type = ngraph::element::u8; + output_type = ngraph::element::Type_t::u8; } if (groups > 1) { diff --git a/ngraph/frontend/onnx_import/src/op/quantize_linear.cpp b/ngraph/frontend/onnx_import/src/op/quantize_linear.cpp index 4115b9c62bb793..5f4126f667da82 100644 --- a/ngraph/frontend/onnx_import/src/op/quantize_linear.cpp +++ b/ngraph/frontend/onnx_import/src/op/quantize_linear.cpp @@ -48,7 +48,7 @@ namespace ngraph else { return std::make_shared( - element::u8, Shape{1}, std::uint8_t(0)); + element::Type_t::u8, Shape{1}, std::uint8_t(0)); } } @@ -59,7 +59,8 @@ namespace ngraph CHECK_VALID_NODE( onnx_node, y_zero_point_et.is_static() && - (y_zero_point_et == element::u8 || y_zero_point_et == element::i8), + (y_zero_point_et == element::Type_t::u8 || + y_zero_point_et == element::Type_t::i8), "\"y_zero_point\" input data type must be static and of 8-bit " "integer type."); } @@ -72,9 +73,10 @@ namespace ngraph CHECK_VALID_NODE(onnx_node, y_scale_et.is_static(), "\"y_scale\" input data type must be static."); - if (y_scale_et != element::f32) + if (y_scale_et != element::Type_t::f32) { - return std::make_shared(y_scale, element::f32); + return std::make_shared(y_scale, + element::Type_t::f32); } return y_scale; } @@ -87,9 +89,10 @@ namespace ngraph data_et.is_static(), "\"x\" input data type must be static."); - if (data_et != element::f32) + if (data_et != element::Type_t::f32) { - return std::make_shared(data, element::f32); + return std::make_shared(data, + element::Type_t::f32); } return data; } @@ -101,7 +104,7 @@ namespace ngraph std::shared_ptr output_low; std::shared_ptr output_high; - if (destination_type == element::i8) + if (destination_type == element::Type_t::i8) { output_low = std::make_shared( data_type, Shape{1}, -128); diff --git a/ngraph/frontend/onnx_import/src/op/reduce.cpp b/ngraph/frontend/onnx_import/src/op/reduce.cpp index 28058c697e2f9d..9ee53014cf3479 100644 --- a/ngraph/frontend/onnx_import/src/op/reduce.cpp +++ b/ngraph/frontend/onnx_import/src/op/reduce.cpp @@ -61,7 +61,7 @@ namespace ngraph } return default_opset::Constant::create( - element::i64, Shape{reduction_axes.size()}, reduction_axes); + element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes); } template diff --git a/ngraph/frontend/onnx_import/src/op/reshape.cpp b/ngraph/frontend/onnx_import/src/op/reshape.cpp index df893c954e8585..83e84ad78d45a4 100644 --- a/ngraph/frontend/onnx_import/src/op/reshape.cpp +++ b/ngraph/frontend/onnx_import/src/op/reshape.cpp @@ -51,7 +51,7 @@ namespace ngraph node.get_attribute_value>("shape", {}); pattern = 
default_opset::Constant::create( - element::i64, Shape{output_shape.size()}, output_shape); + element::Type_t::i64, Shape{output_shape.size()}, output_shape); } return {std::make_shared(data, pattern, true)}; diff --git a/ngraph/frontend/onnx_import/src/op/resize.cpp b/ngraph/frontend/onnx_import/src/op/resize.cpp index ff288d82d3fa42..d84084b833e7e1 100644 --- a/ngraph/frontend/onnx_import/src/op/resize.cpp +++ b/ngraph/frontend/onnx_import/src/op/resize.cpp @@ -166,7 +166,7 @@ namespace ngraph std::floor(data_static_shape.at(i) * scales_vector.at(i))); } auto output_shape_const = default_opset::Constant::create( - element::u64, Shape({output_shape.size()}), output_shape); + element::Type_t::u64, Shape({output_shape.size()}), output_shape); return output_shape_const; } @@ -175,8 +175,8 @@ namespace ngraph std::make_shared(data), scales.get_element_type()); const auto multiply = std::make_shared(shape_of_data, scales); - const auto output_shape = - std::make_shared(multiply, ngraph::element::i64); + const auto output_shape = std::make_shared( + multiply, ngraph::element::Type_t::i64); return output_shape; } @@ -207,19 +207,20 @@ namespace ngraph scales.push_back(scale); } auto scales_const = default_opset::Constant::create( - element::f32, Shape({scales.size()}), scales); + element::Type_t::f32, Shape({scales.size()}), scales); return scales_const; } const auto shape_of_data = std::make_shared( - std::make_shared(data), ngraph::element::f32); - const auto converted_sizes = - std::make_shared(sizes, ngraph::element::f32); + std::make_shared(data), + ngraph::element::Type_t::f32); + const auto converted_sizes = std::make_shared( + sizes, ngraph::element::Type_t::f32); const auto divide = std::make_shared(converted_sizes, shape_of_data); const auto eps_node = std::make_shared( - ngraph::element::f32, Shape{}, epsilon); + ngraph::element::Type_t::f32, Shape{}, epsilon); const auto scales = std::make_shared(divide, eps_node); return scales; diff --git a/ngraph/frontend/onnx_import/src/op/reverse_sequence.cpp b/ngraph/frontend/onnx_import/src/op/reverse_sequence.cpp index ad61af22bdaf4a..1f7d45ae6e0f25 100644 --- a/ngraph/frontend/onnx_import/src/op/reverse_sequence.cpp +++ b/ngraph/frontend/onnx_import/src/op/reverse_sequence.cpp @@ -38,7 +38,7 @@ namespace ngraph const auto sequence_lengths = node.get_ng_inputs().at(1); // nGraph supports only int32 type of sequence_lengths const auto sequence_lengths_i32 = std::make_shared( - node.get_ng_inputs().at(1), element::i32); + node.get_ng_inputs().at(1), element::Type_t::i32); const auto data_rank = data.get_partial_shape().rank(); const auto batch_axis = node.get_attribute_value("batch_axis", 1); diff --git a/ngraph/frontend/onnx_import/src/op/scatter_elements.cpp b/ngraph/frontend/onnx_import/src/op/scatter_elements.cpp index 984c6f1b9a8ed7..601429508089ea 100644 --- a/ngraph/frontend/onnx_import/src/op/scatter_elements.cpp +++ b/ngraph/frontend/onnx_import/src/op/scatter_elements.cpp @@ -36,7 +36,7 @@ namespace ngraph const auto axis = node.get_attribute_value("axis", 0); const auto axis_node = - default_opset::Constant::create(element::i64, Shape{}, {axis}); + default_opset::Constant::create(element::Type_t::i64, Shape{}, {axis}); return {std::make_shared( data, indices, updates, axis_node)}; diff --git a/ngraph/frontend/onnx_import/src/op/shape.cpp b/ngraph/frontend/onnx_import/src/op/shape.cpp index c02df889f9a563..f6643a972e1fee 100644 --- a/ngraph/frontend/onnx_import/src/op/shape.cpp +++ b/ngraph/frontend/onnx_import/src/op/shape.cpp @@ 
-39,7 +39,7 @@ namespace ngraph { const auto static_data_shape = data_shape.to_shape(); - return {default_opset::Constant::create(ngraph::element::i64, + return {default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{static_data_shape.size()}, static_data_shape)}; } diff --git a/ngraph/frontend/onnx_import/src/op/size.cpp b/ngraph/frontend/onnx_import/src/op/size.cpp index b1331f3c3af124..1c892087489f9b 100644 --- a/ngraph/frontend/onnx_import/src/op/size.cpp +++ b/ngraph/frontend/onnx_import/src/op/size.cpp @@ -38,7 +38,7 @@ namespace ngraph static_cast(shape_size(data.get_shape()))}; return {std::make_shared( - ngraph::element::i64, + ngraph::element::Type_t::i64, Shape{}, std::vector{tensor_elements_count})}; } diff --git a/ngraph/frontend/onnx_import/src/op/slice.cpp b/ngraph/frontend/onnx_import/src/op/slice.cpp index 20478b523419af..20ae2a65e2160f 100644 --- a/ngraph/frontend/onnx_import/src/op/slice.cpp +++ b/ngraph/frontend/onnx_import/src/op/slice.cpp @@ -139,15 +139,16 @@ namespace ngraph // expected_output_shape: {3, 3, 1, 1} OutputVector adjusted_indices(slice_indices_length); std::vector target_axes(axes); - const auto gather_axis = default_opset::Constant::create(element::i64, {}, {0}); + const auto gather_axis = + default_opset::Constant::create(element::Type_t::i64, {}, {0}); int added_indices_number = 0; for (int i = 0; i < slice_indices_length; ++i) { if (std::find(std::begin(axes), std::end(axes), i) == axes.end()) { - adjusted_indices[i] = - default_opset::Constant::create(element::i64, {1}, {fill_in_value}); + adjusted_indices[i] = default_opset::Constant::create( + element::Type_t::i64, {1}, {fill_in_value}); target_axes.insert(std::next(target_axes.begin(), i), i); ++added_indices_number; } @@ -156,7 +157,7 @@ namespace ngraph adjusted_indices[i] = std::make_shared( indices, default_opset::Constant::create( - element::i64, {1}, {i - added_indices_number}), + element::Type_t::i64, {1}, {i - added_indices_number}), gather_axis); } } @@ -202,7 +203,7 @@ namespace ngraph "Data rank must be static when axes input is not provided"); const size_t data_rank_value = data_rank.get_length(); axes = default_opset::Constant::create( - element::i64, + element::Type_t::i64, {data_rank_value}, common::get_monotonic_range(data_rank_value)); } @@ -225,7 +226,7 @@ namespace ngraph else { steps = default_opset::Constant::create( - element::i64, + element::Type_t::i64, {slice_indices_length}, std::vector(slice_indices_length, 1)); } @@ -252,9 +253,9 @@ namespace ngraph std::shared_ptr starts = std::make_shared( - element::i64, Shape{starts_atr.size()}, starts_atr); + element::Type_t::i64, Shape{starts_atr.size()}, starts_atr); std::shared_ptr ends = std::make_shared( - element::i64, Shape{ends_atr.size()}, ends_atr); + element::Type_t::i64, Shape{ends_atr.size()}, ends_atr); auto axes = node.get_attribute_value>( "axes", std::vector()); @@ -277,7 +278,7 @@ namespace ngraph const auto begin_end_mask = axes_to_mask(normalized_axes, slice_indices_length); std::shared_ptr strides = default_opset::Constant::create( - element::i64, + element::Type_t::i64, Shape{slice_indices_length}, std::vector(slice_indices_length, 1)); diff --git a/ngraph/frontend/onnx_import/src/op/softmax.cpp b/ngraph/frontend/onnx_import/src/op/softmax.cpp index 87c7e5192f7521..24daa2cd1c66b1 100644 --- a/ngraph/frontend/onnx_import/src/op/softmax.cpp +++ b/ngraph/frontend/onnx_import/src/op/softmax.cpp @@ -32,7 +32,8 @@ namespace ngraph { const auto coerced_data = ngraph::builder::opset1::flatten(data, 
axis); - const auto axis_1 = default_opset::Constant::create(element::i64, Shape{1}, {1}); + const auto axis_1 = + default_opset::Constant::create(element::Type_t::i64, Shape{1}, {1}); const auto max = std::make_shared(coerced_data, axis_1, true); diff --git a/ngraph/frontend/onnx_import/src/op/squeeze.cpp b/ngraph/frontend/onnx_import/src/op/squeeze.cpp index 035f5902957d70..8dc6ac87b00ca0 100644 --- a/ngraph/frontend/onnx_import/src/op/squeeze.cpp +++ b/ngraph/frontend/onnx_import/src/op/squeeze.cpp @@ -39,7 +39,7 @@ namespace ngraph std::vector normalized_axes = ngraph::normalize_axes(node.get_description(), axes, data_rank); auto axes_node = std::make_shared( - element::u64, Shape{normalized_axes.size()}, normalized_axes); + element::Type_t::u64, Shape{normalized_axes.size()}, normalized_axes); return {std::make_shared(data, axes_node)}; } diff --git a/ngraph/frontend/onnx_import/src/op/tile.cpp b/ngraph/frontend/onnx_import/src/op/tile.cpp index e14af18e72629b..2d9faa381c73fb 100644 --- a/ngraph/frontend/onnx_import/src/op/tile.cpp +++ b/ngraph/frontend/onnx_import/src/op/tile.cpp @@ -35,7 +35,8 @@ namespace ngraph // Workaround for backends which require repeats to be i64. // Remove the following line when no longer needed. - repeats = std::make_shared(repeats, element::i64); + repeats = + std::make_shared(repeats, element::Type_t::i64); return {std::make_shared(input, repeats)}; } diff --git a/ngraph/frontend/onnx_import/src/op/topk.cpp b/ngraph/frontend/onnx_import/src/op/topk.cpp index 8dfb1ecb4ecc1e..3267b97f479fba 100644 --- a/ngraph/frontend/onnx_import/src/op/topk.cpp +++ b/ngraph/frontend/onnx_import/src/op/topk.cpp @@ -63,7 +63,8 @@ namespace ngraph { auto data = node.get_ng_inputs().at(0); std::int64_t k{node.get_attribute_value("k")}; - auto k_node = default_opset::Constant::create(element::i64, Shape{}, {k}); + auto k_node = + default_opset::Constant::create(element::Type_t::i64, Shape{}, {k}); auto axis = get_axis(node); std::shared_ptr top_k = std::make_shared( @@ -72,7 +73,7 @@ namespace ngraph axis, default_opset::TopK::Mode::MAX, default_opset::TopK::SortType::SORT_VALUES, - element::i64); + element::Type_t::i64); return {top_k->output(0), top_k->output(1)}; } @@ -92,7 +93,7 @@ namespace ngraph axis, default_opset::TopK::Mode::MAX, default_opset::TopK::SortType::SORT_VALUES, - element::i64); + element::Type_t::i64); return {top_k->output(0), top_k->output(1)}; } @@ -120,7 +121,7 @@ namespace ngraph : default_opset::TopK::Mode::MIN; std::shared_ptr top_k = std::make_shared( - data, k, axis, mode, sort_type, element::i64); + data, k, axis, mode, sort_type, element::Type_t::i64); return {top_k->output(0), top_k->output(1)}; } diff --git a/ngraph/frontend/onnx_import/src/op/unsqueeze.cpp b/ngraph/frontend/onnx_import/src/op/unsqueeze.cpp index ba2a64778e8648..150dd5684db8ec 100644 --- a/ngraph/frontend/onnx_import/src/op/unsqueeze.cpp +++ b/ngraph/frontend/onnx_import/src/op/unsqueeze.cpp @@ -35,7 +35,7 @@ namespace ngraph auto data = node.get_ng_inputs().at(0); auto axes = node.get_attribute_value>("axes", {}); auto axes_node = std::make_shared( - element::i64, Shape{axes.size()}, axes); + element::Type_t::i64, Shape{axes.size()}, axes); return {std::make_shared(data, axes_node)}; } diff --git a/ngraph/frontend/onnx_import/src/op/upsample.cpp b/ngraph/frontend/onnx_import/src/op/upsample.cpp index ff749771b97997..5c635d71501c9f 100644 --- a/ngraph/frontend/onnx_import/src/op/upsample.cpp +++ b/ngraph/frontend/onnx_import/src/op/upsample.cpp @@ -111,24 +111,26 @@ 
namespace ngraph std::floor(data_static_shape.at(i) * scales.at(i))); } auto output_shape_const = default_opset::Constant::create( - element::u64, Shape({output_shape.size()}), output_shape); + element::Type_t::u64, Shape({output_shape.size()}), output_shape); const auto scales_const = default_opset::Constant::create( - ngraph::element::f32, Shape({scales.size()}), scales); + ngraph::element::Type_t::f32, Shape({scales.size()}), scales); return {std::make_shared( data, output_shape_const, scales_const, attrs)}; } const auto scales_const = default_opset::Constant::create( - ngraph::element::f32, Shape({scales.size()}), scales); + ngraph::element::Type_t::f32, Shape({scales.size()}), scales); auto shape_of_data = std::make_shared( - std::make_shared(data), ngraph::element::f32); + std::make_shared(data), + ngraph::element::Type_t::f32); auto multiply = std::make_shared(shape_of_data, scales_const); auto output_shape = std::make_shared( - std::make_shared(multiply), ngraph::element::i64); + std::make_shared(multiply), + ngraph::element::Type_t::i64); return {std::make_shared( data, output_shape, scales_const, attrs)}; @@ -188,18 +190,20 @@ namespace ngraph std::floor(data_static_shape.at(i) * scales_vector.at(i))); } auto output_shape_const = default_opset::Constant::create( - element::u64, Shape({output_shape.size()}), output_shape); + element::Type_t::u64, Shape({output_shape.size()}), output_shape); return {std::make_shared( data, output_shape_const, scales, attrs)}; } auto shape_of_data = std::make_shared( - std::make_shared(data), ngraph::element::f32); + std::make_shared(data), + ngraph::element::Type_t::f32); auto multiply = std::make_shared(shape_of_data, scales); auto output_shape = std::make_shared( - std::make_shared(multiply), ngraph::element::i64); + std::make_shared(multiply), + ngraph::element::Type_t::i64); return {std::make_shared( data, output_shape, scales, attrs)}; diff --git a/ngraph/frontend/onnx_import/src/ops_bridge.cpp b/ngraph/frontend/onnx_import/src/ops_bridge.cpp index 14481a79458752..7c113f0a0c9c0f 100644 --- a/ngraph/frontend/onnx_import/src/ops_bridge.cpp +++ b/ngraph/frontend/onnx_import/src/ops_bridge.cpp @@ -148,6 +148,7 @@ #include "onnx_import/op/org.openvinotoolkit/group_norm.hpp" #include "onnx_import/op/org.openvinotoolkit/normalize.hpp" #include "onnx_import/op/org.openvinotoolkit/prior_box.hpp" +#include "onnx_import/op/org.openvinotoolkit/swish.hpp" namespace ngraph { @@ -457,6 +458,7 @@ namespace ngraph REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "GroupNorm", 1, group_norm); REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "Normalize", 1, normalize); REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "PriorBox", 1, prior_box); + REGISTER_OPERATOR_WITH_DOMAIN(OPENVINO_ONNX_DOMAIN, "Swish", 1, swish); } #undef REGISTER_OPERATOR diff --git a/ngraph/frontend/onnx_import/src/utils/arg_min_max_factory.cpp b/ngraph/frontend/onnx_import/src/utils/arg_min_max_factory.cpp index c8695011ea9b65..ef7649e41a164c 100644 --- a/ngraph/frontend/onnx_import/src/utils/arg_min_max_factory.cpp +++ b/ngraph/frontend/onnx_import/src/utils/arg_min_max_factory.cpp @@ -45,20 +45,22 @@ namespace ngraph ArgMinMaxFactory::make_topk_subgraph(default_opset::TopK::Mode mode) const { const auto k_node = - default_opset::Constant::create(ngraph::element::i64, Shape{}, {1}); + default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {1}); const auto topk = std::make_shared( m_input_node, k_node, m_axis, mode, default_opset::TopK::SortType::NONE); if 
(m_keep_dims == 0) { - const auto axis_to_remove = - default_opset::Constant::create(element::u64, Shape{}, {topk->get_axis()}); + const auto axis_to_remove = default_opset::Constant::create( + element::Type_t::u64, Shape{}, {topk->get_axis()}); const auto reshaped_indices = std::make_shared(topk->output(1), axis_to_remove); - return std::make_shared(reshaped_indices, element::i64); + return std::make_shared(reshaped_indices, + element::Type_t::i64); } - return std::make_shared(topk->output(1), element::i64); + return std::make_shared(topk->output(1), + element::Type_t::i64); } } } diff --git a/ngraph/frontend/onnx_import/src/utils/common.cpp b/ngraph/frontend/onnx_import/src/utils/common.cpp index a25248e2fba9c2..882914fa49032a 100644 --- a/ngraph/frontend/onnx_import/src/utils/common.cpp +++ b/ngraph/frontend/onnx_import/src/utils/common.cpp @@ -25,23 +25,24 @@ namespace ngraph { namespace common { - const ngraph::element::Type& get_ngraph_element_type(int64_t onnx_type) + const ngraph::element::Type get_ngraph_element_type(int64_t onnx_type) { switch (onnx_type) { - case ONNX_NAMESPACE::TensorProto_DataType_BOOL: return element::boolean; - case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE: return element::f64; - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16: return element::f16; - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: return element::f32; - case ONNX_NAMESPACE::TensorProto_DataType_INT8: return element::i8; - case ONNX_NAMESPACE::TensorProto_DataType_INT16: return element::i16; - case ONNX_NAMESPACE::TensorProto_DataType_INT32: return element::i32; - case ONNX_NAMESPACE::TensorProto_DataType_INT64: return element::i64; - case ONNX_NAMESPACE::TensorProto_DataType_UINT8: return element::u8; - case ONNX_NAMESPACE::TensorProto_DataType_UINT16: return element::u16; - case ONNX_NAMESPACE::TensorProto_DataType_UINT32: return element::u32; - case ONNX_NAMESPACE::TensorProto_DataType_UINT64: return element::u64; - case ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED: return element::dynamic; + case ONNX_NAMESPACE::TensorProto_DataType_BOOL: return element::Type_t::boolean; + case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE: return element::Type_t::f64; + case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16: return element::Type_t::f16; + case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: return element::Type_t::f32; + case ONNX_NAMESPACE::TensorProto_DataType_INT8: return element::Type_t::i8; + case ONNX_NAMESPACE::TensorProto_DataType_INT16: return element::Type_t::i16; + case ONNX_NAMESPACE::TensorProto_DataType_INT32: return element::Type_t::i32; + case ONNX_NAMESPACE::TensorProto_DataType_INT64: return element::Type_t::i64; + case ONNX_NAMESPACE::TensorProto_DataType_UINT8: return element::Type_t::u8; + case ONNX_NAMESPACE::TensorProto_DataType_UINT16: return element::Type_t::u16; + case ONNX_NAMESPACE::TensorProto_DataType_UINT32: return element::Type_t::u32; + case ONNX_NAMESPACE::TensorProto_DataType_UINT64: return element::Type_t::u64; + case ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED: + return element::Type_t::dynamic; } #ifdef NGRAPH_USE_PROTOBUF_LITE throw ngraph_error("unsupported element type"); @@ -61,15 +62,15 @@ namespace ngraph const auto range_value = get_monotonic_range( value.get_partial_shape().rank().get_length(), start_value, step); return default_opset::Constant::create( - element::i64, {range_value.size()}, range_value); + element::Type_t::i64, {range_value.size()}, range_value); } const auto value_shape = std::make_shared(value); return std::make_shared( - 
default_opset::Constant::create(element::i64, {}, {start_value}), + default_opset::Constant::create(element::Type_t::i64, {}, {start_value}), std::make_shared(value_shape), - default_opset::Constant::create(element::i64, {}, {step}), - element::i64); + default_opset::Constant::create(element::Type_t::i64, {}, {step}), + element::Type_t::i64); } void validate_scalar_input(const char* input_name, diff --git a/ngraph/frontend/onnx_import/src/utils/recurrent.cpp b/ngraph/frontend/onnx_import/src/utils/recurrent.cpp index 3ddc467b1a6db3..8ebd20b893c351 100644 --- a/ngraph/frontend/onnx_import/src/utils/recurrent.cpp +++ b/ngraph/frontend/onnx_import/src/utils/recurrent.cpp @@ -81,7 +81,9 @@ namespace ngraph else { m_map[OpInput::SEQ_LENGTHS] = std::make_shared( - element::i32, Shape{batch_size}, m_map[OpInput::X].get_shape().at(1)); + element::Type_t::i32, + Shape{batch_size}, + m_map[OpInput::X].get_shape().at(1)); } // The initial value of the hidden. if (ng_inputs.size() > 5 && !ngraph::op::is_null(ng_inputs.at(5))) diff --git a/ngraph/frontend/onnx_import/src/utils/reshape.cpp b/ngraph/frontend/onnx_import/src/utils/reshape.cpp index ddd4674a868f94..4f42aa4573fddf 100644 --- a/ngraph/frontend/onnx_import/src/utils/reshape.cpp +++ b/ngraph/frontend/onnx_import/src/utils/reshape.cpp @@ -126,8 +126,10 @@ namespace ngraph // reshape the node with shape {C} to {1, C, 1, 1, ..., 1} std::vector reshape_pattern_values(expected_rank, 1U); reshape_pattern_values[1] = node.get_shape().front(); - const auto reshape_pattern = default_opset::Constant::create( - element::u64, Shape{reshape_pattern_values.size()}, reshape_pattern_values); + const auto reshape_pattern = + default_opset::Constant::create(element::Type_t::u64, + Shape{reshape_pattern_values.size()}, + reshape_pattern_values); return std::make_shared(node, reshape_pattern, false); } return node; diff --git a/ngraph/python/src/ngraph/impl/__init__.py b/ngraph/python/src/ngraph/impl/__init__.py index 283a8f618639ce..027c438cedeb6a 100644 --- a/ngraph/python/src/ngraph/impl/__init__.py +++ b/ngraph/python/src/ngraph/impl/__init__.py @@ -45,5 +45,6 @@ from _pyngraph import AxisSet from _pyngraph import AxisVector from _pyngraph import Coordinate +from _pyngraph import Output from _pyngraph import util diff --git a/ngraph/python/src/ngraph/utils/node_factory.py b/ngraph/python/src/ngraph/utils/node_factory.py index 550e887b962a96..6bb7589148cd12 100644 --- a/ngraph/python/src/ngraph/utils/node_factory.py +++ b/ngraph/python/src/ngraph/utils/node_factory.py @@ -1,9 +1,9 @@ from functools import partial -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union from _pyngraph import NodeFactory as _NodeFactory -from ngraph.impl import Node +from ngraph.impl import Node, Output DEFAULT_OPSET = "opset5" @@ -19,7 +19,10 @@ def __init__(self, opset_version: str = DEFAULT_OPSET) -> None: self.factory = _NodeFactory(opset_version) def create( - self, op_type_name: str, arguments: List[Node], attributes: Optional[Dict[str, Any]] = None + self, + op_type_name: str, + arguments: List[Union[Node, Output]], + attributes: Optional[Dict[str, Any]] = None, ) -> Node: """! Create node object from provided description. @@ -33,6 +36,8 @@ def create( """ if attributes is None: attributes = {} + + arguments = self._arguments_as_outputs(arguments) node = self.factory.create(op_type_name, arguments, attributes) # Currently we don't support any attribute getters & setters for TensorIterator node. 
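A note on the element-type changes running through this patch: ngraph::element::Type_t is the plain enumerator, and ngraph::element::Type is implicitly constructible from it, so call sites such as Constant::create accept either spelling; this is presumably also why get_ngraph_element_type above now returns element::Type by value rather than by const reference, since it builds the type from an enumerator instead of referring to a long-lived constant. A minimal sketch of the equivalence, assuming the usual ngraph headers (illustrative only, not part of the patch hunks):

    #include "ngraph/opsets/opset5.hpp"
    #include "ngraph/type/element_type.hpp"

    using namespace ngraph;

    void type_t_equivalence_sketch()
    {
        // element::Type_t is the enum; element::Type wraps it and converts implicitly.
        const element::Type et = element::Type_t::i64;
        auto a = opset5::Constant::create(et, Shape{}, {0});
        auto b = opset5::Constant::create(element::Type_t::i64, Shape{}, {0});
        // a and b describe the same thing: an int64 scalar constant holding 0.
    }
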
@@ -49,12 +54,16 @@ def create( # Please see test_dyn_attributes.py for more usage examples. all_attributes = node._get_attributes() for attr_name in all_attributes.keys(): - setattr(node, - self._normalize_attr_name_getter(attr_name), - partial(NodeFactory._get_node_attr_value, node, attr_name)) - setattr(node, - self._normalize_attr_name_setter(attr_name), - partial(NodeFactory._set_node_attr_value, node, attr_name)) + setattr( + node, + self._normalize_attr_name_getter(attr_name), + partial(NodeFactory._get_node_attr_value, node, attr_name), + ) + setattr( + node, + self._normalize_attr_name_setter(attr_name), + partial(NodeFactory._set_node_attr_value, node, attr_name), + ) # Setup helper members for caching attribute values. # The cache would be lazily populated at first access attempt. @@ -63,6 +72,16 @@ def create( return node + @staticmethod + def _arguments_as_outputs(arguments: List[Union[Node, Output]]) -> List[Output]: + outputs = [] + for argument in arguments: + if issubclass(type(argument), Output): + outputs.append(argument) + else: + outputs.extend(argument.outputs()) + return outputs + @staticmethod def _normalize_attr_name(attr_name: str, prefix: str) -> str: """! Normalize attribute name. diff --git a/ngraph/python/src/ngraph/utils/types.py b/ngraph/python/src/ngraph/utils/types.py index 265de69f6360d6..323ef0da6cc6db 100644 --- a/ngraph/python/src/ngraph/utils/types.py +++ b/ngraph/python/src/ngraph/utils/types.py @@ -21,7 +21,7 @@ import numpy as np from ngraph.exceptions import NgraphTypeError -from ngraph.impl import Node, Shape +from ngraph.impl import Node, Shape, Output from ngraph.impl import Type as NgraphType from ngraph.impl.op import Constant @@ -148,6 +148,8 @@ def as_node(input_value: NodeInput) -> Node: """! Return input values as nodes. Scalars will be converted to Constant nodes.""" if issubclass(type(input_value), Node): return input_value + if issubclass(type(input_value), Output): + return input_value return make_constant_node(input_value) diff --git a/ngraph/python/src/pyngraph/node_factory.cpp b/ngraph/python/src/pyngraph/node_factory.cpp index f896acd2d9de79..3c418e683b2fe4 100644 --- a/ngraph/python/src/pyngraph/node_factory.cpp +++ b/ngraph/python/src/pyngraph/node_factory.cpp @@ -49,7 +49,7 @@ namespace } std::shared_ptr create(const std::string op_type_name, - const ngraph::NodeVector& arguments, + const ngraph::OutputVector& arguments, const py::dict& attributes = py::dict()) { std::shared_ptr op_node = @@ -63,7 +63,7 @@ namespace if (op_type_name == "TensorIterator") { // XXX: How to differentiate opsets? 
-        return util::TensorIteratorBuilder(arguments, attributes)
+        return util::TensorIteratorBuilder(as_node_vector(arguments), attributes)
             .configure(std::static_pointer_cast(op_node));
     }
diff --git a/ngraph/python/src/pyngraph/ops/constant.cpp b/ngraph/python/src/pyngraph/ops/constant.cpp
index 1f6dd6b08504af..4e061767912ff4 100644
--- a/ngraph/python/src/pyngraph/ops/constant.cpp
+++ b/ngraph/python/src/pyngraph/ops/constant.cpp
@@ -117,51 +117,52 @@ void regclass_pyngraph_op_Constant(py::module m)
     constant.def("get_vector", [](const ngraph::op::Constant& self) {
         auto element_type = self.get_element_type();
-        if (element_type == ngraph::element::boolean)
+        if (element_type == ngraph::element::Type_t::boolean)
         {
             return _cast_vector(self);
         }
-        else if (element_type == ngraph::element::f16)
+        else if (element_type == ngraph::element::Type_t::f16)
         {
             return _cast_vector(self);
         }
-        else if (element_type == ngraph::element::f32)
+        else if (element_type == ngraph::element::Type_t::f32)
         {
             return _cast_vector(self);
         }
-        else if (element_type == ngraph::element::f64)
+        else if (element_type == ngraph::element::Type_t::f64)
        {
             return _cast_vector(self);
         }
-        else if (element_type == ngraph::element::i8)
+        else if (element_type == ngraph::element::Type_t::i8)
         {
             return _cast_vector(self);
         }
-        else if (element_type == ngraph::element::i16)
+        else if (element_type == ngraph::element::Type_t::i16)
         {
             return _cast_vector(self);
         }
-        else if (element_type == ngraph::element::i32)
+        else if (element_type == ngraph::element::Type_t::i32)
         {
             return _cast_vector(self);
         }
-        else if (element_type == ngraph::element::i64)
+        else if (element_type == ngraph::element::Type_t::i64)
         {
             return _cast_vector(self);
         }
-        else if (element_type == ngraph::element::u8 || element_type == ngraph::element::u1)
+        else if (element_type == ngraph::element::Type_t::u8 ||
+                 element_type == ngraph::element::Type_t::u1)
         {
             return _cast_vector(self);
         }
-        else if (element_type == ngraph::element::u16)
+        else if (element_type == ngraph::element::Type_t::u16)
         {
             return _cast_vector(self);
         }
-        else if (element_type == ngraph::element::u32)
+        else if (element_type == ngraph::element::Type_t::u32)
         {
             return _cast_vector(self);
         }
-        else if (element_type == ngraph::element::u64)
+        else if (element_type == ngraph::element::Type_t::u64)
         {
             return _cast_vector(self);
         }
@@ -174,51 +175,52 @@ void regclass_pyngraph_op_Constant(py::module m)
     // Provide buffer access
     constant.def_buffer([](const ngraph::op::Constant& self) -> py::buffer_info {
         auto element_type = self.get_element_type();
-        if (element_type == ngraph::element::boolean)
+        if (element_type == ngraph::element::Type_t::boolean)
         {
             return _get_buffer_info(self);
         }
-        else if (element_type == ngraph::element::f16)
+        else if (element_type == ngraph::element::Type_t::f16)
         {
             return _get_buffer_info(self);
         }
-        else if (element_type == ngraph::element::f32)
+        else if (element_type == ngraph::element::Type_t::f32)
         {
             return _get_buffer_info(self);
         }
-        else if (element_type == ngraph::element::f64)
+        else if (element_type == ngraph::element::Type_t::f64)
         {
             return _get_buffer_info(self);
         }
-        else if (element_type == ngraph::element::i8)
+        else if (element_type == ngraph::element::Type_t::i8)
         {
             return _get_buffer_info(self);
         }
-        else if (element_type == ngraph::element::i16)
+        else if (element_type == ngraph::element::Type_t::i16)
         {
             return _get_buffer_info(self);
         }
-        else if (element_type == ngraph::element::i32)
+        else if (element_type == ngraph::element::Type_t::i32)
         {
             return _get_buffer_info(self);
         }
-        else if (element_type == ngraph::element::i64)
+        else if (element_type == ngraph::element::Type_t::i64)
        {
             return _get_buffer_info(self);
         }
-        else if (element_type == ngraph::element::u8 || element_type == ngraph::element::u1)
+        else if (element_type == ngraph::element::Type_t::u8 ||
+                 element_type == ngraph::element::Type_t::u1)
         {
             return _get_buffer_info(self);
         }
-        else if (element_type == ngraph::element::u16)
+        else if (element_type == ngraph::element::Type_t::u16)
         {
             return _get_buffer_info(self);
         }
-        else if (element_type == ngraph::element::u32)
+        else if (element_type == ngraph::element::Type_t::u32)
         {
             return _get_buffer_info(self);
         }
-        else if (element_type == ngraph::element::u64)
+        else if (element_type == ngraph::element::Type_t::u64)
         {
             return _get_buffer_info(self);
         }
diff --git a/ngraph/python/src/pyngraph/types/element_type.cpp b/ngraph/python/src/pyngraph/types/element_type.cpp
index ce72aacc7158c7..d5f25dad35b7b3 100644
--- a/ngraph/python/src/pyngraph/types/element_type.cpp
+++ b/ngraph/python/src/pyngraph/types/element_type.cpp
@@ -27,19 +27,19 @@ void regclass_pyngraph_Type(py::module m)
 {
     py::class_> type(m, "Type");
     type.doc() = "ngraph.impl.Type wraps ngraph::element::Type";
-    type.attr("boolean") = ngraph::element::boolean;
-    type.attr("f16") = ngraph::element::f16;
-    type.attr("f32") = ngraph::element::f32;
-    type.attr("f64") = ngraph::element::f64;
-    type.attr("i8") = ngraph::element::i8;
-    type.attr("i16") = ngraph::element::i16;
-    type.attr("i32") = ngraph::element::i32;
-    type.attr("i64") = ngraph::element::i64;
-    type.attr("u1") = ngraph::element::u1;
-    type.attr("u8") = ngraph::element::u8;
-    type.attr("u16") = ngraph::element::u16;
-    type.attr("u32") = ngraph::element::u32;
-    type.attr("u64") = ngraph::element::u64;
+    type.attr("boolean") = ngraph::element::Type(ngraph::element::Type_t::boolean);
+    type.attr("f16") = ngraph::element::Type(ngraph::element::Type_t::f16);
+    type.attr("f32") = ngraph::element::Type(ngraph::element::Type_t::f32);
+    type.attr("f64") = ngraph::element::Type(ngraph::element::Type_t::f64);
+    type.attr("i8") = ngraph::element::Type(ngraph::element::Type_t::i8);
+    type.attr("i16") = ngraph::element::Type(ngraph::element::Type_t::i16);
+    type.attr("i32") = ngraph::element::Type(ngraph::element::Type_t::i32);
+    type.attr("i64") = ngraph::element::Type(ngraph::element::Type_t::i64);
+    type.attr("u1") = ngraph::element::Type(ngraph::element::Type_t::u1);
+    type.attr("u8") = ngraph::element::Type(ngraph::element::Type_t::u8);
+    type.attr("u16") = ngraph::element::Type(ngraph::element::Type_t::u16);
+    type.attr("u32") = ngraph::element::Type(ngraph::element::Type_t::u32);
+    type.attr("u64") = ngraph::element::Type(ngraph::element::Type_t::u64);
 
     type.def("__repr__", [](const ngraph::element::Type& self) {
         std::string bitwidth = std::to_string(self.bitwidth());
diff --git a/ngraph/python/tests/__init__.py b/ngraph/python/tests/__init__.py
index 76d527fac36439..ba015a4152310e 100644
--- a/ngraph/python/tests/__init__.py
+++ b/ngraph/python/tests/__init__.py
@@ -88,7 +88,8 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True):
 xfail_issue_35924 = xfail_test(reason="Assertion error - elu results mismatch")
 xfail_issue_35925 = xfail_test(reason="Assertion error - reduction ops results mismatch")
 xfail_issue_35927 = xfail_test(reason="RuntimeError: B has zero dimension that is not allowable")
-xfail_issue_35929 = xfail_test(reason="RuntimeError: Incorrect precision f64!")
+xfail_issue_40319 = xfail_test(reason="RuntimeError: [NOT_IMPLEMENTED] Input image format FP64 is not "
+                               "supported yet...")
 xfail_issue_35930 = xfail_test(reason="onnx.onnx_cpp2py_export.checker.ValidationError: "
                                "Required attribute 'to' is missing.")
 xfail_issue_40485 = xfail_test(reason="Computation missmatch")
diff --git a/ngraph/python/tests/test_ngraph/test_basic.py b/ngraph/python/tests/test_ngraph/test_basic.py
index e5e22e53cd4bda..4c09abe3af70a2 100644
--- a/ngraph/python/tests/test_ngraph/test_basic.py
+++ b/ngraph/python/tests/test_ngraph/test_basic.py
@@ -26,9 +26,9 @@ from ngraph.impl.op import Parameter
 from tests.runtime import get_runtime
 from tests.test_ngraph.util import run_op_node
-from tests import (xfail_issue_35929,
-                   xfail_issue_36476,
-                   xfail_issue_36480)
+from tests import (xfail_issue_36476,
+                   xfail_issue_36480,
+                   xfail_issue_40319)
 
 
 def test_ngraph_function_api():
@@ -59,7 +59,7 @@ def test_ngraph_function_api():
     "dtype",
     [
         np.float32,
-        pytest.param(np.float64, marks=xfail_issue_35929),
+        pytest.param(np.float64, marks=xfail_issue_40319),
         np.int8,
         np.int16,
         np.int32,
@@ -155,9 +155,9 @@ def test_convert_to_bool(destination_type, input_data):
     "destination_type, rand_range, in_dtype, expected_type",
     [
         pytest.param(np.float32, (-8, 8), np.int32, np.float32),
-        pytest.param(np.float64, (-16383, 16383), np.int64, np.float64, marks=xfail_issue_35929),
+        pytest.param(np.float64, (-16383, 16383), np.int64, np.float64),
         pytest.param("f32", (-8, 8), np.int32, np.float32),
-        pytest.param("f64", (-16383, 16383), np.int64, np.float64, marks=xfail_issue_35929),
+        pytest.param("f64", (-16383, 16383), np.int64, np.float64),
     ],
 )
 def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type):
@@ -169,7 +169,6 @@ def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type)
     assert np.array(result).dtype == expected_type
 
 
-@xfail_issue_35929
 @pytest.mark.parametrize(
     "destination_type, expected_type",
     [
@@ -185,14 +184,13 @@ def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type)
 )
 def test_convert_to_int(destination_type, expected_type):
     np.random.seed(133391)
-    input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16)
+    input_data = (np.ceil(-8 + np.random.rand(2, 3, 4) * 16)).astype(np.float32)
     expected = np.array(input_data, dtype=expected_type)
     result = run_op_node([input_data], ng.convert, destination_type)
     assert np.allclose(result, expected)
     assert np.array(result).dtype == expected_type
 
 
-@xfail_issue_35929
 @pytest.mark.parametrize(
     "destination_type, expected_type",
     [
@@ -208,7 +206,7 @@ def test_convert_to_int(destination_type, expected_type):
 )
 def test_convert_to_uint(destination_type, expected_type):
     np.random.seed(133391)
-    input_data = np.ceil(np.random.rand(2, 3, 4) * 16)
+    input_data = np.ceil(np.random.rand(2, 3, 4) * 16).astype(np.float32)
     expected = np.array(input_data, dtype=expected_type)
     result = run_op_node([input_data], ng.convert, destination_type)
     assert np.allclose(result, expected)
@@ -409,3 +407,22 @@ def test_runtime_info():
     runtime_info_after = relu_node.get_rt_info()
 
     assert runtime_info_after["affinity"] == "test_affinity"
+
+
+def test_mutiple_outputs():
+    input_shape = [4, 4]
+    input_data = np.arange(-8, 8).reshape(input_shape)
+
+    expected_output = np.split(input_data, 2, axis=1)[0]
+    expected_output[expected_output < 0] = 0
+
+    test_param = ng.parameter(input_shape, dtype=np.float32, name="A")
+    split = ng.split(test_param, axis=1, num_splits=2)
+    split_first_output = split.output(0)
+    relu = ng.relu(split_first_output)
+
+    runtime = 
get_runtime() + computation = runtime.computation(relu, test_param) + output = computation(input_data) + + assert np.equal(output, expected_output).all() diff --git a/ngraph/python/tests/test_ngraph/test_node_factory.py b/ngraph/python/tests/test_ngraph/test_node_factory.py index 44065e10482144..8ae0809bfa03e3 100644 --- a/ngraph/python/tests/test_ngraph/test_node_factory.py +++ b/ngraph/python/tests/test_ngraph/test_node_factory.py @@ -14,9 +14,9 @@ # limitations under the License. # ****************************************************************************** import numpy as np -from _pyngraph import NodeFactory as _NodeFactory - import ngraph as ng +from ngraph.utils.node_factory import NodeFactory +from _pyngraph import NodeFactory as _NodeFactory def test_node_factory_add(): @@ -26,7 +26,8 @@ def test_node_factory_add(): parameter_b = ng.parameter(shape, dtype=dtype, name="B") factory = _NodeFactory("opset1") - node = factory.create("Add", [parameter_a, parameter_b], {}) + arguments = NodeFactory._arguments_as_outputs([parameter_a, parameter_b]) + node = factory.create("Add", arguments, {}) assert node.get_type_name() == "Add" assert node.get_output_size() == 1 @@ -52,7 +53,10 @@ def test_node_factory_topk(): data = ng.parameter([2, 10], dtype=dtype, name="A") k = ng.constant(3, dtype=dtype, name="B") factory = _NodeFactory("opset1") - node = factory.create("TopK", [data, k], {"axis": 1, "mode": "max", "sort": "value"}) + arguments = NodeFactory._arguments_as_outputs([data, k]) + node = factory.create( + "TopK", arguments, {"axis": 1, "mode": "max", "sort": "value"} + ) assert node.get_type_name() == "TopK" assert node.get_output_size() == 2 diff --git a/ngraph/python/tests/test_ngraph/test_normalization.py b/ngraph/python/tests/test_ngraph/test_normalization.py index c458b24c43492a..b5855b92d61310 100644 --- a/ngraph/python/tests/test_ngraph/test_normalization.py +++ b/ngraph/python/tests/test_ngraph/test_normalization.py @@ -19,7 +19,7 @@ import ngraph as ng from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node -from tests import xfail_issue_40957, xfail_issue_35929 +from tests import xfail_issue_40957 @xfail_issue_40957 @@ -103,15 +103,14 @@ def test_lrn_factory(): assert np.allclose(result, excepted) -@xfail_issue_35929 def test_batch_norm_inference(): - data = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]]) - gamma = np.array([2.0, 3.0, 4.0]) - beta = np.array([0.0, 0.0, 0.0]) - mean = np.array([0.0, 0.0, 0.0]) - variance = np.array([1.0, 1.0, 1.0]) + data = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float32) + gamma = np.array([2.0, 3.0, 4.0], dtype=np.float32) + beta = np.array([0.0, 0.0, 0.0], dtype=np.float32) + mean = np.array([0.0, 0.0, 0.0], dtype=np.float32) + variance = np.array([1.0, 1.0, 1.0], dtype=np.float32) epsilon = 9.99e-06 - excepted = np.array([[2.0, 6.0, 12.0], [-2.0, -6.0, -12.0]]) + excepted = np.array([[2.0, 6.0, 12.0], [-2.0, -6.0, -12.0]], dtype=np.float32) result = run_op_node([data, gamma, beta, mean, variance], ng.batch_norm_inference, epsilon) diff --git a/ngraph/python/tests/test_ngraph/test_ops_reshape.py b/ngraph/python/tests/test_ngraph/test_ops_reshape.py index 42bfb9fbb9d985..a089a85c9effe6 100644 --- a/ngraph/python/tests/test_ngraph/test_ops_reshape.py +++ b/ngraph/python/tests/test_ngraph/test_ops_reshape.py @@ -19,7 +19,7 @@ import ngraph as ng from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node, run_op_numeric_data -from tests import xfail_issue_40957, 
xfail_issue_35929 +from tests import xfail_issue_40957 def test_concat(): @@ -51,7 +51,7 @@ def test_constant_from_bool(val_type, value): "val_type, value", [ pytest.param(np.float32, np.float32(0.1234), marks=xfail_issue_40957), - pytest.param(np.float64, np.float64(0.1234), marks=xfail_issue_35929), + pytest.param(np.float64, np.float64(0.1234), marks=xfail_issue_40957), pytest.param(np.int8, np.int8(-63), marks=xfail_issue_40957), pytest.param(np.int16, np.int16(-12345), marks=xfail_issue_40957), pytest.param(np.int32, np.int32(-123456), marks=xfail_issue_40957), @@ -72,7 +72,7 @@ def test_constant_from_scalar(val_type, value): "val_type", [ pytest.param(np.float32, marks=xfail_issue_40957), - pytest.param(np.float64, marks=xfail_issue_35929), + pytest.param(np.float64, marks=xfail_issue_40957), ], ) def test_constant_from_float_array(val_type): diff --git a/ngraph/python/tests/test_ngraph/test_ops_unary.py b/ngraph/python/tests/test_ngraph/test_ops_unary.py index 5adbd9ee0549dc..f0327de1be3c6f 100644 --- a/ngraph/python/tests/test_ngraph/test_ops_unary.py +++ b/ngraph/python/tests/test_ngraph/test_ops_unary.py @@ -19,21 +19,19 @@ import ngraph as ng from ngraph.impl import Shape, Type from tests.test_ngraph.util import run_op_node -from tests import xfail_issue_35929 -@xfail_issue_35929 @pytest.mark.parametrize( "ng_api_fn, numpy_fn, range_start, range_end", [ (ng.absolute, np.abs, -1, 1), (ng.abs, np.abs, -1, 1), (ng.acos, np.arccos, -1, 1), - (ng.acosh, np.arccosh, -1, 1), + (ng.acosh, np.arccosh, 1, 2), (ng.asin, np.arcsin, -1, 1), (ng.asinh, np.arcsinh, -1, 1), (ng.atan, np.arctan, -100.0, 100.0), - (ng.atanh, np.arctanh, -100.0, 100.0), + (ng.atanh, np.arctanh, 0.0, 1.0), (ng.ceiling, np.ceil, -100.0, 100.0), (ng.ceil, np.ceil, -100.0, 100.0), (ng.cos, np.cos, -100.0, 100.0), @@ -52,7 +50,7 @@ ) def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end): np.random.seed(133391) - input_data = range_start + np.random.rand(2, 3, 4) * (range_end - range_start) + input_data = (range_start + np.random.rand(2, 3, 4) * (range_end - range_start)).astype(np.float32) expected = numpy_fn(input_data) result = run_op_node([input_data], ng_api_fn) diff --git a/ngraph/python/tests/test_onnx/test_backend.py b/ngraph/python/tests/test_onnx/test_backend.py index 67b7bf29828ca1..68ebd44ca3ae5d 100644 --- a/ngraph/python/tests/test_onnx/test_backend.py +++ b/ngraph/python/tests/test_onnx/test_backend.py @@ -44,7 +44,6 @@ xfail_issue_36476, xfail_issue_36478, xfail_issue_38091, - xfail_issue_35929, xfail_issue_38699, xfail_issue_33596, xfail_issue_38701, @@ -76,6 +75,7 @@ xfail_issue_38732, xfail_issue_38734, xfail_issue_38735, + xfail_issue_40319, xfail_issue_40485, xfail_issue_41894) @@ -213,7 +213,8 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None "OnnxBackendNodeModelTest.test_dropout_default_ratio_cpu", "OnnxBackendNodeModelTest.test_training_dropout_default_cpu", "OnnxBackendNodeModelTest.test_training_dropout_zero_ratio_cpu", - "OnnxBackendNodeModelTest.test_training_dropout_cpu"), + "OnnxBackendNodeModelTest.test_training_dropout_cpu", + "OnnxBackendNodeModelTest.test_eyelike_with_dtype_cpu"), (xfail_issue_35915, "OnnxBackendNodeModelTest.test_min_int16_cpu", "OnnxBackendNodeModelTest.test_min_uint8_cpu"), @@ -242,24 +243,21 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None (xfail_issue_38091, "OnnxBackendNodeModelTest.test_gather_negative_indices_cpu", "OnnxBackendNodeModelTest.test_mvn_cpu", - "OnnxBackendNodeModelTest.test_elu_example_cpu"), - 
(xfail_issue_35929, + "OnnxBackendNodeModelTest.test_elu_example_cpu",), + (xfail_issue_40319, "OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_broadcast_cpu", "OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_singleton_broadcast_cpu", "OnnxBackendPyTorchOperatorModelTest.test_operator_add_broadcast_cpu", "OnnxBackendPyTorchOperatorModelTest.test_operator_addconstant_cpu", "OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_right_broadcast_cpu", "OnnxBackendNodeModelTest.test_cast_DOUBLE_to_FLOAT_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT16_to_DOUBLE_cpu", "OnnxBackendNodeModelTest.test_cast_DOUBLE_to_FLOAT16_cpu", "OnnxBackendNodeModelTest.test_cumsum_1d_cpu", - "OnnxBackendNodeModelTest.test_cast_FLOAT_to_DOUBLE_cpu", "OnnxBackendNodeModelTest.test_cumsum_1d_reverse_cpu", "OnnxBackendNodeModelTest.test_cumsum_1d_exclusive_cpu", "OnnxBackendNodeModelTest.test_cumsum_1d_reverse_exclusive_cpu", "OnnxBackendNodeModelTest.test_cumsum_2d_axis_0_cpu", "OnnxBackendNodeModelTest.test_cumsum_2d_negative_axis_cpu", - "OnnxBackendNodeModelTest.test_eyelike_with_dtype_cpu", "OnnxBackendNodeModelTest.test_cumsum_2d_axis_1_cpu", "OnnxBackendNodeModelTest.test_mod_mixed_sign_float64_cpu", "OnnxBackendNodeModelTest.test_max_float64_cpu", diff --git a/ngraph/python/tests/test_onnx/test_ops_unary.py b/ngraph/python/tests/test_onnx/test_ops_unary.py index f99ef7f5391406..5b28c636d4389e 100644 --- a/ngraph/python/tests/test_onnx/test_ops_unary.py +++ b/ngraph/python/tests/test_onnx/test_ops_unary.py @@ -22,8 +22,7 @@ from ngraph.exceptions import NgraphTypeError from tests.runtime import get_runtime from tests.test_onnx.utils import get_node_model, import_onnx_model, run_model, run_node -from tests import (xfail_issue_35929, - xfail_issue_40957, +from tests import (xfail_issue_40957, xfail_issue_35930) @@ -105,7 +104,6 @@ def test_neg(input_data): assert np.array_equal(ng_results, [expected_output]) -@xfail_issue_35929 @pytest.mark.parametrize( "input_data", [ @@ -115,13 +113,13 @@ def test_neg(input_data): ], ) def test_floor(input_data): + input_data = input_data.astype(np.float32) expected_output = np.floor(input_data) node = onnx.helper.make_node("Floor", inputs=["x"], outputs=["y"]) ng_results = run_node(node, [input_data]) assert np.array_equal(ng_results, [expected_output]) -@xfail_issue_35929 @pytest.mark.parametrize( "input_data", [ @@ -131,6 +129,7 @@ def test_floor(input_data): ], ) def test_ceil(input_data): + input_data = input_data.astype(np.float32) expected_output = np.ceil(input_data) node = onnx.helper.make_node("Ceil", inputs=["x"], outputs=["y"]) ng_results = run_node(node, [input_data]) @@ -165,7 +164,6 @@ def test_clip_default(): assert np.allclose(result, [expected]) -@xfail_issue_35929 @pytest.mark.parametrize( "input_data", [ @@ -175,6 +173,7 @@ def test_clip_default(): ], ) def test_reciprocal(input_data): + input_data = input_data.astype(np.float32) expected_output = np.reciprocal(input_data) node = onnx.helper.make_node("Reciprocal", inputs=["x"], outputs=["y"]) ng_results = run_node(node, [input_data]) @@ -389,7 +388,8 @@ def test_cast_to_bool(val_type, input_data): "val_type, range_start, range_end, in_dtype", [ (np.dtype(np.float32), -8, 8, np.dtype(np.int32)), - pytest.param(np.dtype(np.float64), -16383, 16383, np.dtype(np.int64), marks=xfail_issue_35929), + pytest.param(np.dtype(np.float64), -16383, 16383, np.dtype(np.int64), + marks=pytest.mark.xfail(reason="RuntimeError: Unsupported type")), ], ) def test_cast_to_float(val_type, 
range_start, range_end, in_dtype): @@ -503,7 +503,7 @@ def test_cast_errors(): @pytest.mark.parametrize("value_type", [pytest.param(np.float32, marks=xfail_issue_40957), - pytest.param(np.float64, marks=xfail_issue_35929)]) + pytest.param(np.float64, marks=xfail_issue_40957)]) def test_constant(value_type): values = np.random.randn(5, 5).astype(value_type) node = onnx.helper.make_node( diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 81db61806dc2f5..336f9f86f16cea 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -79,6 +79,7 @@ set(SRC op_eval/reduce_l1.cpp op_eval/reduce_l2.cpp op_eval/roi_align.cpp + op_eval/roi_pooling.cpp op_eval/round.cpp op_eval/softplus.cpp op_eval/split.cpp @@ -162,6 +163,7 @@ set(SRC type_prop/reverse.cpp type_prop/reverse_sequence.cpp type_prop/roi_align.cpp + type_prop/roi_pooling.cpp type_prop/round.cpp type_prop/rnn_cell.cpp type_prop/rnn_sequence.cpp @@ -301,6 +303,7 @@ set(MULTI_TEST_SRC backend/lrn.in.cpp backend/matmul.in.cpp backend/maximum.in.cpp + backend/max_pool.in.cpp backend/minimum.in.cpp backend/multiple_backends.in.cpp backend/multiple_result.in.cpp @@ -328,6 +331,7 @@ set(MULTI_TEST_SRC backend/reshape.in.cpp backend/reverse_sequence.in.cpp backend/reverse.in.cpp + backend/roi_pooling.in.cpp backend/round.in.cpp backend/select.in.cpp backend/shape_of.in.cpp diff --git a/ngraph/test/attributes.cpp b/ngraph/test/attributes.cpp index 64a5a60451ddfa..34867de416b367 100644 --- a/ngraph/test/attributes.cpp +++ b/ngraph/test/attributes.cpp @@ -268,7 +268,7 @@ class Oracle : public op::Op m_result_vector); } - void validate_and_infer_types() override { set_output_type(0, element::i64, {}); } + void validate_and_infer_types() override { set_output_type(0, element::Type_t::i64, {}); } bool visit_attributes(AttributeVisitor& visitor) override { visitor.on_attribute("turing_model", m_turing_model); @@ -348,13 +348,13 @@ constexpr NodeTypeInfo Oracle::type_info; TEST(attributes, user_op) { FactoryRegistry::get().register_factory(); - auto program = make_shared(element::i32, Shape{200}); - auto data = make_shared(element::i32, Shape{200}); + auto program = make_shared(element::Type_t::i32, Shape{200}); + auto data = make_shared(element::Type_t::i32, Shape{200}); auto result = make_shared(data); auto oracle = make_shared(program, data, TuringModel::XL1200, - element::f32, + element::Type_t::f32, element::Type_t::i64, "12AU7", true, @@ -438,8 +438,8 @@ TEST(attributes, user_op) TEST(attributes, matmul_op) { FactoryRegistry::get().register_factory(); - auto A = make_shared(element::f32, Shape{0, 2}); - auto B = make_shared(element::f32, Shape{2, 0}); + auto A = make_shared(element::Type_t::f32, Shape{0, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2, 0}); bool transpose_a = true; bool transpose_b = true; @@ -492,7 +492,7 @@ TEST(attributes, partial_shape) TEST(attributes, max_pool_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{64, 3, 5}); + auto data = make_shared(element::Type_t::f32, Shape{64, 3, 5}); auto strides = Strides{2}; auto pads_begin = Shape{1}; @@ -517,8 +517,8 @@ TEST(attributes, max_pool_op) TEST(attributes, mod_op) { FactoryRegistry::get().register_factory(); - auto A = make_shared(element::f32, Shape{0, 2}); - auto B = make_shared(element::f32, Shape{2, 0}); + auto A = make_shared(element::Type_t::f32, Shape{0, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2, 0}); auto auto_broadcast = op::AutoBroadcastType::NUMPY; @@ -532,8 
+532,8 @@ TEST(attributes, mod_op) TEST(attributes, non_max_suppression_op_custom_attributes) { FactoryRegistry::get().register_factory(); - auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - auto scores = make_shared(element::f32, Shape{1, 1, 1}); + auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 1}); auto box_encoding = opset1::NonMaxSuppression::BoxEncodingType::CENTER; bool sort_result_descending = false; @@ -550,8 +550,8 @@ TEST(attributes, non_max_suppression_op_custom_attributes) TEST(attributes, non_max_suppression_op_default_attributes) { FactoryRegistry::get().register_factory(); - auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - auto scores = make_shared(element::f32, Shape{1, 1, 1}); + auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 1}); auto nms = make_shared(boxes, scores); NodeBuilder builder(nms); @@ -564,12 +564,12 @@ TEST(attributes, non_max_suppression_op_default_attributes) TEST(attributes, non_max_suppression_v3_op_custom_attributes) { FactoryRegistry::get().register_factory(); - auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - auto scores = make_shared(element::f32, Shape{1, 1, 1}); + auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 1}); auto box_encoding = opset3::NonMaxSuppression::BoxEncodingType::CENTER; bool sort_result_descending = false; - element::Type output_type = element::i32; + element::Type output_type = element::Type_t::i32; auto nms = make_shared( boxes, scores, box_encoding, sort_result_descending, output_type); @@ -584,8 +584,8 @@ TEST(attributes, non_max_suppression_v3_op_custom_attributes) TEST(attributes, non_max_suppression_v3_op_default_attributes) { FactoryRegistry::get().register_factory(); - auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - auto scores = make_shared(element::f32, Shape{1, 1, 1}); + auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 1}); auto nms = make_shared(boxes, scores); NodeBuilder builder(nms); @@ -599,8 +599,8 @@ TEST(attributes, non_max_suppression_v3_op_default_attributes) TEST(attributes, normalize_l2_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{1}); - const auto axes = make_shared(element::i32, Shape{}, vector{0}); + auto data = make_shared(element::Type_t::i32, Shape{1}); + const auto axes = make_shared(element::Type_t::i32, Shape{}, vector{0}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -616,10 +616,10 @@ TEST(attributes, normalize_l2_op) TEST(attributes, one_hot_op) { FactoryRegistry::get().register_factory(); - auto indices = make_shared(element::i64, Shape{1, 3, 2, 3}); - auto depth = op::Constant::create(element::i64, Shape{}, {4}); - auto on_value = op::Constant::create(element::f32, Shape{}, {1.0f}); - auto off_value = op::Constant::create(element::f32, Shape{}, {0.0f}); + auto indices = make_shared(element::Type_t::i64, Shape{1, 3, 2, 3}); + auto depth = op::Constant::create(element::Type_t::i64, Shape{}, {4}); + auto on_value = op::Constant::create(element::Type_t::f32, Shape{}, {1.0f}); + auto off_value = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); int64_t axis = 3; @@ -633,9 +633,9 @@ TEST(attributes, one_hot_op) TEST(attributes, pad_op) { FactoryRegistry::get().register_factory(); - auto arg 
= make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1}); + auto pads_end = make_shared(element::Type_t::i64, Shape{1}); auto pad_mode = op::PadMode::EDGE; @@ -649,8 +649,8 @@ TEST(attributes, pad_op) TEST(attributes, psroi_pooling_op) { FactoryRegistry::get().register_factory(); - auto input = make_shared(element::f32, Shape{1, 1024, 63, 38}); - auto coords = make_shared(element::f32, Shape{300, 5}); + auto input = make_shared(element::Type_t::f32, Shape{1, 1024, 63, 38}); + auto coords = make_shared(element::Type_t::f32, Shape{300, 5}); const int64_t output_dim = 882; const int64_t group_size = 3; @@ -676,8 +676,8 @@ TEST(attributes, reduce_logical_and_op) { // ReduceLogicalAnd derives visit_attributes from op::util::LogicalReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, Shape{2}); bool keep_dims = true; @@ -692,8 +692,8 @@ TEST(attributes, reduce_logical_or_op) { // ReduceLogicalOr derives visit_attributes from op::util::LogicalReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, Shape{2}); bool keep_dims = true; @@ -708,8 +708,8 @@ TEST(attributes, reduce_max_op) { // ReduceMax derives visit_attributes from op::util::ArithmeticReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, Shape{2}); bool keep_dims = true; @@ -724,8 +724,8 @@ TEST(attributes, reduce_mean_op) { // ReduceMean derives visit_attributes from op::util::ArithmeticReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, Shape{2}); bool keep_dims = true; @@ -740,8 +740,8 @@ TEST(attributes, reduce_min_op) { // ReduceMin derives visit_attributes from op::util::ArithmeticReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, Shape{2}); bool keep_dims = true; @@ -756,8 +756,8 @@ TEST(attributes, reduce_prod_op) { // ReduceProd derives visit_attributes from op::util::ArithmeticReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, 
Shape{2}); bool keep_dims = true; @@ -772,8 +772,8 @@ TEST(attributes, reduce_sum_op) { // ReduceSum derives visit_attributes from op::util::ArithmeticReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, Shape{2}); bool keep_dims = true; @@ -787,7 +787,7 @@ TEST(attributes, reduce_sum_op) TEST(attributes, region_yolo_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{1, 255, 26, 26}); + auto data = make_shared(element::Type_t::f32, Shape{1, 255, 26, 26}); size_t num_coords = 4; size_t num_classes = 1; @@ -816,8 +816,8 @@ TEST(attributes, region_yolo_op) TEST(attributes, reshape_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{2, 3, 4}); - auto pattern = make_shared(element::i32, Shape{2}); + auto data = make_shared(element::Type_t::i32, Shape{2, 3, 4}); + auto pattern = make_shared(element::Type_t::i32, Shape{2}); bool special_zero = true; @@ -831,8 +831,8 @@ TEST(attributes, reshape_op) TEST(attributes, reverse_op_enum_mode) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{200}); - auto reversed_axes = make_shared(element::i32, Shape{200}); + auto data = make_shared(element::Type_t::i32, Shape{200}); + auto reversed_axes = make_shared(element::Type_t::i32, Shape{200}); auto reverse = make_shared(data, reversed_axes, opset1::Reverse::Mode::INDEX); NodeBuilder builder(reverse); @@ -844,8 +844,8 @@ TEST(attributes, reverse_op_enum_mode) TEST(attributes, reverse_op_string_mode) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{200}); - auto reversed_axes = make_shared(element::i32, Shape{200}); + auto data = make_shared(element::Type_t::i32, Shape{200}); + auto reversed_axes = make_shared(element::Type_t::i32, Shape{200}); std::string mode = "index"; @@ -859,8 +859,8 @@ TEST(attributes, reverse_op_string_mode) TEST(attributes, reverse_sequence_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{2, 3, 4, 2}); - auto seq_indices = make_shared(element::i32, Shape{4}); + auto data = make_shared(element::Type_t::i32, Shape{2, 3, 4, 2}); + auto seq_indices = make_shared(element::Type_t::i32, Shape{4}); auto batch_axis = 2; auto seq_axis = 1; @@ -879,10 +879,10 @@ TEST(attributes, reverse_sequence_op) TEST(attributes, rnn_cell_op_custom_attributes) { FactoryRegistry::get().register_factory(); - auto X = make_shared(element::f32, Shape{2, 3}); - auto H = make_shared(element::f32, Shape{2, 3}); - auto W = make_shared(element::f32, Shape{3, 3}); - auto R = make_shared(element::f32, Shape{3, 3}); + auto X = make_shared(element::Type_t::f32, Shape{2, 3}); + auto H = make_shared(element::Type_t::f32, Shape{2, 3}); + auto W = make_shared(element::Type_t::f32, Shape{3, 3}); + auto R = make_shared(element::Type_t::f32, Shape{3, 3}); const size_t hidden_size = 3; auto activations = std::vector{"sigmoid", "tanh"}; @@ -906,10 +906,10 @@ TEST(attributes, rnn_cell_op_custom_attributes) TEST(attributes, rnn_cell_op_default_attributes) { FactoryRegistry::get().register_factory(); - auto X = make_shared(element::f32, Shape{2, 3}); - auto H = make_shared(element::f32, Shape{2, 3}); - auto W = make_shared(element::f32, Shape{3, 3}); - auto R = 
make_shared(element::f32, Shape{3, 3}); + auto X = make_shared(element::Type_t::f32, Shape{2, 3}); + auto H = make_shared(element::Type_t::f32, Shape{2, 3}); + auto W = make_shared(element::Type_t::f32, Shape{3, 3}); + auto R = make_shared(element::Type_t::f32, Shape{3, 3}); const size_t hidden_size = 3; @@ -928,7 +928,7 @@ TEST(attributes, rnn_cell_op_default_attributes) TEST(attributes, elu_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{2, 4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4}); double alpha = 0.1; @@ -942,11 +942,11 @@ TEST(attributes, elu_op) TEST(attributes, fake_quantize_op) { FactoryRegistry::get().register_factory(); - const auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto input_low = make_shared(element::f32, Shape{}); - const auto input_high = make_shared(element::f32, Shape{}); - const auto output_low = make_shared(element::f32, Shape{}); - const auto output_high = make_shared(element::f32, Shape{}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto input_low = make_shared(element::Type_t::f32, Shape{}); + const auto input_high = make_shared(element::Type_t::f32, Shape{}); + const auto output_low = make_shared(element::Type_t::f32, Shape{}); + const auto output_high = make_shared(element::Type_t::f32, Shape{}); auto levels = 5; auto auto_broadcast = op::AutoBroadcastType::NUMPY; @@ -963,8 +963,8 @@ TEST(attributes, fake_quantize_op) TEST(attributes, broadcast_v3) { FactoryRegistry::get().register_factory(); - const auto arg = make_shared(element::i64, Shape{1, 3, 1}); - const auto shape = make_shared(element::i64, Shape{3}); + const auto arg = make_shared(element::Type_t::i64, Shape{1, 3, 1}); + const auto shape = make_shared(element::Type_t::i64, Shape{3}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); @@ -977,7 +977,7 @@ TEST(attributes, broadcast_v3) TEST(attributes, grn_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{2, 3, 4, 5}); + auto data = make_shared(element::Type_t::f32, Shape{2, 3, 4, 5}); float bias = 1.25f; @@ -991,8 +991,8 @@ TEST(attributes, grn_op) TEST(attributes, group_conv_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{1, 12, 224, 224}); - auto filters = make_shared(element::f32, Shape{4, 1, 3, 5, 5}); + auto data = make_shared(element::Type_t::f32, Shape{1, 12, 224, 224}); + auto filters = make_shared(element::Type_t::f32, Shape{4, 1, 3, 5, 5}); auto strides = Strides{1, 1}; auto pads_begin = CoordinateDiff{1, 2}; auto pads_end = CoordinateDiff{1, 2}; @@ -1011,9 +1011,10 @@ TEST(attributes, group_conv_op) TEST(attributes, group_conv_backprop_data_op) { FactoryRegistry::get().register_factory(); - const auto data = make_shared(element::f32, Shape{1, 20, 224, 224}); - const auto filter = make_shared(element::f32, Shape{4, 5, 2, 3, 3}); - const auto output_shape = make_shared(element::f32, Shape{1, 8, 447, 447}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 20, 224, 224}); + const auto filter = make_shared(element::Type_t::f32, Shape{4, 5, 2, 3, 3}); + const auto output_shape = + make_shared(element::Type_t::f32, Shape{1, 8, 447, 447}); const auto strides = Strides{2, 1}; const auto pads_begin = CoordinateDiff{3, 4}; @@ -1045,8 +1046,8 @@ TEST(attributes, group_conv_backprop_data_op) TEST(attributes, lrn_op) { 
FactoryRegistry::get().register_factory(); - const auto arg = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto axes = make_shared(element::i32, Shape{2}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto axes = make_shared(element::Type_t::i32, Shape{2}); const double alpha = 1.1; const double beta = 2.2; @@ -1066,12 +1067,12 @@ TEST(attributes, lrn_op) TEST(attributes, lstm_cell_op) { FactoryRegistry::get().register_factory(); - auto X = make_shared(element::f32, Shape{2, 3}); - auto H = make_shared(element::f32, Shape{2, 3}); - auto W = make_shared(element::f32, Shape{12, 3}); - auto R = make_shared(element::f32, Shape{12, 3}); - const auto initial_hidden_state = make_shared(element::f32, Shape{2, 3}); - const auto initial_cell_state = make_shared(element::f32, Shape{2, 3}); + auto X = make_shared(element::Type_t::f32, Shape{2, 3}); + auto H = make_shared(element::Type_t::f32, Shape{2, 3}); + auto W = make_shared(element::Type_t::f32, Shape{12, 3}); + auto R = make_shared(element::Type_t::f32, Shape{12, 3}); + const auto initial_hidden_state = make_shared(element::Type_t::f32, Shape{2, 3}); + const auto initial_cell_state = make_shared(element::Type_t::f32, Shape{2, 3}); const auto hidden_size = 3; const std::vector activations = {"tanh", "sigmoid", "tanh"}; @@ -1109,17 +1110,19 @@ TEST(attributes, lstm_sequence_op) const size_t hidden_size = 64; const auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); - const auto initial_hidden_state = - make_shared(element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto initial_cell_state = - make_shared(element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); - const auto W = make_shared(element::f32, + make_shared(element::Type_t::f32, Shape{batch_size, seq_length, input_size}); + const auto initial_hidden_state = make_shared( + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); + const auto initial_cell_state = make_shared( + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = + make_shared(element::Type_t::i32, Shape{batch_size}); + const auto W = make_shared(element::Type_t::f32, Shape{num_directions, 4 * hidden_size, input_size}); - const auto R = make_shared(element::f32, + const auto R = make_shared(element::Type_t::f32, Shape{num_directions, 4 * hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{num_directions, 4 * hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{num_directions, 4 * hidden_size}); const auto lstm_direction = op::RecurrentSequenceDirection::BIDIRECTIONAL; const std::vector activations_alpha = {1, 2, 3}; @@ -1154,7 +1157,7 @@ TEST(attributes, lstm_sequence_op) TEST(attributes, shuffle_channels_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{200}); + auto data = make_shared(element::Type_t::i32, Shape{200}); auto axis = 0; auto groups = 2; auto shuffle_channels = make_shared(data, axis, groups); @@ -1168,7 +1171,7 @@ TEST(attributes, shuffle_channels_op) TEST(attributes, softmax_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{200}); + auto data = make_shared(element::Type_t::i32, Shape{200}); auto axis = 0; auto softmax = make_shared(data, axis); NodeBuilder builder(softmax); @@ -1180,7 +1183,7 @@ TEST(attributes, softmax_op) 
TEST(attributes, space_to_depth_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{2, 3, 50, 50}); + auto data = make_shared(element::Type_t::i32, Shape{2, 3, 50, 50}); auto block_size = 2; auto mode = opset1::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; auto space_to_depth = make_shared(data, mode, block_size); @@ -1194,8 +1197,8 @@ TEST(attributes, space_to_depth_op) TEST(attributes, split_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{200}); - auto axis = make_shared(element::i32, Shape{}); + auto data = make_shared(element::Type_t::i32, Shape{200}); + auto axis = make_shared(element::Type_t::i32, Shape{}); auto num_splits = 2; auto split = make_shared(data, axis, num_splits); NodeBuilder builder(split); @@ -1207,8 +1210,8 @@ TEST(attributes, split_op) TEST(attributes, squared_difference_op) { FactoryRegistry::get().register_factory(); - auto x1 = make_shared(element::i32, Shape{200}); - auto x2 = make_shared(element::i32, Shape{200}); + auto x1 = make_shared(element::Type_t::i32, Shape{200}); + auto x2 = make_shared(element::Type_t::i32, Shape{200}); auto auto_broadcast = op::AutoBroadcastType::NUMPY; auto squared_difference = make_shared(x1, x2, auto_broadcast); NodeBuilder builder(squared_difference); @@ -1220,10 +1223,10 @@ TEST(attributes, squared_difference_op) TEST(attributes, strided_slice_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{2, 3, 4, 5}); - auto begin = make_shared(element::i32, Shape{2}); - auto end = make_shared(element::i32, Shape{2}); - auto stride = make_shared(element::i32, Shape{2}); + auto data = make_shared(element::Type_t::i32, Shape{2, 3, 4, 5}); + auto begin = make_shared(element::Type_t::i32, Shape{2}); + auto end = make_shared(element::Type_t::i32, Shape{2}); + auto stride = make_shared(element::Type_t::i32, Shape{2}); auto begin_mask = std::vector{0, 0}; auto end_mask = std::vector{0, 0}; @@ -1253,8 +1256,8 @@ TEST(attributes, strided_slice_op) TEST(attributes, topk_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{2, 3, 4, 5}); - auto k = make_shared(element::i32, Shape{}); + auto data = make_shared(element::Type_t::i32, Shape{2, 3, 4, 5}); + auto k = make_shared(element::Type_t::i32, Shape{}); auto axis = 0; auto mode = opset1::TopK::Mode::MAX; @@ -1272,8 +1275,8 @@ TEST(attributes, topk_op) TEST(attributes, logical_xor_op) { FactoryRegistry::get().register_factory(); - auto x1 = make_shared(element::boolean, Shape{200}); - auto x2 = make_shared(element::boolean, Shape{200}); + auto x1 = make_shared(element::Type_t::boolean, Shape{200}); + auto x2 = make_shared(element::Type_t::boolean, Shape{200}); auto auto_broadcast = op::AutoBroadcastType::NUMPY; @@ -1287,7 +1290,7 @@ TEST(attributes, logical_xor_op) TEST(attributes, extractimagepatches_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{64, 3, 10, 10}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; @@ -1308,7 +1311,7 @@ TEST(attributes, extractimagepatches_op) TEST(attributes, mvn_op) { FactoryRegistry::get().register_factory(); - const auto data = make_shared(element::i32, Shape{2, 3, 4, 5}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 3, 4, 5}); const auto axes = AxisSet{0, 1}; @@ -1326,7 +1329,7 @@ TEST(attributes, mvn_op) TEST(attributes, reorg_yolo_op_stride) { 
FactoryRegistry::get().register_factory(); - const auto data = make_shared(element::i32, Shape{1, 64, 26, 26}); + const auto data = make_shared(element::Type_t::i32, Shape{1, 64, 26, 26}); const auto op = make_shared(data, 2); NodeBuilder builder(op); @@ -1338,7 +1341,7 @@ TEST(attributes, reorg_yolo_op_stride) TEST(attributes, reorg_yolo_op_strides) { FactoryRegistry::get().register_factory(); - const auto data = make_shared(element::i32, Shape{1, 64, 26, 26}); + const auto data = make_shared(element::Type_t::i32, Shape{1, 64, 26, 26}); const auto op = make_shared(data, Strides{2}); NodeBuilder builder(op); @@ -1350,10 +1353,10 @@ TEST(attributes, reorg_yolo_op_strides) TEST(attributes, roi_pooling_op) { FactoryRegistry::get().register_factory(); - const auto data = make_shared(element::i32, Shape{2, 3, 4, 5}); - const auto coords = make_shared(element::i32, Shape{2, 3}); + const auto data = make_shared(element::Type_t::f32, Shape{2, 3, 4, 5}); + const auto coords = make_shared(element::Type_t::f32, Shape{2, 5}); - const auto op = make_shared(data, coords, Shape{5, 5}, 0.123, "Bilinear"); + const auto op = make_shared(data, coords, Shape{5, 5}, 0.123, "bilinear"); NodeBuilder builder(op); const auto g_op = as_type_ptr(builder.create()); @@ -1365,7 +1368,7 @@ TEST(attributes, roi_pooling_op) TEST(attributes, constant_op) { vector data{5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 0.0f}; - auto k = make_shared(element::f32, Shape{2, 3}, data); + auto k = make_shared(element::Type_t::f32, Shape{2, 3}, data); NodeBuilder builder(k); auto g_k = as_type_ptr(builder.create()); g_k->validate_and_infer_types(); @@ -1379,8 +1382,8 @@ TEST(attributes, constant_op) TEST(attributes, bucketize_v3_op_default_attributes) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{2, 3, 4}); - auto buckets = make_shared(element::f32, Shape{5}); + auto data = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto buckets = make_shared(element::Type_t::f32, Shape{5}); auto bucketize = make_shared(data, buckets); NodeBuilder builder(bucketize); @@ -1393,9 +1396,9 @@ TEST(attributes, bucketize_v3_op_default_attributes) TEST(attributes, bucketize_v3_op_custom_attributes) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{2, 3, 4}); - auto buckets = make_shared(element::f32, Shape{5}); - element::Type output_type = element::i32; + auto data = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto buckets = make_shared(element::Type_t::f32, Shape{5}); + element::Type output_type = element::Type_t::i32; bool with_right_bound = false; auto bucketize = make_shared(data, buckets, output_type, with_right_bound); @@ -1412,8 +1415,8 @@ TEST(attributes, cum_sum_op_default_attributes) FactoryRegistry::get().register_factory(); Shape shape{1, 4}; - auto A = make_shared(element::f32, shape); - auto axis = make_shared(element::i32, Shape{1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axis = make_shared(element::Type_t::i32, Shape{1}); auto cs = make_shared(A, axis); NodeBuilder builder(cs); @@ -1428,8 +1431,8 @@ TEST(attributes, cum_sum_op_custom_attributes) FactoryRegistry::get().register_factory(); Shape shape{1, 4}; - auto A = make_shared(element::f32, shape); - auto axis = make_shared(element::i32, Shape{1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axis = make_shared(element::Type_t::i32, Shape{1}); bool exclusive = true; bool reverse = true; auto cs = make_shared(A, axis, exclusive, reverse); @@ -1444,8 +1447,8 @@ 
TEST(attributes, cum_sum_op_custom_attributes) TEST(attributes, interpolate_op) { FactoryRegistry::get().register_factory(); - auto img = make_shared(element::f32, Shape{1, 3, 32, 32}); - auto out_shape = make_shared(element::i32, Shape{2}); + auto img = make_shared(element::Type_t::f32, Shape{1, 3, 32, 32}); + auto out_shape = make_shared(element::Type_t::i32, Shape{2}); op::v0::InterpolateAttrs interp_atrs; interp_atrs.axes = AxisSet{1, 2}; @@ -1473,11 +1476,11 @@ TEST(attributes, interpolate_op) TEST(attributes, detection_output_op) { FactoryRegistry::get().register_factory(); - const auto box_logits = make_shared(element::f32, Shape{1, 3, 32, 32}); - const auto class_preds = make_shared(element::f32, Shape{32}); - const auto proposals = make_shared(element::f32, Shape{128, 2}); - const auto aux_class_preds = make_shared(element::f32, Shape{16}); - const auto aux_box_pred = make_shared(element::f32, Shape{32, 2}); + const auto box_logits = make_shared(element::Type_t::f32, Shape{1, 3, 32, 32}); + const auto class_preds = make_shared(element::Type_t::f32, Shape{32}); + const auto proposals = make_shared(element::Type_t::f32, Shape{128, 2}); + const auto aux_class_preds = make_shared(element::Type_t::f32, Shape{16}); + const auto aux_box_pred = make_shared(element::Type_t::f32, Shape{32, 2}); op::DetectionOutputAttrs attrs; attrs.num_classes = 32; @@ -1526,8 +1529,8 @@ TEST(attributes, detection_output_op) TEST(attributes, prior_box_op) { FactoryRegistry::get().register_factory(); - const auto layer_shape = make_shared(element::i64, Shape{128, 128}); - const auto image_shape = make_shared(element::i64, Shape{32, 32}); + const auto layer_shape = make_shared(element::Type_t::i64, Shape{128, 128}); + const auto image_shape = make_shared(element::Type_t::i64, Shape{32, 32}); op::PriorBoxAttrs attrs; attrs.min_size = vector{16.f, 32.f}; @@ -1567,8 +1570,8 @@ TEST(attributes, prior_box_op) TEST(attributes, prior_box_clustered_op) { FactoryRegistry::get().register_factory(); - const auto layer_shape = make_shared(element::i64, Shape{128, 128}); - const auto image_shape = make_shared(element::i64, Shape{32, 32}); + const auto layer_shape = make_shared(element::Type_t::i64, Shape{128, 128}); + const auto image_shape = make_shared(element::Type_t::i64, Shape{32, 32}); op::PriorBoxClusteredAttrs attrs; attrs.widths = vector{128.f, 512.f, 4096.f}; @@ -1598,9 +1601,11 @@ TEST(attributes, prior_box_clustered_op) TEST(attributes, proposal_op) { FactoryRegistry::get().register_factory(); - const auto class_probs = make_shared(element::i64, Shape{1024, 3, 128, 128}); - const auto class_logits = make_shared(element::i64, Shape{1024, 3, 128, 128}); - const auto image_shape = make_shared(element::i64, Shape{4}); + const auto class_probs = + make_shared(element::Type_t::i64, Shape{1024, 3, 128, 128}); + const auto class_logits = + make_shared(element::Type_t::i64, Shape{1024, 3, 128, 128}); + const auto image_shape = make_shared(element::Type_t::i64, Shape{4}); op::ProposalAttrs attrs; attrs.base_size = 224; diff --git a/ngraph/test/backend/abc.in.cpp b/ngraph/test/backend/abc.in.cpp index 4457ebc647bee2..8ce73fe72a9c05 100644 --- a/ngraph/test/backend/abc.in.cpp +++ b/ngraph/test/backend/abc.in.cpp @@ -31,9 +31,9 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, abc) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, 
shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto f = make_shared((A + B) * C, ParameterVector{A, B, C}); std::vector a{1, 2, 3, 4}; @@ -62,9 +62,9 @@ NGRAPH_TEST(${BACKEND_NAME}, abc) NGRAPH_TEST(${BACKEND_NAME}, abc_int64) { Shape shape{2, 2}; - auto A = make_shared(element::i64, shape); - auto B = make_shared(element::i64, shape); - auto C = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); + auto B = make_shared(element::Type_t::i64, shape); + auto C = make_shared(element::Type_t::i64, shape); auto f = make_shared((A + B) * C, ParameterVector{A, B, C}); std::vector a{1, 2, 3, 4}; diff --git a/ngraph/test/backend/abs.in.cpp b/ngraph/test/backend/abs.in.cpp index 9c2d62c090f479..1ab328f996b2b9 100644 --- a/ngraph/test/backend/abs.in.cpp +++ b/ngraph/test/backend/abs.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, abs) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/acos.in.cpp b/ngraph/test/backend/acos.in.cpp index 893322c3d723de..530ce69b7ff685 100644 --- a/ngraph/test/backend/acos.in.cpp +++ b/ngraph/test/backend/acos.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, acos) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/acosh.in.cpp b/ngraph/test/backend/acosh.in.cpp index bcaf7b23aa6abc..1bfb63fc4d1704 100644 --- a/ngraph/test/backend/acosh.in.cpp +++ b/ngraph/test/backend/acosh.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, acosh) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); vector input{0.f, 1.f, -1.f, 2.f, -2.f, 3.f, -3.f, 4.f, 5.f, 10.f, 100.f}; diff --git a/ngraph/test/backend/add.in.cpp b/ngraph/test/backend/add.in.cpp index 93e9f0b591644d..e069038c609239 100644 --- a/ngraph/test/backend/add.in.cpp +++ b/ngraph/test/backend/add.in.cpp @@ -48,8 +48,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, add) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); vector a{1, 2, 3, 4}; @@ -64,8 +64,8 @@ NGRAPH_TEST(${BACKEND_NAME}, add) NGRAPH_TEST(${BACKEND_NAME}, add_overload) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(A + B, ParameterVector{A, B}); vector a{1, 2, 3, 4}; @@ -80,8 +80,8 @@ NGRAPH_TEST(${BACKEND_NAME}, add_overload) NGRAPH_TEST(${BACKEND_NAME}, add_in_place) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = 
make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto T = A + B; auto T2 = T + T; auto T3 = T2 + T2; diff --git a/ngraph/test/backend/aliased_output.in.cpp b/ngraph/test/backend/aliased_output.in.cpp index 8409779339ebf4..42baf1aef64173 100644 --- a/ngraph/test/backend/aliased_output.in.cpp +++ b/ngraph/test/backend/aliased_output.in.cpp @@ -31,11 +31,11 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, aliased_output) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto C = A + B; auto D = A * B; - auto E = op::Constant::create(element::f32, shape, {1, 2, 3, 4}); + auto E = op::Constant::create(element::Type_t::f32, shape, {1, 2, 3, 4}); auto f = make_shared(NodeVector{C, C, D, D, C, E, E}, ParameterVector{A, B}); vector a{0, 1, 2, 3}; diff --git a/ngraph/test/backend/api.in.cpp b/ngraph/test/backend/api.in.cpp index 295ff6dfe7f1ab..fae7559f737b9e 100644 --- a/ngraph/test/backend/api.in.cpp +++ b/ngraph/test/backend/api.in.cpp @@ -35,8 +35,8 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, create_tensor_1) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -44,12 +44,12 @@ NGRAPH_TEST(${BACKEND_NAME}, create_tensor_1) // Create some tensors for input/output vector av = {1, 2, 3, 4}; vector bv = {5, 6, 7, 8}; - shared_ptr a = backend->create_tensor(element::f32, shape); - shared_ptr b = backend->create_tensor(element::f32, shape); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, av); copy_data(b, bv); - shared_ptr result = backend->create_tensor(element::f32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -60,18 +60,18 @@ NGRAPH_TEST(${BACKEND_NAME}, create_tensor_1) NGRAPH_TEST(${BACKEND_NAME}, get_parameters_and_results) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto f = make_shared((A + B) * C, ParameterVector{A, B, C}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::f32, shape); - shared_ptr b = backend->create_tensor(element::f32, shape); - shared_ptr c = backend->create_tensor(element::f32, shape); - shared_ptr result = backend->create_tensor(element::f32, shape); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr c = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{1, 2}, {3, 4}}).get_vector()); copy_data(b, 
test::NDArray({{5, 6}, {7, 8}}).get_vector()); diff --git a/ngraph/test/backend/asin.in.cpp b/ngraph/test/backend/asin.in.cpp index 5b6084e304063b..95ecbcc2668b0a 100644 --- a/ngraph/test/backend/asin.in.cpp +++ b/ngraph/test/backend/asin.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, asin) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/asinh.in.cpp b/ngraph/test/backend/asinh.in.cpp index 6dd0abe9568123..b716fce2874a8a 100644 --- a/ngraph/test/backend/asinh.in.cpp +++ b/ngraph/test/backend/asinh.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, asinh) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); vector input{0.f, 1.f, -1.f, 2.f, -2.f, 3.f, -3.f, 4.f, 5.f, 10.f, 100.f}; diff --git a/ngraph/test/backend/atan.in.cpp b/ngraph/test/backend/atan.in.cpp index e2f0c04b27f0b2..adb9bd107dcd5d 100644 --- a/ngraph/test/backend/atan.in.cpp +++ b/ngraph/test/backend/atan.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, atan) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/atanh.in.cpp b/ngraph/test/backend/atanh.in.cpp index ce7b5a82b64137..99e6ab8ce2509e 100644 --- a/ngraph/test/backend/atanh.in.cpp +++ b/ngraph/test/backend/atanh.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, atanh) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); vector input{0.f, 1.f, -1.f, 2.f, -2.f, 3.f, -3.f, 4.f, 5.f, 10.f, 100.f}; diff --git a/ngraph/test/backend/auto_broadcast.in.cpp b/ngraph/test/backend/auto_broadcast.in.cpp index 928218ccbf9e3b..723dd467dcd720 100644 --- a/ngraph/test/backend/auto_broadcast.in.cpp +++ b/ngraph/test/backend/auto_broadcast.in.cpp @@ -71,11 +71,11 @@ void check_auto_bcast( if (std::is_same::value) { - iet = element::boolean; + iet = element::Type_t::boolean; } if (std::is_same::value) { - oet = element::boolean; + oet = element::Type_t::boolean; } auto A = make_shared(iet, Shape{2, 3}); auto B = make_shared(iet, Shape{3}); @@ -110,17 +110,17 @@ NGRAPH_TEST(${BACKEND_NAME}, auto_bcast_binary_elementwise_pdpd_dynamic) { auto pshape_a = PartialShape::dynamic(); auto pshape_b = PartialShape::dynamic(); - auto a = make_shared(element::f32, pshape_a); - auto b = make_shared(element::f32, pshape_b); + auto a = make_shared(element::Type_t::f32, pshape_a); + auto b = make_shared(element::Type_t::f32, pshape_b); op::AutoBroadcastSpec autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, -1); auto f = make_shared(make_shared(a, b, autob), ParameterVector{a, b}); auto backend = runtime::Backend::create("${BACKEND_NAME}", true); auto ex = backend->compile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); - auto t_a = backend->create_tensor(element::f32, Shape{2, 
3}); - auto t_b = backend->create_tensor(element::f32, Shape{3}); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); + auto t_a = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); + auto t_b = backend->create_tensor(element::Type_t::f32, Shape{3}); copy_data(t_a, vector{1, 2, 3, 4, 5, 6}); copy_data(t_b, vector{5, 6, 7}); ex->call_with_validate({t_r}, {t_a, t_b}); @@ -134,18 +134,18 @@ NGRAPH_TEST(${BACKEND_NAME}, auto_bcast_binary_elementwise_pdpd_dynamic) autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1); f = make_shared(make_shared(a, b, autob), ParameterVector{a, b}); ex = backend->compile(f); - t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); - t_a = backend->create_tensor(element::f32, Shape{2, 3, 4, 5}); - t_b = backend->create_tensor(element::f32, Shape{3, 4}); + t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); + t_a = backend->create_tensor(element::Type_t::f32, Shape{2, 3, 4, 5}); + t_b = backend->create_tensor(element::Type_t::f32, Shape{3, 4}); copy_data(t_a, vector(2 * 3 * 4 * 5, 1)); copy_data(t_b, vector(3 * 4, 1)); ex->call_with_validate({t_r}, {t_a, t_b}); ASSERT_EQ(t_r->get_shape(), (Shape{2, 3, 4, 5})); // a shape {2, 3, 4, 5}, b shape {3, 1} axis = 1 - t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); - t_a = backend->create_tensor(element::f32, Shape{2, 3, 4, 5}); - t_b = backend->create_tensor(element::f32, Shape{3, 1}); + t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); + t_a = backend->create_tensor(element::Type_t::f32, Shape{2, 3, 4, 5}); + t_b = backend->create_tensor(element::Type_t::f32, Shape{3, 1}); copy_data(t_a, vector(2 * 3 * 4 * 5, 1)); copy_data(t_b, vector(3, 1)); ex->call_with_validate({t_r}, {t_a, t_b}); @@ -154,8 +154,8 @@ NGRAPH_TEST(${BACKEND_NAME}, auto_bcast_binary_elementwise_pdpd_dynamic) NGRAPH_TEST(${BACKEND_NAME}, auto_bcast_string_cast) { - auto a = make_shared(element::f32, Shape{1}); - auto b = make_shared(element::f32, Shape{1}); + auto a = make_shared(element::Type_t::f32, Shape{1}); + auto b = make_shared(element::Type_t::f32, Shape{1}); auto add = make_shared(a, b, "NUMPY"); ASSERT_EQ(add->get_autob(), op::AutoBroadcastType::NUMPY); diff --git a/ngraph/test/backend/batch_norm.in.cpp b/ngraph/test/backend/batch_norm.in.cpp index d4b501c8c9d0fe..ee81eb2c48c5d2 100644 --- a/ngraph/test/backend/batch_norm.in.cpp +++ b/ngraph/test/backend/batch_norm.in.cpp @@ -161,7 +161,7 @@ class BatchNormInferenceTesterZeroEpsilon : public BatchNormInferenceTester NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_0eps_f64) { using T = double; - auto& et = element::f64; + element::Type et = element::Type_t::f64; auto backend = runtime::Backend::create("${BACKEND_NAME}"); BatchNormInferenceTesterZeroEpsilon bnt(backend, et); EXPECT_TRUE(bnt.test_gamma()) << "Gamma test"; @@ -173,7 +173,7 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_0eps_f64) NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_0eps_f32) { using T = float; - auto& et = element::f32; + element::Type et = element::Type_t::f32; auto backend = runtime::Backend::create("${BACKEND_NAME}"); BatchNormInferenceTesterZeroEpsilon bnt(backend, et); EXPECT_TRUE(bnt.test_gamma()) << "Gamma test"; @@ -255,7 +255,7 @@ class BatchNormInferenceTesterNonZeroEpsilon : public BatchNormInferenceTester bnt(backend, et); EXPECT_TRUE(bnt.test_gamma()) << "Gamma test"; @@ -267,7 +267,7 @@ NGRAPH_TEST(${BACKEND_NAME}, 
batch_norm_inference_f64) NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_f32) { using T = float; - auto& et = element::f32; + element::Type et = element::Type_t::f32; auto backend = runtime::Backend::create("${BACKEND_NAME}"); BatchNormInferenceTesterNonZeroEpsilon bnt(backend, et); EXPECT_TRUE(bnt.test_gamma()) << "Gamma test"; @@ -279,10 +279,10 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_f32) NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication) { auto input_shape = Shape{2, 2, 2, 1}; - auto input = make_shared(element::f32, input_shape); + auto input = make_shared(element::Type_t::f32, input_shape); auto mvgb_shape = Shape{2}; - auto mvgb = make_shared(element::f32, mvgb_shape); + auto mvgb = make_shared(element::Type_t::f32, mvgb_shape); double eps = 0.001; auto shape_r = Shape{2, 2, 2, 1}; @@ -291,7 +291,7 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication) auto f = make_shared(bn, ParameterVector{input, mvgb, mvgb, mvgb, mvgb}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto _input = backend->create_tensor(element::f32, input_shape); + auto _input = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(_input, vector{0.54881352f, 0.71518934f, @@ -302,9 +302,9 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication) 0.4375872f, 0.89177299f}); - auto _mvgb = backend->create_tensor(element::f32, mvgb_shape); + auto _mvgb = backend->create_tensor(element::Type_t::f32, mvgb_shape); copy_data(_mvgb, vector{1.0f, 1.0f}); - auto bn_output = backend->create_tensor(element::f32, shape_r); + auto bn_output = backend->create_tensor(element::Type_t::f32, shape_r); vector expected_result{0.54903894f, 0.71533161f, @@ -324,10 +324,10 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication) NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication_v5) { auto input_shape = Shape{2, 2, 2, 1}; - auto input = make_shared(element::f32, input_shape); + auto input = make_shared(element::Type_t::f32, input_shape); auto mvgb_shape = Shape{2}; - auto mvgb = make_shared(element::f32, mvgb_shape); + auto mvgb = make_shared(element::Type_t::f32, mvgb_shape); double eps = 0.001; auto shape_r = Shape{2, 2, 2, 1}; @@ -336,7 +336,7 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication_v5) auto f = make_shared(bn, ParameterVector{input, mvgb, mvgb, mvgb, mvgb}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto _input = backend->create_tensor(element::f32, input_shape); + auto _input = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(_input, vector{0.54881352f, 0.71518934f, @@ -347,9 +347,9 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication_v5) 0.4375872f, 0.89177299f}); - auto _mvgb = backend->create_tensor(element::f32, mvgb_shape); + auto _mvgb = backend->create_tensor(element::Type_t::f32, mvgb_shape); copy_data(_mvgb, vector{1.0f, 1.0f}); - auto bn_output = backend->create_tensor(element::f32, shape_r); + auto bn_output = backend->create_tensor(element::Type_t::f32, shape_r); vector expected_result{0.54903894f, 0.71533161f, @@ -369,15 +369,15 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication_v5) NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1) { auto input_shape = Shape{2, 2, 2, 1}; - auto input = make_shared(element::f32, input_shape); + auto input = 
make_shared(element::Type_t::f32, input_shape); auto gamma_shape = Shape{2}; - auto gamma = make_shared(element::f32, gamma_shape); + auto gamma = make_shared(element::Type_t::f32, gamma_shape); auto beta_shape = Shape{2}; - auto beta = make_shared(element::f32, beta_shape); + auto beta = make_shared(element::Type_t::f32, beta_shape); auto mean_shape = Shape{2}; - auto mean = make_shared(element::f32, mean_shape); + auto mean = make_shared(element::Type_t::f32, mean_shape); auto var_shape = Shape{2}; - auto var = make_shared(element::f32, var_shape); + auto var = make_shared(element::Type_t::f32, var_shape); double eps = 0.001; auto shape_r = Shape{2, 2, 2, 1}; auto bn = make_shared(input, gamma, beta, mean, var, eps); @@ -385,7 +385,7 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1) auto f = make_shared(bn, ParameterVector{input, gamma, beta, mean, var}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto _input = backend->create_tensor(element::f32, input_shape); + auto _input = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(_input, vector{0.54881352f, 0.71518934f, @@ -396,15 +396,15 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1) 0.4375872f, 0.89177299f}); - auto _gamma = backend->create_tensor(element::f32, gamma_shape); + auto _gamma = backend->create_tensor(element::Type_t::f32, gamma_shape); copy_data(_gamma, vector{1.0f, 1.0f}); - auto _beta = backend->create_tensor(element::f32, beta_shape); + auto _beta = backend->create_tensor(element::Type_t::f32, beta_shape); copy_data(_beta, vector{0.0f, 0.0f}); - auto _mean = backend->create_tensor(element::f32, mean_shape); + auto _mean = backend->create_tensor(element::Type_t::f32, mean_shape); copy_data(_mean, vector{0.583388f, 0.619252f}); - auto _var = backend->create_tensor(element::f32, var_shape); + auto _var = backend->create_tensor(element::Type_t::f32, var_shape); copy_data(_var, vector{0.0119972f, 0.0282681f}); - auto bn_output = backend->create_tensor(element::f32, shape_r); + auto bn_output = backend->create_tensor(element::Type_t::f32, shape_r); vector expected_result{ -0.30327f, 1.1561f, -0.0963782f, -0.434702f, -1.4011f, 0.548275f, -1.06187f, 1.59295f}; @@ -418,15 +418,15 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1) NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1_v5) { auto input_shape = Shape{2, 2, 2, 1}; - auto input = make_shared(element::f32, input_shape); + auto input = make_shared(element::Type_t::f32, input_shape); auto gamma_shape = Shape{2}; - auto gamma = make_shared(element::f32, gamma_shape); + auto gamma = make_shared(element::Type_t::f32, gamma_shape); auto beta_shape = Shape{2}; - auto beta = make_shared(element::f32, beta_shape); + auto beta = make_shared(element::Type_t::f32, beta_shape); auto mean_shape = Shape{2}; - auto mean = make_shared(element::f32, mean_shape); + auto mean = make_shared(element::Type_t::f32, mean_shape); auto var_shape = Shape{2}; - auto var = make_shared(element::f32, var_shape); + auto var = make_shared(element::Type_t::f32, var_shape); double eps = 0.001; auto shape_r = Shape{2, 2, 2, 1}; auto bn = make_shared(input, gamma, beta, mean, var, eps); @@ -434,7 +434,7 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1_v5) auto f = make_shared(bn, ParameterVector{input, gamma, beta, mean, var}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto _input = 
backend->create_tensor(element::f32, input_shape); + auto _input = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(_input, vector{0.54881352f, 0.71518934f, @@ -445,15 +445,15 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1_v5) 0.4375872f, 0.89177299f}); - auto _gamma = backend->create_tensor(element::f32, gamma_shape); + auto _gamma = backend->create_tensor(element::Type_t::f32, gamma_shape); copy_data(_gamma, vector{1.0f, 1.0f}); - auto _beta = backend->create_tensor(element::f32, beta_shape); + auto _beta = backend->create_tensor(element::Type_t::f32, beta_shape); copy_data(_beta, vector{0.0f, 0.0f}); - auto _mean = backend->create_tensor(element::f32, mean_shape); + auto _mean = backend->create_tensor(element::Type_t::f32, mean_shape); copy_data(_mean, vector{0.583388f, 0.619252f}); - auto _var = backend->create_tensor(element::f32, var_shape); + auto _var = backend->create_tensor(element::Type_t::f32, var_shape); copy_data(_var, vector{0.0119972f, 0.0282681f}); - auto bn_output = backend->create_tensor(element::f32, shape_r); + auto bn_output = backend->create_tensor(element::Type_t::f32, shape_r); vector expected_result{ -0.30327f, 1.1561f, -0.0963782f, -0.434702f, -1.4011f, 0.548275f, -1.06187f, 1.59295f}; diff --git a/ngraph/test/backend/broadcast.in.cpp b/ngraph/test/backend/broadcast.in.cpp index 25b5ac6976b155..6c203a8adb977d 100644 --- a/ngraph/test/backend/broadcast.in.cpp +++ b/ngraph/test/backend/broadcast.in.cpp @@ -43,19 +43,19 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_vector) { Shape shape_a{}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{4}; auto f = make_shared( make_shared( - A, op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r)), + A, op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r)), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{6}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -66,19 +66,19 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_vector) NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_matrix) { Shape shape_a{}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2}; auto f = make_shared( make_shared( - A, op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r)), + A, op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r)), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{6}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -89,19 +89,19 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_matrix) NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_tensor) { Shape shape_a{}; - auto A = make_shared(element::f32, 
shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto f = make_shared( make_shared( - A, op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r)), + A, op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r)), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{6}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -113,18 +113,18 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_tensor) NGRAPH_TEST(${BACKEND_NAME}, broadcast_trivial) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::u64, Shape{shape.size()}, shape)), + A, op::Constant::create(element::Type_t::u64, Shape{shape.size()}, shape)), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{2, 4, 6, 8, 16, 32, 64, 128}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -136,21 +136,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_trivial) NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_colwise) { Shape shape_a{3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 4}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{1}, {0})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{1}, {0})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -162,21 +162,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_colwise) NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise) { Shape shape_a{4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 4}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{1}, {1})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{1}, {1})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = 
backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -189,22 +189,24 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise) NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise_reversed) { Shape shape_a{4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 4}; auto broadcast = make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{1}, {1})); - auto reverse = make_shared( - broadcast, op::Constant::create(element::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX); + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{1}, {1})); + auto reverse = + make_shared(broadcast, + op::Constant::create(element::Type_t::i64, {1}, {1}), + op::v1::Reverse::Mode::INDEX); auto f = make_shared(reverse, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -216,21 +218,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise_reversed) NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise_int64) { Shape shape_a{4}; - auto A = make_shared(element::i64, shape_a); + auto A = make_shared(element::Type_t::i64, shape_a); Shape shape_r{3, 4}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{1}, {1})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{1}, {1})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i64, shape_a); + auto a = backend->create_tensor(element::Type_t::i64, shape_a); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i64, shape_r); + auto result = backend->create_tensor(element::Type_t::i64, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -240,21 +242,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise_int64) NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_to_matrix_int64) { Shape shape_a{1}; - auto A = make_shared(element::i64, shape_a); + auto A = make_shared(element::Type_t::i64, shape_a); Shape shape_r{3, 1}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{1}, {1})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{1}, {1})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i64, shape_a); + auto a = 
backend->create_tensor(element::Type_t::i64, shape_a); copy_data(a, vector{4}); - auto result = backend->create_tensor(element::i64, shape_r); + auto result = backend->create_tensor(element::Type_t::i64, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -264,21 +266,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_to_matrix_int64) NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_to_matrix_int32) { Shape shape_a{1}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{3, 1}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{1}, {1})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{1}, {1})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{4}); - auto result = backend->create_tensor(element::i32, shape_r); + auto result = backend->create_tensor(element::Type_t::i32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -287,15 +289,16 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_to_matrix_int32) static void broadcast_test_helper(const Shape& shape_a, const Shape& shape_r, const AxisSet& axes) { - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); vector inp_data(shape_size(shape_a)); iota(inp_data.begin(), inp_data.end(), 1.f); - auto shape_const = op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r); + auto shape_const = op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r); std::shared_ptr broadcast; if (axes.size() > 0) { - auto axes_const = op::Constant::create(element::i64, Shape{axes.size()}, axes.to_vector()); + auto axes_const = + op::Constant::create(element::Type_t::i64, Shape{axes.size()}, axes.to_vector()); broadcast = make_shared(A, shape_const, axes_const); } else @@ -307,14 +310,14 @@ static void broadcast_test_helper(const Shape& shape_a, const Shape& shape_r, co auto ref_backend = runtime::Backend::create("INTERPRETER"); auto wrk_backend = runtime::Backend::create("${BACKEND_NAME}"); - auto wrk_a = wrk_backend->create_tensor(element::f32, shape_a); + auto wrk_a = wrk_backend->create_tensor(element::Type_t::f32, shape_a); copy_data(wrk_a, inp_data); - auto ref_a = ref_backend->create_tensor(element::f32, shape_a); + auto ref_a = ref_backend->create_tensor(element::Type_t::f32, shape_a); copy_data(ref_a, inp_data); - auto wrk_result = wrk_backend->create_tensor(element::f32, shape_r); - auto ref_result = ref_backend->create_tensor(element::f32, shape_r); + auto wrk_result = wrk_backend->create_tensor(element::Type_t::f32, shape_r); + auto ref_result = ref_backend->create_tensor(element::Type_t::f32, shape_r); auto wrk_handle = wrk_backend->compile(f); auto ref_handle = ref_backend->compile(f); @@ -446,19 +449,19 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_3d_stride_2) NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_0) { Shape shape_a{2, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto f = make_shared( make_shared( - A, op::Constant::create(element::u64, 
Shape{shape_r.size()}, shape_r)), + A, op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r)), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -470,21 +473,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_0) NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_1) { Shape shape_a{2, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{2}, {0, 2})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{2}, {0, 2})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -496,21 +499,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_1) NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_2) { Shape shape_a{2, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{2}, {0, 1})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{2}, {0, 1})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/builder_reduce_ops_opset1.in.cpp b/ngraph/test/backend/builder_reduce_ops_opset1.in.cpp index 1a322bb09bfa47..d5128334ef29f6 100644 --- a/ngraph/test/backend/builder_reduce_ops_opset1.in.cpp +++ b/ngraph/test/backend/builder_reduce_ops_opset1.in.cpp @@ -36,7 +36,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_mean) { const Shape input_shape{4, 3, 2}; const AxisSet axes{1, 2}; - const auto input = make_shared(element::f32, input_shape); + const auto input = make_shared(element::Type_t::f32, input_shape); const auto mean_builder = builder::opset1::mean(input, axes); auto function = make_shared(mean_builder, ParameterVector{input}); @@ -53,7 +53,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_mean_dynamic) { const Shape input_shape{2, 4, 5}; const AxisSet axes{0, 1}; - 
const auto input = make_shared(element::f32, input_shape); + const auto input = make_shared(element::Type_t::f32, input_shape); const auto mean_builder = builder::opset1::mean(input, axes); auto function = make_shared(mean_builder, ParameterVector{input}); @@ -71,7 +71,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_mean_dynamic_2) { const Shape input_shape{2, 1, 3}; const AxisSet axes{1, 2}; - const auto input = make_shared(element::f32, input_shape); + const auto input = make_shared(element::Type_t::f32, input_shape); const auto mean_builder = builder::opset1::mean(input, axes); auto function = make_shared(mean_builder, ParameterVector{input}); @@ -91,7 +91,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_collapse_5d_to_3d) const auto elems_in_tensor = shape_size(shape_input); - const auto A = make_shared(element::f32, shape_input); + const auto A = make_shared(element::Type_t::f32, shape_input); const auto builder_collapse = builder::opset1::collapse(A, 1, shape_input.size() - 2); const auto f = make_shared(builder_collapse, ParameterVector{A}); @@ -112,7 +112,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_collapse_all_dims) const auto elems_in_tensor = shape_size(shape_input); - const auto A = make_shared(element::f32, shape_input); + const auto A = make_shared(element::Type_t::f32, shape_input); const auto builder_collapse = builder::opset1::collapse(A, 0, shape_input.size() - 1); const auto f = make_shared(builder_collapse, ParameterVector{A}); @@ -132,7 +132,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_collapse_none) const auto elems_in_tensor = shape_size(shape_input); - const auto A = make_shared(element::f32, shape_input); + const auto A = make_shared(element::Type_t::f32, shape_input); const auto builder_collapse = builder::opset1::collapse(A, 2, shape_input.size() - 4); const auto f = make_shared(builder_collapse, ParameterVector{A}); @@ -151,7 +151,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_collapse_dyn_shape) PartialShape pshape_input{1, 2, 3, 4, 5, Dimension()}; PartialShape pshape_output{1, 24, 5, Dimension()}; - const auto A = make_shared(element::f32, pshape_input); + const auto A = make_shared(element::Type_t::f32, pshape_input); EXPECT_TRUE(A->get_output_partial_shape(0).same_scheme( PartialShape{1, 2, 3, 4, 5, Dimension::dynamic()})); const auto builder_collapse = builder::opset1::collapse(A, 1, 3); diff --git a/ngraph/test/backend/ceiling.in.cpp b/ngraph/test/backend/ceiling.in.cpp index e237bfa9c29ea4..ca97bd85eb72a8 100644 --- a/ngraph/test/backend/ceiling.in.cpp +++ b/ngraph/test/backend/ceiling.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, ceiling) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); @@ -59,7 +59,7 @@ NGRAPH_TEST(${BACKEND_NAME}, ceiling_int64) { // This tests large numbers that will not fit in a double Shape shape{3}; - auto A = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); vector expected{0, 1, 0x4000000000000001}; diff --git a/ngraph/test/backend/comparison.in.cpp b/ngraph/test/backend/comparison.in.cpp index 0f9651e3c1ecad..98a078a1048b9e 100644 --- a/ngraph/test/backend/comparison.in.cpp +++ b/ngraph/test/backend/comparison.in.cpp @@ -43,18 +43,18 @@ static string s_manifest = "${MANIFEST}"; 
NGRAPH_TEST(${BACKEND_NAME}, equal) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0, 1, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 8, 4, 8, 0, 0, 1, 1.5}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -64,18 +64,18 @@ NGRAPH_TEST(${BACKEND_NAME}, equal) NGRAPH_TEST(${BACKEND_NAME}, notequal) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0, 1, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 8, 4, 8, 0, 0, 1, 1.5}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -85,18 +85,18 @@ NGRAPH_TEST(${BACKEND_NAME}, notequal) NGRAPH_TEST(${BACKEND_NAME}, greater) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0.5, 2, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 4, 8, 0, 0, 1, 1.5}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -106,18 +106,18 @@ NGRAPH_TEST(${BACKEND_NAME}, greater) NGRAPH_TEST(${BACKEND_NAME}, greater_int64) { Shape shape{2, 2, 2}; - auto A = make_shared(element::i64, shape); - auto B = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); + auto B = make_shared(element::Type_t::i64, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = 
backend->create_tensor(element::i64, shape); + auto a = backend->create_tensor(element::Type_t::i64, shape); copy_data(a, vector{0x4000000000000002, 0x4000000000000006, -8, 17, -5, 5, 2, 1}); - auto b = backend->create_tensor(element::i64, shape); + auto b = backend->create_tensor(element::Type_t::i64, shape); copy_data(b, vector{0x4000000000000001, 0x4000000000000002, 4, 8, 0, 0, 1, 2}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -127,18 +127,18 @@ NGRAPH_TEST(${BACKEND_NAME}, greater_int64) NGRAPH_TEST(${BACKEND_NAME}, greatereq) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0, 2, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, -8, 8, 0, 0, 0.5, 1.5}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -148,18 +148,18 @@ NGRAPH_TEST(${BACKEND_NAME}, greatereq) NGRAPH_TEST(${BACKEND_NAME}, less) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0.5, 2, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 4, 8, 0, 0, 1, 1.5}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -169,18 +169,18 @@ NGRAPH_TEST(${BACKEND_NAME}, less) NGRAPH_TEST(${BACKEND_NAME}, lesseq) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0, 2, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, -8, 8, 0, 0, 0.5, 1.5}); - auto result = 
backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -190,18 +190,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lesseq) NGRAPH_TEST(${BACKEND_NAME}, lesseq_int32) { Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); - auto B = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); + auto B = make_shared(element::Type_t::i32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); copy_data(a, vector{0x40000170, 0x40000005, 0x40000005, -5}); - auto b = backend->create_tensor(element::i32, shape); + auto b = backend->create_tensor(element::Type_t::i32, shape); copy_data(b, vector{0x40000140, 0x40000001, 0x40000005, 0}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -211,18 +211,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lesseq_int32) NGRAPH_TEST(${BACKEND_NAME}, lesseq_bool) { Shape shape{2, 2, 2}; - auto A = make_shared(element::boolean, shape); - auto B = make_shared(element::boolean, shape); + auto A = make_shared(element::Type_t::boolean, shape); + auto B = make_shared(element::Type_t::boolean, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); + auto a = backend->create_tensor(element::Type_t::boolean, shape); copy_data(a, vector{1, 1, 1, 1, 1, 1, 1, 1}); - auto b = backend->create_tensor(element::boolean, shape); + auto b = backend->create_tensor(element::Type_t::boolean, shape); copy_data(b, vector{0, 0, 0, 0, 0, 0, 0, 0}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. 
diff --git a/ngraph/test/backend/concat.in.cpp b/ngraph/test/backend/concat.in.cpp index 0dc8b899efe78b..db8e68275b6296 100644 --- a/ngraph/test/backend/concat.in.cpp +++ b/ngraph/test/backend/concat.in.cpp @@ -34,11 +34,11 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, concat_negative_axis) { auto pshape_a = PartialShape::dynamic(); - auto A = make_shared(element::f32, pshape_a); + auto A = make_shared(element::Type_t::f32, pshape_a); auto pshape_b = PartialShape::dynamic(); - auto B = make_shared(element::f32, pshape_b); + auto B = make_shared(element::Type_t::f32, pshape_b); auto pshape_c = PartialShape::dynamic(); - auto C = make_shared(element::f32, pshape_c); + auto C = make_shared(element::Type_t::f32, pshape_c); auto pshape_r = PartialShape::dynamic(); auto f = make_shared(make_shared(NodeVector{A, B, C}, -1), ParameterVector{A, B, C}); @@ -46,13 +46,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_negative_axis) auto backend = runtime::Backend::create("${BACKEND_NAME}", true); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, Shape{2, 2}); + auto a = backend->create_tensor(element::Type_t::f32, Shape{2, 2}); copy_data(a, vector{2, 4, 8, 16}); - auto b = backend->create_tensor(element::f32, Shape{2, 3}); + auto b = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); copy_data(b, vector{1, 2, 4, 8, 16, 32}); - auto c = backend->create_tensor(element::f32, Shape{2, 3}); + auto c = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); copy_data(c, vector{2, 3, 5, 7, 11, 13}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); ASSERT_EQ(result->get_shape(), (Shape{2, 8})); @@ -63,11 +63,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_negative_axis) NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_colwise) { Shape shape_a{2, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{2, 3}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_c{2, 3}; - auto C = make_shared(element::f32, shape_c); + auto C = make_shared(element::Type_t::f32, shape_c); Shape shape_r{2, 8}; auto f = make_shared(make_shared(NodeVector{A, B, C}, 1), ParameterVector{A, B, C}); @@ -75,13 +75,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_colwise) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{2, 4, 8, 16}); - auto b = backend->create_tensor(element::f32, shape_b); + auto b = backend->create_tensor(element::Type_t::f32, shape_b); copy_data(b, vector{1, 2, 4, 8, 16, 32}); - auto c = backend->create_tensor(element::f32, shape_c); + auto c = backend->create_tensor(element::Type_t::f32, shape_c); copy_data(c, vector{2, 3, 5, 7, 11, 13}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); @@ -94,11 +94,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_colwise) NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_rowwise) { Shape shape_a{2, 2}; - auto A = make_shared(element::f32, 
shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{3, 2}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_c{3, 2}; - auto C = make_shared(element::f32, shape_c); + auto C = make_shared(element::Type_t::f32, shape_c); Shape shape_r{8, 2}; auto f = make_shared(make_shared(NodeVector{A, B, C}, 0), ParameterVector{A, B, C}); @@ -106,13 +106,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_rowwise) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{2, 4, 8, 16}); - auto b = backend->create_tensor(element::f32, shape_b); + auto b = backend->create_tensor(element::Type_t::f32, shape_b); copy_data(b, vector{1, 2, 4, 8, 16, 32}); - auto c = backend->create_tensor(element::f32, shape_c); + auto c = backend->create_tensor(element::Type_t::f32, shape_c); copy_data(c, vector{2, 3, 5, 7, 11, 13}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); @@ -125,11 +125,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_rowwise) NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_int64) { Shape shape_a{2, 2}; - auto A = make_shared(element::i64, shape_a); + auto A = make_shared(element::Type_t::i64, shape_a); Shape shape_b{3, 2}; - auto B = make_shared(element::i64, shape_b); + auto B = make_shared(element::Type_t::i64, shape_b); Shape shape_c{3, 2}; - auto C = make_shared(element::i64, shape_c); + auto C = make_shared(element::Type_t::i64, shape_c); Shape shape_r{8, 2}; auto f = make_shared(make_shared(NodeVector{A, B, C}, 0), ParameterVector{A, B, C}); @@ -137,13 +137,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_int64) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i64, shape_a); + auto a = backend->create_tensor(element::Type_t::i64, shape_a); copy_data(a, vector{2, 4, 8, 16}); - auto b = backend->create_tensor(element::i64, shape_b); + auto b = backend->create_tensor(element::Type_t::i64, shape_b); copy_data(b, vector{1, 2, 4, 8, 16, 32}); - auto c = backend->create_tensor(element::i64, shape_c); + auto c = backend->create_tensor(element::Type_t::i64, shape_c); copy_data(c, vector{2, 3, 5, 7, 11, 13}); - auto result = backend->create_tensor(element::i64, shape_r); + auto result = backend->create_tensor(element::Type_t::i64, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); @@ -166,7 +166,7 @@ NGRAPH_TEST_P(${BACKEND_NAME}, concat_vector_params, concat_vector_large) ParameterVector inputs_param; for (uint32_t i = 0; i < num_inputs; i++) { - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); inputs_param.push_back(A); inputs.push_back(A); } @@ -180,12 +180,12 @@ NGRAPH_TEST_P(${BACKEND_NAME}, concat_vector_params, concat_vector_large) std::vector ref_result; for (uint32_t i = 0; i < num_inputs; i++) { - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{static_cast(i)}); ref_result.push_back(static_cast(i)); inputs_value.push_back(a); } - auto result = backend->create_tensor(element::f32, 
shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, inputs_value); @@ -205,11 +205,11 @@ NGRAPH_INSTANTIATE_TEST_CASE_P(${BACKEND_NAME}, NGRAPH_TEST(${BACKEND_NAME}, concat_vector) { Shape shape_a{4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{6}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_c{2}; - auto C = make_shared(element::f32, shape_c); + auto C = make_shared(element::Type_t::f32, shape_c); Shape shape_r{12}; auto f = make_shared(make_shared(NodeVector{A, B, C}, 0), ParameterVector{A, B, C}); @@ -217,13 +217,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_vector) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{2, 4, 8, 16}); - auto b = backend->create_tensor(element::f32, shape_b); + auto b = backend->create_tensor(element::Type_t::f32, shape_b); copy_data(b, vector{1, 2, 4, 8, 16, 32}); - auto c = backend->create_tensor(element::f32, shape_c); + auto c = backend->create_tensor(element::Type_t::f32, shape_c); copy_data(c, vector{18, 19}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); @@ -235,9 +235,9 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_vector) NGRAPH_TEST(${BACKEND_NAME}, concat_4d_tensor) { Shape shape{1, 1, 1, 1}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); Shape shape_r{3, 1, 1, 1}; auto f = make_shared(make_shared(NodeVector{A, B, C}, 0), ParameterVector{A, B, C}); @@ -245,13 +245,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_4d_tensor) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{2}); - auto c = backend->create_tensor(element::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); copy_data(c, vector{3}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); @@ -262,9 +262,9 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_4d_tensor) NGRAPH_TEST(${BACKEND_NAME}, concat_2d_tensor) { Shape shape{1, 1}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); Shape shape_r{3, 1}; auto f = make_shared(make_shared(NodeVector{A, B, C}, 0), ParameterVector{A, B, C}); @@ -272,13 +272,13 @@ 
NGRAPH_TEST(${BACKEND_NAME}, concat_2d_tensor) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{2}); - auto c = backend->create_tensor(element::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); copy_data(c, vector{3}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); @@ -289,11 +289,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_2d_tensor) NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_2d_tensor) { Shape shape{1, 1}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto add1 = make_shared(A, B); - auto C = make_shared(element::f32, shape); - auto D = make_shared(element::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); + auto D = make_shared(element::Type_t::f32, shape); auto add2 = make_shared(C, D); auto subtract = make_shared(C, A); Shape shape_r{3, 1}; @@ -303,15 +303,15 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_2d_tensor) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{2}); - auto c = backend->create_tensor(element::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); copy_data(c, vector{3}); - auto d = backend->create_tensor(element::f32, shape); + auto d = backend->create_tensor(element::Type_t::f32, shape); copy_data(d, vector{4}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c, d}); @@ -322,11 +322,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_2d_tensor) NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_propagate_2d_tensor) { Shape shape{1, 1}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto add1 = make_shared(A, B); - auto C = make_shared(element::f32, shape); - auto D = make_shared(element::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); + auto D = make_shared(element::Type_t::f32, shape); auto add2 = make_shared(C, D); auto concat1 = make_shared(NodeVector{add1, add2}, 0); auto subtract = make_shared(C, A); @@ -337,15 +337,15 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_propagate_2d_tensor) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = 
backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{2}); - auto c = backend->create_tensor(element::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); copy_data(c, vector{3}); - auto d = backend->create_tensor(element::f32, shape); + auto d = backend->create_tensor(element::Type_t::f32, shape); copy_data(d, vector{4}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c, d}); @@ -357,20 +357,20 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_tree_1) { Shape shape{1, 2, 2}; Shape shape_r{1, 4, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto add1 = make_shared(A, B); auto add2 = make_shared(A, B); auto concat = make_shared(NodeVector{add1, add2}, 1); auto f = make_shared(make_shared(concat, concat), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 1, 1, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 1, 1, 1}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); vector expected; @@ -383,8 +383,8 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_tree_2) { Shape shape{1, 2, 2}; Shape shape_r{1, 8, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto add1 = make_shared(A, B); auto add2 = make_shared(A, B); auto concat1 = make_shared(NodeVector{add1, add2}, 1); @@ -394,11 +394,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_tree_2) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 1, 1, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 1, 1, 1}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); vector expected; @@ -411,8 +411,8 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_tree_3) { Shape shape{1, 2, 2}; Shape shape_r{1, 16, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto concat1 = make_shared(NodeVector{A, B}, 1); auto concat2 = make_shared(NodeVector{A, B}, 1); auto concat3 = make_shared(NodeVector{A, B}, 1); @@ -423,11 +423,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_tree_3) auto f = make_shared(make_shared(concat14, concat14), ParameterVector{A, B}); auto backend = 
runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 1, 1, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 1, 1, 1}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); vector expected; @@ -440,8 +440,8 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_add_concat) { Shape shape{2, 2}; Shape shape_r{4, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto add1 = make_shared(A, B); auto add2 = make_shared(add1, add1); auto concat = make_shared(NodeVector{add1, add2}, 0); @@ -449,11 +449,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_add_concat) auto f = make_shared(add3, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 1, 1, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 1, 1, 1}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); vector expected = {4, 4, 4, 4, 8, 8, 8, 8}; @@ -464,8 +464,8 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_add_concat_2) { Shape shape{1, 2, 2}; Shape shape_r{1, 6, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto add1 = make_shared(A, B); auto add2 = make_shared(A, B); auto add3 = make_shared(A, B); @@ -480,11 +480,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_add_concat_2) auto f = make_shared(add6, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 1, 1, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 1, 1, 1}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); vector expected = {4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4}; @@ -556,11 +556,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_5d) } Shape shape_a{2, 3, 4, 3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{2, 3, 3, 3, 2}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_c{2, 3, 2, 3, 2}; - auto C = make_shared(element::f32, shape_c); + auto C = make_shared(element::Type_t::f32, shape_c); Shape shape_r{2, 3, 9, 3, 2}; auto r = make_shared(NodeVector{A, B, C}, 2); @@ -569,14 
+569,14 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_5d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, a_data); - auto b = backend->create_tensor(element::f32, shape_b); + auto b = backend->create_tensor(element::Type_t::f32, shape_b); copy_data(b, b_data); - auto c = backend->create_tensor(element::f32, shape_c); + auto c = backend->create_tensor(element::Type_t::f32, shape_c); copy_data(c, c_data); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); @@ -616,9 +616,9 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_5d) NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_last) { Shape shape_a{4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{0}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_r{4}; auto r = make_shared(NodeVector{A, B}, 0); @@ -630,12 +630,12 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_last) vector a_data{1, 2, 3, 4}; vector b_data(0); - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, a_data); - auto b = backend->create_tensor(element::f32, shape_b); + auto b = backend->create_tensor(element::Type_t::f32, shape_b); copy_data(b, b_data); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -646,11 +646,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_last) NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_middle) { Shape shape_a{4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{0}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_c{4}; - auto C = make_shared(element::f32, shape_c); + auto C = make_shared(element::Type_t::f32, shape_c); Shape shape_r{8}; auto r = make_shared(NodeVector{A, B, C}, 0); @@ -663,14 +663,14 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_middle) vector b_data(0); vector c_data{5, 6, 7, 8}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, a_data); - auto b = backend->create_tensor(element::f32, shape_b); + auto b = backend->create_tensor(element::Type_t::f32, shape_b); copy_data(b, b_data); - auto c = backend->create_tensor(element::f32, shape_c); + auto c = backend->create_tensor(element::Type_t::f32, shape_c); copy_data(c, c_data); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); @@ -682,13 +682,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_middle) NGRAPH_TEST(${BACKEND_NAME}, concat_zero_zero) { Shape shape{0}; - auto constant_1 = op::Constant::create(element::f32, shape, {1}); + auto constant_1 = op::Constant::create(element::Type_t::f32, shape, {1}); auto concat_1 = make_shared(NodeVector{constant_1, 
constant_1}, 0); auto f = make_shared(concat_1, ParameterVector{}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {}); @@ -700,11 +700,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_zero_zero) NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_4d_middle) { Shape shape_a{2, 2, 1, 1}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{2, 2, 0, 1}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_c{2, 2, 1, 1}; - auto C = make_shared(element::f32, shape_c); + auto C = make_shared(element::Type_t::f32, shape_c); Shape shape_r{2, 2, 2, 1}; auto r = make_shared(NodeVector{A, B, C}, 2); @@ -717,14 +717,14 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_4d_middle) vector b_data(0); vector c_data{5, 6, 7, 8}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, a_data); - auto b = backend->create_tensor(element::f32, shape_b); + auto b = backend->create_tensor(element::Type_t::f32, shape_b); copy_data(b, b_data); - auto c = backend->create_tensor(element::f32, shape_c); + auto c = backend->create_tensor(element::Type_t::f32, shape_c); copy_data(c, c_data); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); diff --git a/ngraph/test/backend/constant.in.cpp b/ngraph/test/backend/constant.in.cpp index 813037b0d00954..e5d872e50ad0e0 100644 --- a/ngraph/test/backend/constant.in.cpp +++ b/ngraph/test/backend/constant.in.cpp @@ -34,13 +34,13 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, tensor_constant) { Shape shape{2, 2, 2}; - auto A = op::Constant::create(element::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8}); + auto A = op::Constant::create(element::Type_t::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8}); auto f = make_shared(A, ParameterVector{}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {}); @@ -52,14 +52,14 @@ NGRAPH_TEST(${BACKEND_NAME}, tensor_constant) NGRAPH_TEST(${BACKEND_NAME}, tensor_2constant) { Shape shape{2, 2, 2}; - auto A = op::Constant::create(element::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8}); + auto A = op::Constant::create(element::Type_t::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8}); auto f = make_shared(NodeVector{A, A}, ParameterVector{}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto result0 = backend->create_tensor(element::f32, shape); - auto result1 = backend->create_tensor(element::f32, shape); + auto result0 = backend->create_tensor(element::Type_t::f32, shape); + auto result1 = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result0, result1}, {}); @@ -74,13 +74,13 @@ NGRAPH_TEST(${BACKEND_NAME}, tensor_2constant) NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_with_op) { Shape shape{2, 2, 2}; - auto A = 
op::Constant::create(element::f32, shape, {-1, 2, 3, -4, 5, -6, -7, 8}); + auto A = op::Constant::create(element::Type_t::f32, shape, {-1, 2, 3, -4, 5, -6, -7, 8}); auto f = make_shared(make_shared(A), ParameterVector{}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {}); @@ -91,29 +91,30 @@ NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_with_op) NGRAPH_TEST(${BACKEND_NAME}, constant_multi_use) { - auto A = make_shared(element::i32, Shape{}, std::vector{"388"}); + auto A = + make_shared(element::Type_t::i32, Shape{}, std::vector{"388"}); auto f = make_shared(A, ParameterVector{}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - std::shared_ptr r1 = backend->create_tensor(element::i32, Shape{}); + std::shared_ptr r1 = backend->create_tensor(element::Type_t::i32, Shape{}); auto handle = backend->compile(f); handle->call_with_validate({r1}, std::vector>{}); EXPECT_EQ(read_vector(r1), std::vector{388}); - std::shared_ptr r2 = backend->create_tensor(element::i32, Shape{}); + std::shared_ptr r2 = backend->create_tensor(element::Type_t::i32, Shape{}); handle->call_with_validate({r2}, std::vector>{}); EXPECT_EQ(read_vector(r2), std::vector{388}); } NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_float32) { - auto r = op::Constant::create(element::f32, Shape{}, {4.75}); + auto r = op::Constant::create(element::Type_t::f32, Shape{}, {4.75}); auto f = make_shared(r, ParameterVector{}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto result = backend->create_tensor(element::f32, Shape{}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{}); auto handle = backend->compile(f); handle->call_with_validate({result}, {}); @@ -123,13 +124,13 @@ NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_float32) NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_int64) { - auto r = op::Constant::create(element::i64, Shape{}, {0x4000000000000001}); + auto r = op::Constant::create(element::Type_t::i64, Shape{}, {0x4000000000000001}); auto f = make_shared(r, ParameterVector{}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto result = backend->create_tensor(element::i64, Shape{}); + auto result = backend->create_tensor(element::Type_t::i64, Shape{}); auto handle = backend->compile(f); handle->call_with_validate({result}, {}); @@ -139,13 +140,13 @@ NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_int64) NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_float32) { Shape shape{2, 2}; - auto r = op::Constant::create(element::f32, shape, {4.75, 4.5, -5.25, 0.0}); + auto r = op::Constant::create(element::Type_t::f32, shape, {4.75, 4.5, -5.25, 0.0}); auto f = make_shared(r, ParameterVector{}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {}); @@ -157,11 +158,12 @@ NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_float32) NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_int64) { Shape shape{2}; - auto r = op::Constant::create(element::i64, shape, {0x4000000000000001, 0x4000000000000002}); + auto r = + 
op::Constant::create(element::Type_t::i64, shape, {0x4000000000000001, 0x4000000000000002}); auto f = make_shared(r, ParameterVector{}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto result = backend->create_tensor(element::i64, shape); + auto result = backend->create_tensor(element::Type_t::i64, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {}); EXPECT_EQ((vector{0x4000000000000001, 0x4000000000000002}), @@ -171,18 +173,18 @@ NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_int64) NGRAPH_TEST(${BACKEND_NAME}, constant_equality_bool) { Shape shape{4}; - // auto A = make_shared(element::boolean, shape); - // auto B = make_shared(element::boolean, shape); + // auto A = make_shared(element::Type_t::boolean, shape); + // auto B = make_shared(element::Type_t::boolean, shape); // auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); - auto A = op::Constant::create(element::boolean, shape, {true, false, true, false}); - auto B = op::Constant::create(element::boolean, shape, {true, true, true, true}); + auto A = op::Constant::create(element::Type_t::boolean, shape, {true, false, true, false}); + auto B = op::Constant::create(element::Type_t::boolean, shape, {true, true, true, true}); auto f = make_shared(make_shared(A, B), ParameterVector{}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {}); diff --git a/ngraph/test/backend/convert.in.cpp b/ngraph/test/backend/convert.in.cpp index 17cc8d13ff00f7..0a0b780ea76bfc 100644 --- a/ngraph/test/backend/convert.in.cpp +++ b/ngraph/test/backend/convert.in.cpp @@ -32,15 +32,16 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, convert_int32_float32) { Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); - auto f = make_shared(make_shared(A, element::f32), ParameterVector{A}); + auto A = make_shared(element::Type_t::i32, shape); + auto f = make_shared(make_shared(A, element::Type_t::f32), + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); copy_data(a, vector{281, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -50,15 +51,16 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_int32_float32) NGRAPH_TEST(${BACKEND_NAME}, convert_uint16_float32) { Shape shape{2, 2}; - auto A = make_shared(element::u16, shape); - auto f = make_shared(make_shared(A, element::f32), ParameterVector{A}); + auto A = make_shared(element::Type_t::u16, shape); + auto f = make_shared(make_shared(A, element::Type_t::f32), + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::u16, shape); + auto a = backend->create_tensor(element::Type_t::u16, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = 
backend->compile(f); handle->call_with_validate({result}, {a}); @@ -69,9 +71,9 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_uint16_float32) NGRAPH_TEST(${BACKEND_NAME}, convert_int32_bool) { Shape shape{2, 3}; - auto A = make_shared(element::i32, shape); - auto f = - make_shared(make_shared(A, element::boolean), ParameterVector{A}); + auto A = make_shared(element::Type_t::i32, shape); + auto f = make_shared(make_shared(A, element::Type_t::boolean), + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -79,9 +81,9 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_int32_bool) int32_t max = std::numeric_limits::max(); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); copy_data(a, vector{0, 12, 23, 0, lowest, max}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -91,9 +93,9 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_int32_bool) NGRAPH_TEST(${BACKEND_NAME}, convert_float32_bool) { Shape shape{3, 3}; - auto A = make_shared(element::f32, shape); - auto f = - make_shared(make_shared(A, element::boolean), ParameterVector{A}); + auto A = make_shared(element::Type_t::f32, shape); + auto f = make_shared(make_shared(A, element::Type_t::boolean), + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -104,9 +106,9 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_float32_bool) float neg_inf = -std::numeric_limits::infinity(); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0.f, 1.5745f, 0.12352f, 0.f, lowest, max, min, pos_inf, neg_inf}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -121,14 +123,14 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_float32_bf16) vector a_data = { 0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}; - auto A = make_shared(element::f32, shape_a); - auto convert = make_shared(A, element::bf16); + auto A = make_shared(element::Type_t::f32, shape_a); + auto convert = make_shared(A, element::Type_t::bf16); auto f = make_shared(NodeVector{convert}, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, a_data); - auto result = backend->create_tensor(element::bf16, shape_a); + auto result = backend->create_tensor(element::Type_t::bf16, shape_a); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); EXPECT_EQ((vector{ @@ -144,14 +146,14 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_bf16_float32) vector a_data = { 0.5, 1.5, 0.5, 2.5, 1.5, 0.5, 3.5, 2.5, 0.5, 0.5, 2.5, 0.5, 0.5, 0.5, 1.5}; - auto A = make_shared(element::bf16, shape_a); - auto convert = make_shared(A, element::f32); + auto A = make_shared(element::Type_t::bf16, shape_a); + auto convert = make_shared(A, element::Type_t::f32); auto f = make_shared(NodeVector{convert}, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some 
tensors for input/output - auto a = backend->create_tensor(element::bf16, shape_a); + auto a = backend->create_tensor(element::Type_t::bf16, shape_a); copy_data(a, a_data); - auto result = backend->create_tensor(element::f32, shape_a); + auto result = backend->create_tensor(element::Type_t::f32, shape_a); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); EXPECT_EQ((vector{0.5f, diff --git a/ngraph/test/backend/convolution.in.cpp b/ngraph/test/backend/convolution.in.cpp index ab2b5939b79c2d..1b4d7ef2dcf4c2 100644 --- a/ngraph/test/backend/convolution.in.cpp +++ b/ngraph/test/backend/convolution.in.cpp @@ -34,9 +34,9 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, convolution_outlining) { Shape shape_a{1, 2, 2, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{2, 2, 1, 1}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_r{1, 2, 2, 2}; auto conv1 = make_shared(A, B, @@ -57,11 +57,11 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_outlining) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}); - auto b = backend->create_tensor(element::f32, shape_b); + auto b = backend->create_tensor(element::Type_t::f32, shape_b); copy_data(b, vector{1.0f, 1.0f, 1.0f, 1.0f}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); vector expected_result{4.0f, 4.0f, 4.0f, 4.0f, 4.0f, 4.0f, 4.0f, 4.0f}; @@ -73,9 +73,9 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_outlining) NGRAPH_TEST(${BACKEND_NAME}, convolution_simple) { Shape shape_a{1, 2, 2, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{2, 2, 1, 1}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_r{1, 2, 2, 2}; auto conv1 = make_shared(A, B, @@ -90,11 +90,11 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_simple) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}); - auto b = backend->create_tensor(element::f32, shape_b); + auto b = backend->create_tensor(element::Type_t::f32, shape_b); copy_data(b, vector{3.0f, 3.0f, 3.0f, 3.0f}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); vector expected_result{18.0f, 24.0f, 30.0f, 36.0f, 18.0f, 24.0f, 30.0f, 36.0f}; @@ -106,9 +106,9 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_simple) NGRAPH_TEST(${BACKEND_NAME}, convolution_simple_padding) { Shape shape_a{1, 1, 2, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{1, 1, 1, 1}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_r{1, 1, 5, 5}; auto conv1 = make_shared(A, B, @@ -123,11 +123,11 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_simple_padding) auto backend = 
runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1.0f, 2.0f, 3.0f, 4.0f}); - auto b = backend->create_tensor(element::f32, shape_b); + auto b = backend->create_tensor(element::Type_t::f32, shape_b); copy_data(b, vector{2.0f}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); // clang-format off vector expected_result{0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 0.0f, @@ -145,12 +145,12 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_simple_padding) NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_data) { Shape shape_filter{6, 3, 3, 3}; - auto filters = make_shared(element::f32, PartialShape::dynamic()); + auto filters = make_shared(element::Type_t::f32, PartialShape::dynamic()); Shape shape_delta{2, 6, 3, 3}; - auto deltas = make_shared(element::f32, PartialShape::dynamic()); + auto deltas = make_shared(element::Type_t::f32, PartialShape::dynamic()); Shape shape_data_batch_shape{2, 3, 5, 5}; auto data_batch_shape = - make_shared(element::i64, PartialShape{Dimension::dynamic()}); + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic()}); auto strides = Strides{1, 1}; auto dilations = Strides{1, 1}; auto padding_begin = CoordinateDiff{0, 0}; @@ -165,7 +165,7 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_data) auto handle = backend->compile(f); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); vector filter, delta, expected_result; @@ -181,11 +181,12 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_data) vector shapes = {5, 5}; // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_delta); + auto a = backend->create_tensor(element::Type_t::f32, shape_delta); copy_data(a, delta); - auto b = backend->create_tensor(element::f32, shape_filter); + auto b = backend->create_tensor(element::Type_t::f32, shape_filter); copy_data(b, filter); - auto c = backend->create_tensor(element::i64, Shape{shapes.size()}); // dynamic data batch shape + auto c = backend->create_tensor(element::Type_t::i64, + Shape{shapes.size()}); // dynamic data batch shape copy_data(c, shapes); handle->call_with_validate({result}, {a, b, c}); EXPECT_FALSE(test::all_close_f(vector{expected_result}, read_vector(result))); diff --git a/ngraph/test/backend/cos.in.cpp b/ngraph/test/backend/cos.in.cpp index 9e29f11199f17b..87f9b81192b325 100644 --- a/ngraph/test/backend/cos.in.cpp +++ b/ngraph/test/backend/cos.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, cos) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/cosh.in.cpp b/ngraph/test/backend/cosh.in.cpp index 461c609fb2dd15..126ee0e3f5fb0a 100644 --- a/ngraph/test/backend/cosh.in.cpp +++ b/ngraph/test/backend/cosh.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, cosh) { Shape shape{6}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = 
make_shared(make_shared(A), ParameterVector{A}); vector input{1.0f, 0.0f, -0.0f, -1.0f, 5.0f, -5.0f}; diff --git a/ngraph/test/backend/ctc_greedy_decoder.in.cpp b/ngraph/test/backend/ctc_greedy_decoder.in.cpp index ae516d7ef2e631..57fb6675a52215 100644 --- a/ngraph/test/backend/ctc_greedy_decoder.in.cpp +++ b/ngraph/test/backend/ctc_greedy_decoder.in.cpp @@ -53,8 +53,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder) const auto data_shape = Shape{T, N, C}; const auto masks_shape = Shape{T, N}; - auto data = make_shared(element::f32, data_shape); - auto masks = make_shared(element::f32, masks_shape); + auto data = make_shared(element::Type_t::f32, data_shape); + auto masks = make_shared(element::Type_t::f32, masks_shape); auto decoder = make_shared(data, masks, false); auto function = make_shared(decoder, ParameterVector{data, masks}); auto test_case = test::TestCase(function); @@ -74,8 +74,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder_f16) const auto data_shape = Shape{T, N, C}; const auto masks_shape = Shape{T, N}; - auto data = make_shared(element::f16, data_shape); - auto masks = make_shared(element::f16, masks_shape); + auto data = make_shared(element::Type_t::f16, data_shape); + auto masks = make_shared(element::Type_t::f16, masks_shape); auto decoder = make_shared(data, masks, false); auto function = make_shared(decoder, ParameterVector{data, masks}); auto test_case = test::TestCase(function); @@ -95,8 +95,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder_multiple_batches) const auto data_shape = Shape{T, N, C}; const auto masks_shape = Shape{T, N}; - auto data = make_shared(element::f32, data_shape); - auto masks = make_shared(element::f32, masks_shape); + auto data = make_shared(element::Type_t::f32, data_shape); + auto masks = make_shared(element::Type_t::f32, masks_shape); auto decoder = make_shared(data, masks, false); auto function = make_shared(decoder, ParameterVector{data, masks}); auto test_case = test::TestCase(function); @@ -136,8 +136,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder_single_batch_short_sequence) const auto data_shape = Shape{T, N, C}; const auto masks_shape = Shape{T, N}; - auto data = make_shared(element::f32, data_shape); - auto masks = make_shared(element::f32, masks_shape); + auto data = make_shared(element::Type_t::f32, data_shape); + auto masks = make_shared(element::Type_t::f32, masks_shape); auto decoder = make_shared(data, masks, false); auto function = make_shared(decoder, ParameterVector{data, masks}); auto test_case = test::TestCase(function); @@ -157,8 +157,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder_merge) const auto data_shape = Shape{T, N, C}; const auto masks_shape = Shape{T, N}; - auto data = make_shared(element::f32, data_shape); - auto masks = make_shared(element::f32, masks_shape); + auto data = make_shared(element::Type_t::f32, data_shape); + auto masks = make_shared(element::Type_t::f32, masks_shape); auto decoder = make_shared(data, masks, true); auto function = make_shared(decoder, ParameterVector{data, masks}); auto test_case = test::TestCase(function); @@ -178,8 +178,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder_single_no_merge) const auto data_shape = Shape{T, N, C}; const auto masks_shape = Shape{T, N}; - auto data = make_shared(element::f32, data_shape); - auto masks = make_shared(element::f32, masks_shape); + auto data = make_shared(element::Type_t::f32, data_shape); + auto masks = make_shared(element::Type_t::f32, masks_shape); auto decoder = make_shared(data, masks, false); auto function = 
make_shared(decoder, ParameterVector{data, masks}); auto test_case = test::TestCase(function); @@ -199,8 +199,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder_multiple_sequences) const auto data_shape = Shape{T, N, C}; const auto masks_shape = Shape{T, N}; - auto data = make_shared(element::f32, data_shape); - auto masks = make_shared(element::f32, masks_shape); + auto data = make_shared(element::Type_t::f32, data_shape); + auto masks = make_shared(element::Type_t::f32, masks_shape); auto decoder = make_shared(data, masks, false); auto function = make_shared(decoder, ParameterVector{data, masks}); auto test_case = test::TestCase(function); diff --git a/ngraph/test/backend/cum_sum.in.cpp b/ngraph/test/backend/cum_sum.in.cpp index 7e6f143562e740..e70d0258066a2a 100644 --- a/ngraph/test/backend/cum_sum.in.cpp +++ b/ngraph/test/backend/cum_sum.in.cpp @@ -42,18 +42,18 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, cum_sum_default) { Shape shape{1, 4}; - auto A = make_shared(element::f32, shape); - auto axis = make_shared(element::i32, Shape{1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axis = make_shared(element::Type_t::i32, Shape{1}); auto f = make_shared(make_shared(A, axis), ParameterVector{A, axis}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape()); copy_data(axis_tensor, vector{1}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, axis_tensor}); @@ -63,18 +63,18 @@ NGRAPH_TEST(${BACKEND_NAME}, cum_sum_default) NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim) { Shape shape{2, 4}; - auto A = make_shared(element::f32, shape); - auto axis = make_shared(element::i64, Shape{1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axis = make_shared(element::Type_t::i64, Shape{1}); auto f = make_shared(make_shared(A, axis), ParameterVector{A, axis}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7}); auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape()); copy_data(axis_tensor, vector{0}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, axis_tensor}); @@ -85,15 +85,15 @@ NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim) NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim_default_axis) { Shape shape{2, 4}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7}); - auto result = backend->create_tensor(element::f32, shape); + auto result = 
backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -105,19 +105,19 @@ NGRAPH_TEST(${BACKEND_NAME}, cum_sum_3d) { auto test_cumsum_3d = [](const int32_t axis_val) -> void { Shape shape{3, 2, 4}; - auto A = make_shared(element::f32, shape); - auto axis = make_shared(element::i32, Shape{1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axis = make_shared(element::Type_t::i32, Shape{1}); auto f = make_shared(make_shared(A, axis), ParameterVector{A, axis}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}); auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape()); copy_data(axis_tensor, vector{axis_val}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, axis_tensor}); @@ -153,19 +153,19 @@ NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim_allmodes) { auto test_cum_sum_allmodes = [](const int64_t axis_val, int exclusive, int reverse) { Shape shape{2, 4}; - auto A = make_shared(element::f32, shape); - auto axis = make_shared(element::i64, Shape{1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axis = make_shared(element::Type_t::i64, Shape{1}); auto f = make_shared(make_shared(A, axis, exclusive, reverse), ParameterVector{A, axis}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7}); auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape()); copy_data(axis_tensor, vector{axis_val}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, axis_tensor}); diff --git a/ngraph/test/backend/divide.in.cpp b/ngraph/test/backend/divide.in.cpp index 8ad877117e009f..46d4faa9321e7b 100644 --- a/ngraph/test/backend/divide.in.cpp +++ b/ngraph/test/backend/divide.in.cpp @@ -52,18 +52,18 @@ NGRAPH_TEST(${BACKEND_NAME}, divide) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{2, 4, 8, 16}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 4, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -74,18 +74,18 @@ NGRAPH_TEST(${BACKEND_NAME}, 
divide_int32) { Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); - auto B = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); + auto B = make_shared(element::Type_t::i32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); copy_data(a, vector{0x40000140, 0x40000001, 8, 16}); - auto b = backend->create_tensor(element::i32, shape); + auto b = backend->create_tensor(element::Type_t::i32, shape); copy_data(b, vector{2, 5, 4, 8}); - auto result = backend->create_tensor(element::i32, shape); + auto result = backend->create_tensor(element::Type_t::i32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -96,18 +96,18 @@ NGRAPH_TEST(${BACKEND_NAME}, divide_cpp_rounding_int32) { Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); - auto B = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); + auto B = make_shared(element::Type_t::i32, shape); auto f = make_shared(make_shared(A, B, false), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); copy_data(a, vector{-10, -10, 10, 10}); - auto b = backend->create_tensor(element::i32, shape); + auto b = backend->create_tensor(element::Type_t::i32, shape); copy_data(b, vector{-3, 3, -3, 3}); - auto result = backend->create_tensor(element::i32, shape); + auto result = backend->create_tensor(element::Type_t::i32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -118,18 +118,18 @@ NGRAPH_TEST(${BACKEND_NAME}, divide_python_rounding_int32) { Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); - auto B = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); + auto B = make_shared(element::Type_t::i32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); copy_data(a, vector{-10, -10, 10, 10}); - auto b = backend->create_tensor(element::i32, shape); + auto b = backend->create_tensor(element::Type_t::i32, shape); copy_data(b, vector{-3, 3, -3, 3}); - auto result = backend->create_tensor(element::i32, shape); + auto result = backend->create_tensor(element::Type_t::i32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -140,18 +140,18 @@ NGRAPH_TEST(${BACKEND_NAME}, divide_overload) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(A / B, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{2, 4, 8, 16}); - auto b = backend->create_tensor(element::f32, 
shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 4, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -162,18 +162,18 @@ NGRAPH_TEST(${BACKEND_NAME}, divide_by_zero_float32) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{2, 4, 8, 16}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{0, 0, 0, 0}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); diff --git a/ngraph/test/backend/dyn_reshape.in.cpp b/ngraph/test/backend/dyn_reshape.in.cpp index a7e6ebbe425442..ca382cee1d99f2 100644 --- a/ngraph/test/backend/dyn_reshape.in.cpp +++ b/ngraph/test/backend/dyn_reshape.in.cpp @@ -29,8 +29,8 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, reshape_v1) { - auto arg = std::make_shared(element::i64, PartialShape::dynamic()); - auto pattern = make_shared(element::i64, PartialShape::dynamic(1)); + auto arg = std::make_shared(element::Type_t::i64, PartialShape::dynamic()); + auto pattern = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); auto reshape_v1 = std::make_shared(arg, pattern, false); auto f = std::make_shared(NodeVector{reshape_v1}, ParameterVector{arg, pattern}); @@ -41,15 +41,15 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_v1) auto arg_data = vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; auto pattern_data = vector{2, 2, 3}; - auto arg_tensor = backend->create_tensor(element::i64, Shape{arg_data.size()}); - auto pattern_tensor = backend->create_tensor(element::i64, Shape{pattern_data.size()}); + auto arg_tensor = backend->create_tensor(element::Type_t::i64, Shape{arg_data.size()}); + auto pattern_tensor = backend->create_tensor(element::Type_t::i64, Shape{pattern_data.size()}); copy_data(arg_tensor, arg_data); copy_data(pattern_tensor, pattern_data); - auto output = backend->create_dynamic_tensor(element::i64, PartialShape::dynamic()); + auto output = backend->create_dynamic_tensor(element::Type_t::i64, PartialShape::dynamic()); ex->call_with_validate({output}, {arg_tensor, pattern_tensor}); - ASSERT_EQ(output->get_element_type(), element::i64); + ASSERT_EQ(output->get_element_type(), element::Type_t::i64); EXPECT_EQ(read_vector(output), vector({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12})); } diff --git a/ngraph/test/backend/dynamic.in.cpp b/ngraph/test/backend/dynamic.in.cpp index a7b21469063b52..911d9acf649ff5 100644 --- a/ngraph/test/backend/dynamic.in.cpp +++ b/ngraph/test/backend/dynamic.in.cpp @@ -39,7 +39,8 @@ NGRAPH_TEST(${BACKEND_NAME}, create_dynamic_backend) NGRAPH_TEST(${BACKEND_NAME}, create_dynamic_tensor) { auto backend = runtime::Backend::create("${BACKEND_NAME}", true); - auto t = 
backend->create_dynamic_tensor(element::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto t = backend->create_dynamic_tensor(element::Type_t::f32, + PartialShape{2, Dimension::dynamic(), 3}); ASSERT_TRUE(t->get_partial_shape().same_scheme(PartialShape{2, Dimension::dynamic(), 3})); } @@ -48,9 +49,12 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_abc) // // Create a graph for f(a,b,c) = (a+b)*c, where a, b, c all have shape {2,?,3}. // - auto a = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); - auto b = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); - auto c = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto a = + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto b = + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto c = + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); auto a_plus_b_times_c = (a + b) * c; @@ -66,8 +70,8 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_abc) // // Create a dynamic output tensor with shape {2,?,3}. // - auto t_r = - backend->create_dynamic_tensor(element::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, + PartialShape{2, Dimension::dynamic(), 3}); // // For each of n=[0,...,5), run the compiled executable against a test vector of shape @@ -83,9 +87,9 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_abc) } // Create static tensors for the inputs and copy data. - auto t_a = backend->create_tensor(element::f32, Shape{2, middle_dim, 3}); - auto t_b = backend->create_tensor(element::f32, Shape{2, middle_dim, 3}); - auto t_c = backend->create_tensor(element::f32, Shape{2, middle_dim, 3}); + auto t_a = backend->create_tensor(element::Type_t::f32, Shape{2, middle_dim, 3}); + auto t_b = backend->create_tensor(element::Type_t::f32, Shape{2, middle_dim, 3}); + auto t_c = backend->create_tensor(element::Type_t::f32, Shape{2, middle_dim, 3}); copy_data(t_a, inputs); copy_data(t_b, inputs); @@ -112,9 +116,9 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_abc) static void axpy_test(const PartialShape& input_pshape, const std::vector& input_shapes) { - auto a = make_shared(element::f32, input_pshape); - auto x = make_shared(element::f32, input_pshape); - auto y = make_shared(element::f32, input_pshape); + auto a = make_shared(element::Type_t::f32, input_pshape); + auto x = make_shared(element::Type_t::f32, input_pshape); + auto y = make_shared(element::Type_t::f32, input_pshape); auto axpy = a * x + y; @@ -122,7 +126,7 @@ static void axpy_test(const PartialShape& input_pshape, const std::vector auto backend = runtime::Backend::create("${BACKEND_NAME}", true); auto ex = backend->compile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, input_pshape); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, input_pshape); for (auto& shape : input_shapes) { @@ -132,9 +136,9 @@ static void axpy_test(const PartialShape& input_pshape, const std::vector inputs[i] = i; } - auto t_a = backend->create_tensor(element::f32, shape); - auto t_x = backend->create_tensor(element::f32, shape); - auto t_y = backend->create_tensor(element::f32, shape); + auto t_a = backend->create_tensor(element::Type_t::f32, shape); + auto t_x = backend->create_tensor(element::Type_t::f32, shape); + auto t_y = backend->create_tensor(element::Type_t::f32, shape); copy_data(t_a, inputs); copy_data(t_x, inputs); @@ -179,13 +183,13 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_axpy) static void 
to_vector_test(const PartialShape& input_pshape, const std::vector& input_shapes) { - auto x = make_shared(element::f32, input_pshape); + auto x = make_shared(element::Type_t::f32, input_pshape); shared_ptr x_new_shape = make_shared(x); - auto axes = op::Constant::create(element::i64, {}, {0}); + auto axes = op::Constant::create(element::Type_t::i64, {}, {0}); x_new_shape = make_shared(x_new_shape, axes); x_new_shape = make_shared( - x_new_shape, op::Constant::create(element::u64, {1}, Shape{1}), false); + x_new_shape, op::Constant::create(element::Type_t::u64, {1}, Shape{1}), false); auto x_reshaped = make_shared(x, x_new_shape, true); @@ -193,7 +197,7 @@ static void to_vector_test(const PartialShape& input_pshape, const std::vectorcompile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic(1)); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic(1)); for (auto& shape : input_shapes) { @@ -203,7 +207,7 @@ static void to_vector_test(const PartialShape& input_pshape, const std::vectorcreate_tensor(element::f32, shape); + auto t_x = backend->create_tensor(element::Type_t::f32, shape); copy_data(t_x, inputs); @@ -241,11 +245,12 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_to_vector) static void reverse_shape_test(const PartialShape& input_pshape, const std::vector& input_shapes) { - auto x = make_shared(element::f32, input_pshape); + auto x = make_shared(element::Type_t::f32, input_pshape); shared_ptr x_new_shape = make_shared(x); - x_new_shape = make_shared( - x_new_shape, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + x_new_shape = make_shared(x_new_shape, + op::Constant::create(element::Type_t::i64, {1}, {0}), + op::v1::Reverse::Mode::INDEX); auto x_reshaped = make_shared(x, x_new_shape, true); @@ -253,7 +258,7 @@ static void reverse_shape_test(const PartialShape& input_pshape, auto backend = runtime::Backend::create("${BACKEND_NAME}", true); auto ex = backend->compile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); for (auto& shape : input_shapes) { @@ -263,7 +268,7 @@ static void reverse_shape_test(const PartialShape& input_pshape, inputs[i] = i; } - auto t_x = backend->create_tensor(element::f32, shape); + auto t_x = backend->create_tensor(element::Type_t::f32, shape); copy_data(t_x, inputs); @@ -302,8 +307,8 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_reverse_shape) NGRAPH_TEST(${BACKEND_NAME}, dynamic_transpose) { - auto arg = std::make_shared(element::i32, PartialShape::dynamic()); - auto input_order = make_shared(element::i32, PartialShape::dynamic()); + auto arg = std::make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto input_order = make_shared(element::Type_t::i32, PartialShape::dynamic()); auto transpose = std::make_shared(arg, input_order); auto f = std::make_shared(NodeVector{transpose}, ParameterVector{arg, input_order}); @@ -314,15 +319,16 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_transpose) auto arg_data = vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; auto input_order_data = vector{2, 0, 1}; - auto arg_tensor = backend->create_tensor(element::i32, Shape{2, 2, 3}); - auto input_order_tensor = backend->create_tensor(element::i32, Shape{input_order_data.size()}); + auto arg_tensor = backend->create_tensor(element::Type_t::i32, Shape{2, 2, 3}); + auto input_order_tensor = + backend->create_tensor(element::Type_t::i32, 
Shape{input_order_data.size()}); copy_data(arg_tensor, arg_data); copy_data(input_order_tensor, input_order_data); - auto output = backend->create_dynamic_tensor(element::i32, PartialShape::dynamic()); + auto output = backend->create_dynamic_tensor(element::Type_t::i32, PartialShape::dynamic()); ex->call_with_validate({output}, {arg_tensor, input_order_tensor}); - ASSERT_EQ(output->get_element_type(), element::i32); + ASSERT_EQ(output->get_element_type(), element::Type_t::i32); EXPECT_EQ(read_vector(output), vector({1, 4, 7, 10, 2, 5, 8, 11, 3, 6, 9, 12})); } diff --git a/ngraph/test/backend/erf.in.cpp b/ngraph/test/backend/erf.in.cpp index 1cbe2260567594..baac0c7861eaff 100644 --- a/ngraph/test/backend/erf.in.cpp +++ b/ngraph/test/backend/erf.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, erf) { Shape shape{8}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/exp.in.cpp b/ngraph/test/backend/exp.in.cpp index f4d3ae2a1c53f5..52369462c7cb81 100644 --- a/ngraph/test/backend/exp.in.cpp +++ b/ngraph/test/backend/exp.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, exp) { Shape shape{8}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/floor.in.cpp b/ngraph/test/backend/floor.in.cpp index 03d919b1aa5f70..bb8675c92e9ec2 100644 --- a/ngraph/test/backend/floor.in.cpp +++ b/ngraph/test/backend/floor.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, floor) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); @@ -58,7 +58,7 @@ NGRAPH_TEST(${BACKEND_NAME}, floor) NGRAPH_TEST(${BACKEND_NAME}, floor_int32) { Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); @@ -71,7 +71,7 @@ NGRAPH_TEST(${BACKEND_NAME}, floor_int64) { // This tests large numbers that will not fit in a double Shape shape{3}; - auto A = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/function_name.in.cpp b/ngraph/test/backend/function_name.in.cpp index c8f99e5d179426..559d4ce901ea36 100644 --- a/ngraph/test/backend/function_name.in.cpp +++ b/ngraph/test/backend/function_name.in.cpp @@ -33,8 +33,8 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, function_name) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(A + B, ParameterVector{A, B}, "funky func name"); auto backend = runtime::Backend::create("${BACKEND_NAME}"); diff --git a/ngraph/test/backend/fused_op.in.cpp b/ngraph/test/backend/fused_op.in.cpp index 
38d451aa56499f..155a11f7f028b2 100644 --- a/ngraph/test/backend/fused_op.in.cpp +++ b/ngraph/test/backend/fused_op.in.cpp @@ -57,7 +57,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, elu) { - auto A = make_shared(element::f32, Shape{3, 2}); + auto A = make_shared(element::Type_t::f32, Shape{3, 2}); auto elu = make_shared(A, 0.5f); auto function = make_shared(NodeVector{elu}, ParameterVector{A}); @@ -70,7 +70,7 @@ NGRAPH_TEST(${BACKEND_NAME}, elu) NGRAPH_TEST(${BACKEND_NAME}, elu_negative_alpha) { - auto A = make_shared(element::f32, Shape{3, 2}); + auto A = make_shared(element::Type_t::f32, Shape{3, 2}); auto elu = make_shared(A, -1.f); auto function = make_shared(NodeVector{elu}, ParameterVector{A}); @@ -85,8 +85,8 @@ NGRAPH_TEST(${BACKEND_NAME}, prelu) { Shape shape{3, 2}; Shape rshape{3}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, rshape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, rshape); auto prelu = make_shared(A, B); auto f = make_shared(NodeVector{prelu}, ParameterVector{A, B}); std::vector a{-2, 3, -2, 1, -1, 0}; @@ -103,7 +103,7 @@ NGRAPH_TEST(${BACKEND_NAME}, hardsigmoid) const Shape shape{2, 7}; const float alpha_f = 0.125f; const float beta_f = 0.642f; - const auto A = make_shared(element::f32, shape); + const auto A = make_shared(element::Type_t::f32, shape); const auto alpha = op::Constant::create(A->get_element_type(), Shape{}, {alpha_f}); const auto beta = op::Constant::create(A->get_element_type(), Shape{}, {beta_f}); auto hardsigmoid = make_shared(A, alpha, beta); @@ -138,8 +138,8 @@ NGRAPH_TEST(${BACKEND_NAME}, prelu_shared_slope) { Shape shape{3, 2}; Shape rshape{}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, rshape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, rshape); auto prelu = make_shared(A, B); auto f = make_shared(NodeVector{prelu}, ParameterVector{A, B}); std::vector a{-2, 3, -2, 1, -1, 0}; @@ -155,8 +155,8 @@ NGRAPH_TEST(${BACKEND_NAME}, prelu_negative_slope) { Shape shape{3, 2}; Shape rshape{}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, rshape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, rshape); auto prelu = make_shared(A, B); auto f = make_shared(NodeVector{prelu}, ParameterVector{A, B}); std::vector a{-2, 3, -2, 1, -1, 0}; @@ -170,8 +170,8 @@ NGRAPH_TEST(${BACKEND_NAME}, prelu_negative_slope) NGRAPH_TEST(${BACKEND_NAME}, group_conv) { - auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -193,8 +193,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv) NGRAPH_TEST(${BACKEND_NAME}, group_conv_striding) { - auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{2, 2}, @@ -215,8 +215,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_striding) NGRAPH_TEST(${BACKEND_NAME}, group_conv_window_dilation) { - auto data = 
make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -238,8 +238,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_window_dilation) NGRAPH_TEST(${BACKEND_NAME}, group_conv_data_dilation) { - auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -262,8 +262,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_data_dilation) NGRAPH_TEST(${BACKEND_NAME}, group_conv_padding) { - auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -286,8 +286,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_padding) NGRAPH_TEST(${BACKEND_NAME}, group_conv_padding_and_window_dilation) { - auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -310,8 +310,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_padding_and_window_dilation) NGRAPH_TEST(${BACKEND_NAME}, group_conv_input_shape_variation) { - auto data = make_shared(element::f32, Shape{1, 4, 4, 1}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 4, 1}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -334,8 +334,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_input_shape_variation) NGRAPH_TEST(${BACKEND_NAME}, group_conv_input_data_variation) { - auto data = make_shared(element::f32, Shape{1, 4, 3, 3}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 3, 3}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -360,8 +360,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_input_data_variation) NGRAPH_TEST(${BACKEND_NAME}, group_conv_groups_included_in_shape) { - auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 1, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 1, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -382,7 +382,7 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_groups_included_in_shape) NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_block_first) { - auto A = make_shared(element::f32, Shape{1, 2, 4, 4}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2, 4, 4}); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; auto space_to_depth = make_shared(A, mode, 2); auto function = make_shared(NodeVector{space_to_depth}, 
ParameterVector{A}); @@ -403,7 +403,7 @@ NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_block_first) NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_depth_first) { - auto A = make_shared(element::f32, Shape{1, 2, 4, 4}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2, 4, 4}); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; auto space_to_depth = make_shared(A, mode, 2); auto function = make_shared(NodeVector{space_to_depth}, ParameterVector{A}); @@ -421,7 +421,7 @@ NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_depth_first) NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_block_first) { - auto A = make_shared(element::f32, Shape{1, 8, 2, 2}); + auto A = make_shared(element::Type_t::f32, Shape{1, 8, 2, 2}); auto depth_to_space = make_shared(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2); auto function = make_shared(NodeVector{depth_to_space}, ParameterVector{A}); @@ -440,7 +440,7 @@ NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_block_first) NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_depth_first) { - auto A = make_shared(element::f32, Shape{1, 8, 2, 2}); + auto A = make_shared(element::Type_t::f32, Shape{1, 8, 2, 2}); auto depth_to_space = make_shared(A, op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2); auto function = make_shared(NodeVector{depth_to_space}, ParameterVector{A}); @@ -460,8 +460,9 @@ NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_depth_first) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_4d) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{3}, vector{1, 2, 3}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = + make_shared(element::Type_t::i64, Shape{3}, vector{1, 2, 3}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -487,8 +488,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_4d) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_empty_axes_input) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{0}, vector{}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = make_shared(element::Type_t::i64, Shape{0}, vector{}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -515,8 +516,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_empty_axes_input) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_h_4d) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{1}, vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = make_shared(element::Type_t::i64, Shape{1}, vector{1}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -541,8 +542,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_h_4d) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_1axis_5d) { Shape data_shape{1, 2, 2, 2, 3}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{1}, vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = make_shared(element::Type_t::i64, Shape{1}, vector{1}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -567,8 +568,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_1axis_5d) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_123axes_5d) { Shape data_shape{1, 2, 2, 2, 3}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{3}, vector{1, 2, 3}); + auto data = 
make_shared(element::Type_t::f32, data_shape); + const auto axes = + make_shared(element::Type_t::i64, Shape{3}, vector{1, 2, 3}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -593,8 +595,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_123axes_5d) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_c_2x2_shape) { Shape data_shape{2, 2}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{}, vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = make_shared(element::Type_t::i64, Shape{}, vector{1}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -617,8 +619,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_c_2x2_shape) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_c_2x4_shape) { Shape data_shape{2, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{}, vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = make_shared(element::Type_t::i64, Shape{}, vector{1}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -648,8 +650,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_c_2x4_shape) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_4d_max_bias) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{3}, vector{1, 2, 3}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = + make_shared(element::Type_t::i64, Shape{3}, vector{1, 2, 3}); float eps{5000}; auto eps_mode = op::EpsMode::MAX; @@ -696,7 +699,7 @@ namespace NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_double) { - auto type = element::f64; + auto type = element::Type_t::f64; typedef double ctype; auto sshape = Shape{5, 2}; @@ -782,7 +785,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_double) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_float) { - auto type = element::f32; + auto type = element::Type_t::f32; typedef float ctype; auto sshape = Shape{5, 2}; @@ -868,7 +871,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_float) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int8) { - auto type = element::i8; + auto type = element::Type_t::i8; typedef int8_t ctype; auto sshape = Shape{4, 2}; @@ -897,7 +900,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int8) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int16) { - auto type = element::i16; + auto type = element::Type_t::i16; typedef int16_t ctype; auto sshape = Shape{4, 2}; @@ -926,7 +929,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int16) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int32) { - auto type = element::i32; + auto type = element::Type_t::i32; typedef int32_t ctype; auto sshape = Shape{4, 2}; @@ -955,7 +958,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int32) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int64) { - auto type = element::i64; + auto type = element::Type_t::i64; typedef int64_t ctype; auto sshape = Shape{4, 2}; @@ -984,7 +987,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int64) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint8) { - auto type = element::u8; + auto type = element::Type_t::u8; typedef uint8_t ctype; auto sshape = Shape{4, 2}; @@ -1016,7 +1019,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint8) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint16) { - auto type = element::u16; + auto type = element::Type_t::u16; typedef uint16_t ctype; auto sshape = Shape{4, 2}; @@ -1048,7 +1051,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint16) NGRAPH_TEST(${BACKEND_NAME}, 
fused_clamp_uint32) { - auto type = element::u32; + auto type = element::Type_t::u32; typedef uint32_t ctype; auto sshape = Shape{4, 2}; @@ -1080,7 +1083,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint32) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint64) { - auto type = element::u64; + auto type = element::Type_t::u64; typedef uint64_t ctype; auto sshape = Shape{4, 2}; @@ -1112,7 +1115,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint64) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_float16) { - auto type = element::f16; + auto type = element::Type_t::f16; typedef float16 ctype; auto sshape = Shape{5, 2}; @@ -1198,7 +1201,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_float16) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_bfloat16) { - auto type = element::bf16; + auto type = element::Type_t::bf16; typedef bfloat16 ctype; auto sshape = Shape{5, 2}; @@ -1285,7 +1288,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_bfloat16) NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_normalization) { Shape data_shape{1, 2, 5}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto mvn_func = make_shared(data, true, false); auto function = make_shared(NodeVector{mvn_func}, ParameterVector{data}); @@ -1305,7 +1308,7 @@ NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_normalization) NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_normalization_split_channels) { Shape data_shape{1, 2, 5, 1}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto mvn_func = make_shared(data, false, false); auto function = make_shared(NodeVector{mvn_func}, ParameterVector{data}); @@ -1325,7 +1328,7 @@ NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_normalization_split_channels) NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization) { Shape data_shape{1, 2, 5}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto mvn_func = make_shared(data); auto function = make_shared(NodeVector{mvn_func}, ParameterVector{data}); @@ -1354,7 +1357,7 @@ NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization) NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization_split_channels) { Shape data_shape{1, 2, 5}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto mvn_func = make_shared(data, false); auto function = make_shared(NodeVector{mvn_func}, ParameterVector{data}); @@ -1383,7 +1386,7 @@ NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization_split_channels) NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization_shared_across_channel_batch_size_2) { Shape data_shape{2, 2, 5}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto mvn_func = make_shared(data, true); auto function = make_shared(NodeVector{mvn_func}, ParameterVector{data}); @@ -1406,7 +1409,7 @@ NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization_shared_across_chann NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization_not_shared_across_channel_batch_size_2) { Shape data_shape{2, 2, 5}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto mvn_func = make_shared(data, false); auto function = make_shared(NodeVector{mvn_func}, ParameterVector{data}); @@ -1429,7 +1432,7 @@ NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization_not_shared_across_c NGRAPH_TEST(${BACKEND_NAME}, grn_4d) { const Shape 
data_shape{1, 2, 3, 4}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); float bias{1e-6f}; const auto grn = make_shared(data, bias); @@ -1453,7 +1456,7 @@ NGRAPH_TEST(${BACKEND_NAME}, grn_4d) NGRAPH_TEST(${BACKEND_NAME}, grn_2d_with_bias) { const Shape data_shape{3, 4}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); float bias{2.25f}; const auto grn = make_shared(data, bias); @@ -1484,9 +1487,9 @@ NGRAPH_TEST(${BACKEND_NAME}, grn_2d_with_bias) NGRAPH_TEST(${BACKEND_NAME}, unsqueeze) { - auto data_node = make_shared(element::f32, Shape{4, 2}); + auto data_node = make_shared(element::Type_t::f32, Shape{4, 2}); auto axes_node = - make_shared(element::i64, Shape{2}, vector{1, 2}); + make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto squeeze = make_shared(data_node, axes_node); auto function = make_shared(NodeVector{squeeze}, ParameterVector{data_node}); @@ -1499,7 +1502,7 @@ NGRAPH_TEST(${BACKEND_NAME}, unsqueeze) NGRAPH_TEST(${BACKEND_NAME}, shuffle_channels_simple) { - const auto data = make_shared(element::i32, Shape{1, 15, 2, 2}); + const auto data = make_shared(element::Type_t::i32, Shape{1, 15, 2, 2}); auto tested_op = make_shared(data, 1, 5); auto function = make_shared(tested_op, ParameterVector{data}); @@ -1523,7 +1526,7 @@ NGRAPH_TEST(${BACKEND_NAME}, shuffle_channels_negative_axis) // in this test the output is the same as in shuffle_channels_simple but // the axis value is negative and the C(channels) value is in a different dimension(0) of the // shape - const auto data = make_shared(element::i32, Shape{15, 2, 1, 2}); + const auto data = make_shared(element::Type_t::i32, Shape{15, 2, 1, 2}); auto tested_op = make_shared(data, -4, 5); auto function = make_shared(tested_op, ParameterVector{data}); @@ -1544,7 +1547,7 @@ NGRAPH_TEST(${BACKEND_NAME}, shuffle_channels_negative_axis) NGRAPH_TEST(${BACKEND_NAME}, shuffle_channels_float) { - const auto data = make_shared(element::f32, Shape{6, 1, 1, 1}); + const auto data = make_shared(element::Type_t::f32, Shape{6, 1, 1, 1}); auto tested_op = make_shared(data, 0, 2); auto function = make_shared(tested_op, ParameterVector{data}); @@ -1559,9 +1562,9 @@ NGRAPH_TEST(${BACKEND_NAME}, shuffle_channels_float) NGRAPH_TEST(${BACKEND_NAME}, squeeze) { - const auto data_node = make_shared(element::f32, Shape{1, 4, 1, 1, 2}); + const auto data_node = make_shared(element::Type_t::f32, Shape{1, 4, 1, 1, 2}); const auto axes_node = - make_shared(element::i64, Shape{2}, vector{0, 2}); + make_shared(element::Type_t::i64, Shape{2}, vector{0, 2}); const auto squeeze = make_shared(data_node, axes_node); const auto function = make_shared(NodeVector{squeeze}, ParameterVector{data_node}); @@ -1575,9 +1578,9 @@ NGRAPH_TEST(${BACKEND_NAME}, squeeze) NGRAPH_TEST(${BACKEND_NAME}, squeeze_default_axes) { - const auto data_node = make_shared(element::f32, Shape{1, 4, 1, 1, 2}); + const auto data_node = make_shared(element::Type_t::f32, Shape{1, 4, 1, 1, 2}); const auto axes_node = - make_shared(element::i64, Shape{0}, vector{}); + make_shared(element::Type_t::i64, Shape{0}, vector{}); const auto squeeze = make_shared(data_node, axes_node); const auto function = make_shared(NodeVector{squeeze}, ParameterVector{data_node}); @@ -1591,15 +1594,15 @@ NGRAPH_TEST(${BACKEND_NAME}, squeeze_default_axes) NGRAPH_TEST(${BACKEND_NAME}, squeeze_dynamic) { - const auto data_param = make_shared(element::f32, 
Shape{1, 4, 1, 1, 2}); - const auto axes_param = make_shared(element::i64, Shape{2}); + const auto data_param = make_shared(element::Type_t::f32, Shape{1, 4, 1, 1, 2}); + const auto axes_param = make_shared(element::Type_t::i64, Shape{2}); EXPECT_THROW(make_shared(data_param, axes_param), CheckFailure); } NGRAPH_TEST(${BACKEND_NAME}, squared_difference) { - const auto x1 = make_shared(element::f32, Shape{2, 2}); - const auto x2 = make_shared(element::f32, Shape{2, 2}); + const auto x1 = make_shared(element::Type_t::f32, Shape{2, 2}); + const auto x2 = make_shared(element::Type_t::f32, Shape{2, 2}); auto tested_op = make_shared(x1, x2); auto function = make_shared(tested_op, ParameterVector{x1, x2}); @@ -1614,8 +1617,8 @@ NGRAPH_TEST(${BACKEND_NAME}, squared_difference) NGRAPH_TEST(${BACKEND_NAME}, squared_difference_broadcast) { - const auto x1 = make_shared(element::i32, Shape{2, 2}); - const auto x2 = make_shared(element::i32, Shape{}); + const auto x1 = make_shared(element::Type_t::i32, Shape{2, 2}); + const auto x2 = make_shared(element::Type_t::i32, Shape{}); auto tested_op = make_shared(x1, x2); auto function = make_shared(tested_op, ParameterVector{x1, x2}); @@ -1635,15 +1638,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_zero_bias_peepholes) const size_t hidden_size = 3; const size_t gates_count = 4; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{gates_count * hidden_size}); - const auto P = make_shared(element::f32, Shape{3 * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto C_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{gates_count * hidden_size}); + const auto P = make_shared(element::Type_t::f32, Shape{3 * hidden_size}); const auto lstm_cell = make_shared( X, @@ -1710,15 +1716,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_bias_peepholes) const size_t hidden_size = 3; const size_t gates_count = 4; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{gates_count * hidden_size}); - const auto P = make_shared(element::f32, Shape{3 * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t 
= + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto C_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{gates_count * hidden_size}); + const auto P = make_shared(element::Type_t::f32, Shape{3 * hidden_size}); const auto lstm_cell = make_shared(X, H_t, C_t, W, R, B, hidden_size); @@ -1799,15 +1808,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_bias_peepholes_clip_input_forget) const float clip_threshold = 3.5f; bool input_forget = true; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{gates_count * hidden_size}); - const auto P = make_shared(element::f32, Shape{3 * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto C_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{gates_count * hidden_size}); + const auto P = make_shared(element::Type_t::f32, Shape{3 * hidden_size}); const auto lstm_cell = make_shared(X, H_t, @@ -1900,15 +1912,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_activaction_functions) vector activation_alpha{0.f, 0.f, 1.8345f}; vector activation_beta{0.f, 0.f, 3.05f}; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{gates_count * hidden_size}); - const auto P = make_shared(element::f32, Shape{3 * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto C_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{gates_count * hidden_size}); + const auto P = make_shared(element::Type_t::f32, Shape{3 * hidden_size}); const auto lstm_cell = make_shared(X, H_t, @@ -1993,11 +2008,11 @@ NGRAPH_TEST(${BACKEND_NAME}, fake_quantize) { const Shape data_shape{1, 2, 3, 4}; const size_t levels = 4; - const auto data = make_shared(element::f32, data_shape); - const auto input_low = make_shared(element::f32, Shape{}); - const auto input_high = make_shared(element::f32, Shape{}); - const auto output_low = make_shared(element::f32, 
Shape{}); - const auto output_high = make_shared(element::f32, Shape{}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto input_low = make_shared(element::Type_t::f32, Shape{}); + const auto input_high = make_shared(element::Type_t::f32, Shape{}); + const auto output_low = make_shared(element::Type_t::f32, Shape{}); + const auto output_high = make_shared(element::Type_t::f32, Shape{}); const auto quantize = make_shared(data, input_low, input_high, output_low, output_high, levels); @@ -2036,11 +2051,11 @@ NGRAPH_TEST(${BACKEND_NAME}, fake_quantize_with_clip) { const Shape data_shape{1, 2, 3, 4}; const size_t levels = 5; - const auto data = make_shared(element::f32, data_shape); - const auto input_low = make_shared(element::f32, Shape{}); - const auto input_high = make_shared(element::f32, Shape{}); - const auto output_low = make_shared(element::f32, Shape{}); - const auto output_high = make_shared(element::f32, Shape{}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto input_low = make_shared(element::Type_t::f32, Shape{}); + const auto input_high = make_shared(element::Type_t::f32, Shape{}); + const auto output_low = make_shared(element::Type_t::f32, Shape{}); + const auto output_high = make_shared(element::Type_t::f32, Shape{}); const auto quantize = make_shared(data, input_low, input_high, output_low, output_high, levels); @@ -2076,11 +2091,11 @@ NGRAPH_TEST(${BACKEND_NAME}, fake_quantize_with_clip_across_channels) { Shape data_shape{1, 2, 5, 5}; size_t levels = 5; - auto data = make_shared(element::f32, data_shape); - auto input_low = make_shared(element::f32, Shape{2, 1, 1}); - auto input_high = make_shared(element::f32, Shape{2, 1, 1}); - auto output_low = make_shared(element::f32, Shape{2, 1, 1}); - auto output_high = make_shared(element::f32, Shape{2, 1, 1}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto input_low = make_shared(element::Type_t::f32, Shape{2, 1, 1}); + auto input_high = make_shared(element::Type_t::f32, Shape{2, 1, 1}); + auto output_low = make_shared(element::Type_t::f32, Shape{2, 1, 1}); + auto output_high = make_shared(element::Type_t::f32, Shape{2, 1, 1}); auto quantize = make_shared(data, input_low, input_high, output_low, output_high, levels); @@ -2119,11 +2134,11 @@ NGRAPH_TEST(${BACKEND_NAME}, fake_quantize_pdpd) { Shape data_shape{1, 2, 5, 5}; size_t levels = 5; - auto data = make_shared(element::f32, data_shape); - auto input_low = make_shared(element::f32, Shape{2}); - auto input_high = make_shared(element::f32, Shape{2}); - auto output_low = make_shared(element::f32, Shape{2}); - auto output_high = make_shared(element::f32, Shape{2}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto input_low = make_shared(element::Type_t::f32, Shape{2}); + auto input_high = make_shared(element::Type_t::f32, Shape{2}); + auto output_low = make_shared(element::Type_t::f32, Shape{2}); + auto output_high = make_shared(element::Type_t::f32, Shape{2}); auto quantize = make_shared(data, @@ -2170,10 +2185,12 @@ NGRAPH_TEST(${BACKEND_NAME}, rnn_cell_no_bias) const size_t input_size = 3; const size_t hidden_size = 3; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto W = make_shared(element::f32, Shape{hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); + const auto X = make_shared(element::Type_t::f32, 
Shape{batch_size, input_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto W = make_shared(element::Type_t::f32, Shape{hidden_size, input_size}); + const auto R = + make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); const auto rnn_cell = make_shared(X, H_t, W, R, hidden_size); auto function = make_shared(rnn_cell, ParameterVector{X, H_t, W, R}); @@ -2220,11 +2237,13 @@ NGRAPH_TEST(${BACKEND_NAME}, rnn_cell_bias_clip) const size_t hidden_size = 3; float clip = 2.88f; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto W = make_shared(element::f32, Shape{hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto W = make_shared(element::Type_t::f32, Shape{hidden_size, input_size}); + const auto R = + make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); + const auto B = make_shared(element::Type_t::f32, Shape{hidden_size}); const auto rnn_cell = make_shared(X, H_t, @@ -2282,11 +2301,13 @@ NGRAPH_TEST(${BACKEND_NAME}, rnn_cell_activation_function) const size_t hidden_size = 3; float clip = 2.88f; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto W = make_shared(element::f32, Shape{hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto W = make_shared(element::Type_t::f32, Shape{hidden_size, input_size}); + const auto R = + make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); + const auto B = make_shared(element::Type_t::f32, Shape{hidden_size}); const auto rnn_cell = make_shared(X, H_t, @@ -2346,13 +2367,15 @@ NGRAPH_TEST(${BACKEND_NAME}, gru_cell_bias_clip) float clip = 2.88f; bool linear_before_reset = false; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{gates_count * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{gates_count * hidden_size}); const auto gru_cell = make_shared(X, H_t, @@ -2419,13 +2442,15 @@ NGRAPH_TEST(${BACKEND_NAME}, gru_cell_linear_before_reset) float clip = 2.88f; bool linear_before_reset = true; - const auto X = make_shared(element::f32, 
Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{(gates_count + 1) * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{(gates_count + 1) * hidden_size}); const auto gru_cell = make_shared(X, H_t, @@ -2491,13 +2516,15 @@ NGRAPH_TEST(${BACKEND_NAME}, gru_cell_activation_function) float clip = 2.88f; bool linear_before_reset = true; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{(gates_count + 1) * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{(gates_count + 1) * hidden_size}); const auto gru_cell = make_shared(X, H_t, @@ -2561,31 +2588,31 @@ NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_space_to_depth_block_first) Shape dts_input_shape{2, 32, 2, 4, 2, 4}; size_t block_size = 2; - auto dts_input = make_shared(element::f32, dts_input_shape); + auto dts_input = make_shared(element::Type_t::f32, dts_input_shape); auto depth_to_space = make_shared( dts_input, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, block_size); auto dts_func = make_shared(NodeVector{depth_to_space}, ParameterVector{dts_input}); - auto dts_input_tensor = backend->create_tensor(element::f32, dts_input_shape); + auto dts_input_tensor = backend->create_tensor(element::Type_t::f32, dts_input_shape); const auto data_size = shape_size(dts_input_shape); vector data(data_size); std::iota(data.begin(), data.end(), 0); copy_data(dts_input_tensor, data); const auto dts_output_shape = depth_to_space->get_output_shape(0); - auto dts_output_tensor = backend->create_tensor(element::f32, dts_output_shape); + auto dts_output_tensor = backend->create_tensor(element::Type_t::f32, dts_output_shape); auto handle = backend->compile(dts_func); handle->call_with_validate({dts_output_tensor}, {dts_input_tensor}); auto dts_result = read_vector(dts_output_tensor); // use depth_to_space output as space_to_depth input - auto std_input = make_shared(element::f32, dts_output_shape); + auto std_input = make_shared(element::Type_t::f32, dts_output_shape); auto space_to_depth = make_shared( std_input, op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, block_size); auto std_func = make_shared(NodeVector{space_to_depth}, ParameterVector{std_input}); - auto 
std_input_tensor = backend->create_tensor(element::f32, dts_output_shape); + auto std_input_tensor = backend->create_tensor(element::Type_t::f32, dts_output_shape); copy_data(std_input_tensor, dts_result); - auto std_output_tensor = backend->create_tensor(element::f32, dts_input_shape); + auto std_output_tensor = backend->create_tensor(element::Type_t::f32, dts_input_shape); handle = backend->compile(std_func); handle->call_with_validate({std_output_tensor}, {std_input_tensor}); auto std_result = read_vector(std_output_tensor); @@ -2601,31 +2628,31 @@ NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_space_to_depth_depth_first) Shape dts_input_shape{2, 32, 2, 4, 2, 4}; size_t block_size = 2; - auto dts_input = make_shared(element::f32, dts_input_shape); + auto dts_input = make_shared(element::Type_t::f32, dts_input_shape); auto depth_to_space = make_shared( dts_input, op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, block_size); auto dts_func = make_shared(NodeVector{depth_to_space}, ParameterVector{dts_input}); - auto dts_input_tensor = backend->create_tensor(element::f32, dts_input_shape); + auto dts_input_tensor = backend->create_tensor(element::Type_t::f32, dts_input_shape); const auto data_size = shape_size(dts_input_shape); vector data(data_size); std::iota(data.begin(), data.end(), 0); copy_data(dts_input_tensor, data); const auto dts_output_shape = depth_to_space->get_output_shape(0); - auto dts_output_tensor = backend->create_tensor(element::f32, dts_output_shape); + auto dts_output_tensor = backend->create_tensor(element::Type_t::f32, dts_output_shape); auto handle = backend->compile(dts_func); handle->call_with_validate({dts_output_tensor}, {dts_input_tensor}); auto dts_result = read_vector(dts_output_tensor); // use depth_to_space output as space_to_depth input - auto std_input = make_shared(element::f32, dts_output_shape); + auto std_input = make_shared(element::Type_t::f32, dts_output_shape); auto space_to_depth = make_shared( std_input, op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, block_size); auto std_func = make_shared(NodeVector{space_to_depth}, ParameterVector{std_input}); - auto std_input_tensor = backend->create_tensor(element::f32, dts_output_shape); + auto std_input_tensor = backend->create_tensor(element::Type_t::f32, dts_output_shape); copy_data(std_input_tensor, dts_result); - auto std_output_tensor = backend->create_tensor(element::f32, dts_input_shape); + auto std_output_tensor = backend->create_tensor(element::Type_t::f32, dts_input_shape); handle = backend->compile(std_func); handle->call_with_validate({std_output_tensor}, {std_input_tensor}); auto std_result = read_vector(std_output_tensor); diff --git a/ngraph/test/backend/gather.in.cpp b/ngraph/test/backend/gather.in.cpp index 0f60ed9c1f7144..4447196522264a 100644 --- a/ngraph/test/backend/gather.in.cpp +++ b/ngraph/test/backend/gather.in.cpp @@ -40,9 +40,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_4d_indices_axis_0_uint8) Shape data_shape{3, 2}; Shape indices_shape{2, 2, 3, 4}; Shape out_shape{2, 2, 3, 4, 2}; - auto P = make_shared(element::u8, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::u8, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -65,9 +65,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_4d_indices_axis_0_2d_input) 
Shape data_shape{3, 2}; Shape indices_shape{2, 2, 3, 4}; Shape out_shape{2, 2, 3, 4, 2}; - auto P = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::f32, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -93,9 +93,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_3d_indices_axis_0_2d_input) Shape data_shape{3, 2}; Shape indices_shape{2, 3, 4}; Shape out_shape{2, 3, 4, 2}; - auto P = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::f32, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -116,9 +116,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_2d_indices_axis_0_2d_input) Shape data_shape{3, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::f32, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -135,9 +135,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_2d_negative_and_positive_indices_axis_0_2d_i Shape data_shape{3, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::f32, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -154,9 +154,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_1d_indices_axis_0_1d_input) Shape data_shape{3}; Shape indices_shape{2}; Shape out_shape{2}; - auto P = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::f32, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -172,9 +172,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_scalar_indices_axis_0_2d_input) Shape data_shape{3, 2}; Shape indices_shape{}; Shape out_shape{2}; - auto P = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::f32, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -190,9 +190,9 @@ NGRAPH_TEST(${BACKEND_NAME}, 
gather_2d_indices_axis_1_2d_input) Shape data_shape{3, 3}; Shape indices_shape{1, 2}; Shape out_shape{3, 1, 2}; - auto P = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {1}); + auto P = make_shared(element::Type_t::f32, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -208,9 +208,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_1d_indices_axis_2_4d_input) Shape data_shape{2, 2, 3, 3}; Shape indices_shape{2}; Shape out_shape{2, 2, 2, 3}; - auto P = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {2}); + auto P = make_shared(element::Type_t::f32, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {2}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -231,9 +231,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_scalar_indices_axis_1_2d_input) Shape data_shape{3, 3}; Shape indices_shape{}; Shape out_shape{3}; - auto P = make_shared(element::f32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {1}); + auto P = make_shared(element::Type_t::f32, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -249,9 +249,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_int8) Shape data_shape{3, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::i8, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::i8, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -267,9 +267,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_int16) Shape data_shape{3, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::i16, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::i16, data_shape); + auto I = make_shared(element::Type_t::i64, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -285,9 +285,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_int32) Shape data_shape{3, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::i32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::i32, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -303,9 +303,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_int64) Shape data_shape{3, 2}; 
Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::i64, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::i64, data_shape); + auto I = make_shared(element::Type_t::i64, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -321,9 +321,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_uint8) Shape data_shape{3, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::u8, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::u8, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -339,9 +339,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_uint16) Shape data_shape{3, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::u16, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::u16, data_shape); + auto I = make_shared(element::Type_t::i64, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -357,9 +357,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_uint32) Shape data_shape{3, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::u32, data_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::u32, data_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -375,9 +375,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_uint64) Shape data_shape{3, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::u64, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::u64, data_shape); + auto I = make_shared(element::Type_t::i64, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); @@ -393,9 +393,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_bool) Shape data_shape{3, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::boolean, data_shape); - auto I = make_shared(element::i64, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::boolean, data_shape); + auto I = make_shared(element::Type_t::i64, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); auto f = make_shared(G, ParameterVector{P, I}); diff --git a/ngraph/test/backend/gather_nd.in.cpp b/ngraph/test/backend/gather_nd.in.cpp index 7bb292efd53e4b..10a8d5dda6e064 100644 --- 
a/ngraph/test/backend/gather_nd.in.cpp +++ b/ngraph/test/backend/gather_nd.in.cpp @@ -45,19 +45,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_single_indices) Shape params_shape{3, 3}; Shape indices_shape{2}; Shape out_shape{}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = make_shared(P, I); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{1, 2}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); @@ -77,19 +77,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_scalar_from_2d) Shape params_shape{2, 2}; Shape indices_shape{2, 2}; Shape out_shape{2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = make_shared(P, I); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{0, 0, 1, 1}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); @@ -109,19 +109,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_1d_from_2d) Shape params_shape{2, 2}; Shape indices_shape{2, 1}; Shape out_shape{2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = make_shared(P, I); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{1, 0}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); @@ -143,19 +143,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_scalar_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 
3}; Shape out_shape{2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = make_shared(P, I); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{0, 0, 1, 1, 0, 1}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); @@ -175,19 +175,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_1d_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = make_shared(P, I); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{0, 1, 1, 0}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); @@ -209,19 +209,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_2d_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{1, 1}; Shape out_shape{1, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = make_shared(P, I); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{1}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); @@ -243,19 +243,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_scalar_from_2d) Shape params_shape{2, 2}; Shape indices_shape{2, 1, 2}; Shape out_shape{2, 1}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + 
auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = make_shared(P, I); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{0, 0, 0, 1}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); @@ -275,19 +275,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_1d_from_2d) Shape params_shape{2, 2}; Shape indices_shape{2, 1, 1}; Shape out_shape{2, 1, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = make_shared(P, I); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{1, 0}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); @@ -309,19 +309,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_scalar_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 2, 3}; Shape out_shape{2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = make_shared(P, I); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); @@ -343,19 +343,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_1d_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = 
make_shared(P, I); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{0, 1, 1, 0, 0, 0, 1, 1}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); @@ -372,24 +372,50 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_1d_from_3d) MIN_FLOAT_TOLERANCE_BITS)); } +NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_1d_from_3d_negative) +{ + Shape params_shape{2, 2, 2}; + Shape indices_shape{2, 2, 2}; + Shape out_shape{2, 2, 2}; + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto G = make_shared(P, I); + auto f = make_shared(G, ParameterVector{P, I}); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + // Create some tensors for input/output + auto p = backend->create_tensor(element::Type_t::f32, params_shape); + copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f}); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); + copy_data(i, vector{0, -1, -1, 0, 0, 0, 1, 1}); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); + + auto c = backend->compile(f); + c->call_with_validate({result}, {p, i}); + EXPECT_TRUE(test::all_close_f((vector{1.2f, 1.3f, 2.0f, 2.1f, 1.0f, 1.1f, 2.2f, 2.3f}), + read_vector(result), + MIN_FLOAT_TOLERANCE_BITS)); +} + NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_2d_from_3d) { Shape params_shape{2, 2, 2}; Shape indices_shape{2, 1, 1}; Shape out_shape{2, 1, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = make_shared(P, I); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{1, 0}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); @@ -412,20 +438,20 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_dims1) Shape indices_shape{2, 1}; Shape out_shape{2, 4}; int batch_dims = 1; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = make_shared(P, I, batch_dims); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // 
Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{1, 0}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); @@ -440,22 +466,22 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_dims2) Shape indices_shape{2, 3, 3, 2}; Shape out_shape{6, 3}; int batch_dims = 2; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = make_shared(P, I, batch_dims); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{1, 0, 3, 1, 2, 1, 0, 1, 1, 1, 2, 0, 3, 0, 3, 1, 2, 1, 2, 0, 1, 1, 3, 1, 1, 1, 2, 0, 2, 0, 0, 0, 3, 1, 3, 1}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); @@ -471,20 +497,20 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_dims2_lead_dims) Shape indices_shape{2, 3, 1, 1}; Shape out_shape{6, 1}; int batch_dims = 2; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G = make_shared(P, I, batch_dims); auto f = make_shared(G, ParameterVector{P, I}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto p = backend->create_tensor(element::f32, params_shape); + auto p = backend->create_tensor(element::Type_t::f32, params_shape); copy_data(p, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}); - auto i = backend->create_tensor(element::i32, indices_shape); + auto i = backend->create_tensor(element::Type_t::i32, indices_shape); copy_data(i, vector{1, 0, 2, 0, 2, 2}); - auto result = backend->create_tensor(element::f32, out_shape); + auto result = backend->create_tensor(element::Type_t::f32, out_shape); auto c = backend->compile(f); c->call_with_validate({result}, {p, i}); diff --git a/ngraph/test/backend/gelu.in.cpp b/ngraph/test/backend/gelu.in.cpp index 5e99792b678f00..426f92c74ec954 100644 --- a/ngraph/test/backend/gelu.in.cpp +++ b/ngraph/test/backend/gelu.in.cpp @@ -50,7 +50,7 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, gelu_f32) { Shape shape{100000}; 
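The change repeated throughout these hunks swaps the unscoped constants (element::f32, element::i32, element::boolean) for the scoped enum values element::Type_t::f32 and so on; both spellings convert to element::Type, so the node and tensor factories accept either. A minimal sketch of the pattern, not part of the patch, assuming the public nGraph API and the copy_data() helper from this repository's test utilities (header path assumed):

#include <memory>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "util/test_tools.hpp" // assumed location of copy_data()

using namespace ngraph;

void element_type_enum_example()
{
    Shape shape{2, 2};
    // Old spelling: make_shared<op::Parameter>(element::f32, shape)
    auto A = std::make_shared<op::Parameter>(element::Type_t::f32, shape);
    auto f = std::make_shared<Function>(std::make_shared<op::Negative>(A), ParameterVector{A});

    // "INTERPRETER" is used only as an example backend name here.
    auto backend = runtime::Backend::create("INTERPRETER");
    auto a = backend->create_tensor(element::Type_t::f32, shape);
    copy_data(a, std::vector<float>{1, -2, 3, -4});
    auto result = backend->create_tensor(element::Type_t::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    // result now holds {-1, 2, -3, 4}
}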
- auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -66,9 +66,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gelu_f32) } // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, args[0]); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::transform(args[0].begin(), args[0].end(), args[0].begin(), [](float x) -> float { return 0.5f * x * (1.0f + erf(x / sqrt(2.0f))); @@ -82,16 +82,16 @@ NGRAPH_TEST(${BACKEND_NAME}, gelu_f32) NGRAPH_TEST(${BACKEND_NAME}, gelu_f64) { Shape shape{8}; - auto A = make_shared(element::f64, shape); + auto A = make_shared(element::Type_t::f64, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f64, shape); + auto a = backend->create_tensor(element::Type_t::f64, shape); vector input{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0}; copy_data(a, input); - auto result = backend->create_tensor(element::f64, shape); + auto result = backend->create_tensor(element::Type_t::f64, shape); std::transform(input.begin(), input.end(), input.begin(), [](double x) -> double { return 0.5 * x * (1.0 + erf(x / sqrt(2.0))); diff --git a/ngraph/test/backend/group_convolution.in.cpp b/ngraph/test/backend/group_convolution.in.cpp index 8db4e90d6a7dfa..762884564f6eb7 100644 --- a/ngraph/test/backend/group_convolution.in.cpp +++ b/ngraph/test/backend/group_convolution.in.cpp @@ -38,11 +38,11 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, dyn_group_convolution_backprop_data) { Shape shape_filter{6, 1, 3, 3}; - auto filters = make_shared(element::f32, PartialShape::dynamic()); + auto filters = make_shared(element::Type_t::f32, PartialShape::dynamic()); Shape shape_delta{2, 6, 3, 3}; - auto deltas = make_shared(element::f32, PartialShape::dynamic()); + auto deltas = make_shared(element::Type_t::f32, PartialShape::dynamic()); Shape shape_data_batch{2, 3, 5, 5}; - auto data_batch = make_shared(element::f32, PartialShape::dynamic()); + auto data_batch = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto strides = Strides{1, 1}; auto dilations = Strides{1, 1}; auto padding_begin = CoordinateDiff{0, 0}; @@ -58,7 +58,7 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_group_convolution_backprop_data) auto handle = backend->compile(f); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); vector filter, delta, data, expected_result; @@ -74,11 +74,11 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_group_convolution_backprop_data) for (int i = 0; i < 2 * 3 * 5 * 5; i++) expected_result.emplace_back(i); - auto a = backend->create_tensor(element::f32, shape_data_batch); + auto a = backend->create_tensor(element::Type_t::f32, shape_data_batch); copy_data(a, data); - auto b = backend->create_tensor(element::f32, shape_filter); + auto b = backend->create_tensor(element::Type_t::f32, shape_filter); copy_data(b, filter); - auto c = backend->create_tensor(element::f32, shape_delta); + auto c = backend->create_tensor(element::Type_t::f32, shape_delta); 
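The dyn_group_convolution_backprop_data hunk above builds its Parameters with PartialShape::dynamic() and reads the output through create_dynamic_tensor. A reduced sketch of that dynamic-shape flow, again not part of the patch; the boolean passed to Backend::create (requesting dynamic-shape support) is an assumption about the backend factory's signature:

#include <memory>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "util/test_tools.hpp" // assumed location of copy_data()

using namespace ngraph;

void dynamic_shape_example()
{
    // Rank and dimensions stay unknown until a concrete tensor is bound at call time.
    auto data = std::make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
    auto f = std::make_shared<Function>(std::make_shared<op::Relu>(data), ParameterVector{data});

    // Second argument assumed: it asks for a backend that can execute dynamic graphs.
    auto backend = runtime::Backend::create("INTERPRETER", true);
    auto handle = backend->compile(f);

    // The output shape is not known statically, so a dynamic tensor receives the result.
    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

    auto a = backend->create_tensor(element::Type_t::f32, Shape{2, 3});
    copy_data(a, std::vector<float>{-1, 2, -3, 4, -5, 6});
    handle->call_with_validate({result}, {a});
    // result->get_shape() is now {2, 3}
}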
copy_data(c, delta); handle->call_with_validate({result}, {a, b, c}); EXPECT_FALSE(test::all_close_f(vector{expected_result}, read_vector(result))); @@ -93,8 +93,8 @@ NGRAPH_TEST(${BACKEND_NAME}, v1_group_conv_backprop_data) Strides dilations{1, 1}; const op::PadType auto_pad{op::PadType::EXPLICIT}; - auto data = make_shared(element::f32, Shape{1, 1, 3, 3}); - auto filters = make_shared(element::f32, Shape{1, 1, 1, 3, 3}); + auto data = make_shared(element::Type_t::f32, Shape{1, 1, 3, 3}); + auto filters = make_shared(element::Type_t::f32, Shape{1, 1, 1, 3, 3}); auto gcbd = make_shared( data, filters, strides, pads_begin, pads_end, dilations, auto_pad, output_padding); @@ -139,9 +139,9 @@ NGRAPH_TEST(${BACKEND_NAME}, v1_group_conv_backprop_data_output_shape) Strides strides{1, 1}; Strides dilations{1, 1}; - auto data = make_shared(element::f32, Shape{1, 1, 1, 10}); - auto filters = make_shared(element::f32, Shape{1, 1, 1, 1, 5}); - auto output_shape = op::Constant::create(element::i64, Shape{2}, {1, 14}); + auto data = make_shared(element::Type_t::f32, Shape{1, 1, 1, 10}); + auto filters = make_shared(element::Type_t::f32, Shape{1, 1, 1, 1, 5}); + auto output_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 14}); auto gcbd = make_shared( data, filters, output_shape, strides, dilations, op::PadType::SAME_UPPER); diff --git a/ngraph/test/backend/hard_sigmoid.in.cpp b/ngraph/test/backend/hard_sigmoid.in.cpp index b8379c0695042d..08798a191deb46 100644 --- a/ngraph/test/backend/hard_sigmoid.in.cpp +++ b/ngraph/test/backend/hard_sigmoid.in.cpp @@ -33,10 +33,10 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, hard_sigmoid_1d) { const Shape a_shape{3}; - const auto A = make_shared(element::f32, a_shape); + const auto A = make_shared(element::Type_t::f32, a_shape); - const auto alpha = op::Constant::create(element::f32, Shape{}, {0.5f}); - const auto beta = op::Constant::create(element::f32, Shape{}, {0.6f}); + const auto alpha = op::Constant::create(element::Type_t::f32, Shape{}, {0.5f}); + const auto beta = op::Constant::create(element::Type_t::f32, Shape{}, {0.6f}); const auto R = make_shared(A, alpha, beta); const auto f = make_shared(R, ParameterVector{A}); @@ -55,10 +55,10 @@ NGRAPH_TEST(${BACKEND_NAME}, hard_sigmoid_1d) NGRAPH_TEST(${BACKEND_NAME}, hard_sigmoid_2d) { const Shape a_shape{2, 5}; - const auto A = make_shared(element::f32, a_shape); + const auto A = make_shared(element::Type_t::f32, a_shape); - const auto alpha = op::Constant::create(element::f32, Shape{}, {0.2f}); - const auto beta = op::Constant::create(element::f32, Shape{}, {0.5f}); + const auto alpha = op::Constant::create(element::Type_t::f32, Shape{}, {0.2f}); + const auto beta = op::Constant::create(element::Type_t::f32, Shape{}, {0.5f}); const auto R = make_shared(A, alpha, beta); const auto f = make_shared(R, ParameterVector{A}); diff --git a/ngraph/test/backend/interpolate.in.cpp b/ngraph/test/backend/interpolate.in.cpp index 9fcc1e1a324a57..6911c81bb8bc3e 100644 --- a/ngraph/test/backend/interpolate.in.cpp +++ b/ngraph/test/backend/interpolate.in.cpp @@ -42,16 +42,17 @@ NGRAPH_TEST(${BACKEND_NAME}, interpolate_down_scales_const_linear) attrs.axes = AxisSet{0, 1, 2, 3}; attrs.mode = "linear"; attrs.align_corners = false; - const auto input = make_shared(element::f32, input_shape); - const auto output_shape_input = op::v0::Constant::create(element::i64, {4}, {1, 1, 1, 2}); + const auto input = make_shared(element::Type_t::f32, input_shape); + const auto 
output_shape_input = + op::v0::Constant::create(element::Type_t::i64, {4}, {1, 1, 1, 2}); std::vector intput_data{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}; auto interpolate = make_shared(input, output_shape_input, attrs); auto f = make_shared(interpolate, ParameterVector{input}); auto backend = runtime::Backend::create("IE_CPU"); - auto input_tensor = backend->create_tensor(element::f32, input_shape); - auto result_tensor = backend->create_tensor(element::f32, output_shape); + auto input_tensor = backend->create_tensor(element::Type_t::f32, input_shape); + auto result_tensor = backend->create_tensor(element::Type_t::f32, output_shape); auto handle = backend->compile(f); copy_data(input_tensor, intput_data); diff --git a/ngraph/test/backend/layer_norm.in.cpp b/ngraph/test/backend/layer_norm.in.cpp index ebb0feb4f654ec..9fa0c3267ff34d 100644 --- a/ngraph/test/backend/layer_norm.in.cpp +++ b/ngraph/test/backend/layer_norm.in.cpp @@ -48,18 +48,18 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, layer_norm_affine_stats) { - auto p_data = make_shared(element::f32, Shape{2, 4}); - auto p_scale = make_shared(element::f32, Shape{4}); - auto p_bias = make_shared(element::f32, Shape{4}); + auto p_data = make_shared(element::Type_t::f32, Shape{2, 4}); + auto p_scale = make_shared(element::Type_t::f32, Shape{4}); + auto p_bias = make_shared(element::Type_t::f32, Shape{4}); auto ln = make_shared(p_data, p_scale, p_bias); auto f = make_shared(ln->outputs(), ParameterVector{p_data, p_scale, p_bias}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create tensors for input - auto data = backend->create_tensor(element::f32, Shape{2, 4}); - auto scale = backend->create_tensor(element::f32, Shape{4}); - auto bias = backend->create_tensor(element::f32, Shape{4}); + auto data = backend->create_tensor(element::Type_t::f32, Shape{2, 4}); + auto scale = backend->create_tensor(element::Type_t::f32, Shape{4}); + auto bias = backend->create_tensor(element::Type_t::f32, Shape{4}); // Fill in input tensors vector d_input{-4.0f, -3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f}; copy_data(data, d_input); @@ -68,9 +68,9 @@ NGRAPH_TEST(${BACKEND_NAME}, layer_norm_affine_stats) vector b_input{-4.0f, -3.0f, -2.0f, -1.0f}; copy_data(bias, b_input); // Create tensors for output - auto norm = backend->create_tensor(element::f32, Shape{2, 4}); - auto mean = backend->create_tensor(element::f32, Shape{2}); - auto var = backend->create_tensor(element::f32, Shape{2}); + auto norm = backend->create_tensor(element::Type_t::f32, Shape{2, 4}); + auto mean = backend->create_tensor(element::Type_t::f32, Shape{2}); + auto var = backend->create_tensor(element::Type_t::f32, Shape{2}); // Expected results (Manually computed) vector exp_norm{-2.658364534378051758f, diff --git a/ngraph/test/backend/log.in.cpp b/ngraph/test/backend/log.in.cpp index f3558820d39901..5b45b8687289ca 100644 --- a/ngraph/test/backend/log.in.cpp +++ b/ngraph/test/backend/log.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, log) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); std::vector a{0.125f, 0.25f, 0.5f, 1.f, 2.f, 4.f, 8.f, 16.f}; diff --git a/ngraph/test/backend/log_softmax.in.cpp b/ngraph/test/backend/log_softmax.in.cpp index 1304e8156325b5..f9a24a83b24098 100644 --- a/ngraph/test/backend/log_softmax.in.cpp +++ 
b/ngraph/test/backend/log_softmax.in.cpp @@ -45,13 +45,13 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, log_softmax_1d_single_value) { Shape shape{1}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::vector expected_result{0}; @@ -64,13 +64,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_1d_single_value) NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis0) { Shape shape{2, 4}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::vector expected_result{-10000., -10000., -10000., -10000., 0., 0., 0., 0.}; @@ -83,13 +83,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis0) NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis1) { Shape shape{2, 4}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::vector expected_result{-3.4401896, -2.4401896, @@ -109,13 +109,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis1) NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg1) { Shape shape{2, 4}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::vector expected_result{-3.4401896, -2.4401896, @@ -135,13 +135,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg1) NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg2) { Shape shape{2, 4}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::vector expected_result{-10000., -10000., -10000., -10000., 0., 0., 0., 0.}; @@ -154,13 +154,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg2) NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_0) { Shape shape{3, 2, 3}; - auto A = 
make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::vector expected_result{-12.0024818, -12.0024818, @@ -190,13 +190,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_0) NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_1) { Shape shape{3, 2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::vector expected_result{-3.04858735, -3.04858735, @@ -226,13 +226,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_1) NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_2) { Shape shape{3, 2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::vector expected_result{-2.40760596, -1.40760596, @@ -262,13 +262,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_2) NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg1) { Shape shape{3, 2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::vector expected_result{-2.40760596, -1.40760596, @@ -298,13 +298,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg1) NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg2) { Shape shape{3, 2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::vector expected_result{-3.04858735, -3.04858735, @@ -334,13 +334,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg2) NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg3) { Shape shape{3, 2, 3}; - auto A = make_shared(element::f32, shape); + auto A = 
make_shared(element::Type_t::f32, shape); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::vector expected_result{-12.0024818, -12.0024818, diff --git a/ngraph/test/backend/logical_and.in.cpp b/ngraph/test/backend/logical_and.in.cpp index 680f9444a705e5..e39d68971a2b6b 100644 --- a/ngraph/test/backend/logical_and.in.cpp +++ b/ngraph/test/backend/logical_and.in.cpp @@ -31,8 +31,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, logical_and) { Shape shape{3, 4}; - auto A = make_shared(element::boolean, shape); - auto B = make_shared(element::boolean, shape); + auto A = make_shared(element::Type_t::boolean, shape); + auto B = make_shared(element::Type_t::boolean, shape); auto f = make_shared(std::make_shared(A, B), ParameterVector{A, B}); diff --git a/ngraph/test/backend/logical_not.in.cpp b/ngraph/test/backend/logical_not.in.cpp index c59654b048275b..a33db690a3cc6d 100644 --- a/ngraph/test/backend/logical_not.in.cpp +++ b/ngraph/test/backend/logical_not.in.cpp @@ -48,7 +48,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, not) { Shape shape{2, 2}; - auto A = make_shared(element::boolean, shape); + auto A = make_shared(element::Type_t::boolean, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); std::vector a{1, 0, 1, 0}; @@ -62,7 +62,7 @@ NGRAPH_TEST(${BACKEND_NAME}, not) NGRAPH_TEST(${BACKEND_NAME}, not_i32) { Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); std::vector a{1, 0, 2, 0}; diff --git a/ngraph/test/backend/logical_or.in.cpp b/ngraph/test/backend/logical_or.in.cpp index bfe148a86656b2..40e23624f8dd5c 100644 --- a/ngraph/test/backend/logical_or.in.cpp +++ b/ngraph/test/backend/logical_or.in.cpp @@ -31,8 +31,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, logical_or) { Shape shape{2, 2, 2}; - auto A = make_shared(element::boolean, shape); - auto B = make_shared(element::boolean, shape); + auto A = make_shared(element::Type_t::boolean, shape); + auto B = make_shared(element::Type_t::boolean, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); std::vector a{1, 0, 1, 1, 1, 0, 1, 0}; diff --git a/ngraph/test/backend/logical_xor.in.cpp b/ngraph/test/backend/logical_xor.in.cpp index f71a3f8aa5f8e1..c4ee11b8ec8afc 100644 --- a/ngraph/test/backend/logical_xor.in.cpp +++ b/ngraph/test/backend/logical_xor.in.cpp @@ -29,8 +29,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, logical_xor) { Shape shape{2, 2, 2}; - auto A = make_shared(element::boolean, shape); - auto B = make_shared(element::boolean, shape); + auto A = make_shared(element::Type_t::boolean, shape); + auto B = make_shared(element::Type_t::boolean, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); std::vector a{1, 0, 1, 1, 1, 0, 1, 0}; diff --git a/ngraph/test/backend/lrn.in.cpp b/ngraph/test/backend/lrn.in.cpp index 3c568e76d041d3..7aebbef155d89b 100644 --- a/ngraph/test/backend/lrn.in.cpp +++ b/ngraph/test/backend/lrn.in.cpp @@ 
-41,7 +41,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, lrn_across_channel) { Shape shape{2, 3, 2, 1}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); double alpha = 3; double beta = 0.5; double bias = 1; @@ -73,8 +73,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_channel) NGRAPH_TEST(${BACKEND_NAME}, lrn_across_h) { Shape shape{2, 3, 2, 1}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{1}, vector{2}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{1}, vector{2}); double alpha = 3; double beta = 0.5; double bias = 1; @@ -106,8 +106,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_h) NGRAPH_TEST(${BACKEND_NAME}, lrn_across_hw) { Shape shape{2, 3, 2, 1}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{2}, vector{2, 3}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{2, 3}); double alpha = 3; double beta = 0.5; double bias = 1; @@ -139,8 +139,9 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_hw) NGRAPH_TEST(${BACKEND_NAME}, lrn_across_all_dims) { Shape shape{2, 3, 2, 1}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{4}, vector{0, 1, 2, 3}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = + make_shared(element::Type_t::i64, Shape{4}, vector{0, 1, 2, 3}); double alpha = 3; double beta = 0.5; double bias = 1; @@ -172,8 +173,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_all_dims) NGRAPH_TEST(${BACKEND_NAME}, lrn_across_nw) { Shape shape{2, 3, 2, 1}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{2}, vector{0, 3}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{0, 3}); double alpha = 3; double beta = 0.5; double bias = 1; @@ -205,8 +206,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_nw) NGRAPH_TEST(${BACKEND_NAME}, lrn_across_empty) { Shape shape{2, 3, 2, 1}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{0}, vector{}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{0}, vector{}); double alpha = 3; double beta = 0.5; double bias = 1; @@ -238,8 +239,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_empty) NGRAPH_TEST(${BACKEND_NAME}, lrn_6D_across_2_axes) { Shape shape{2, 3, 2, 2, 1, 1}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{2}, vector{2, 3}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{2, 3}); double alpha = 3; double beta = 0.5; double bias = 1; @@ -264,8 +265,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_6D_across_2_axes) NGRAPH_TEST(${BACKEND_NAME}, lrn_2d_across_empty) { Shape shape{12}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{0}, vector{}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{0}, vector{}); double alpha = 3; double beta = 0.5; double bias = 1; @@ -296,8 +297,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_2d_across_empty) NGRAPH_TEST(${BACKEND_NAME}, lrn_2d_across_outermost_axis) { Shape shape{6, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{1}, 
vector{0}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{1}, vector{0}); double alpha = 0.0002; double beta = 0.5; double bias = 2.0; diff --git a/ngraph/test/backend/matmul.in.cpp b/ngraph/test/backend/matmul.in.cpp index a134de115e8309..827208e3aa31b2 100644 --- a/ngraph/test/backend/matmul.in.cpp +++ b/ngraph/test/backend/matmul.in.cpp @@ -46,18 +46,18 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x0_0x2) Shape shape_b{0, 2}; Shape shape_r{2, 2}; - auto A = make_shared(element::f32, shape_a); - auto B = make_shared(element::f32, shape_b); + auto A = make_shared(element::Type_t::f32, shape_a); + auto B = make_shared(element::Type_t::f32, shape_b); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto b = backend->create_tensor(element::f32, shape_b); + auto b = backend->create_tensor(element::Type_t::f32, shape_b); copy_data(b, vector{}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. @@ -72,20 +72,20 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_0x2_2x0) { Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{2, 0}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_r{0, 0}; auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto b = backend->create_tensor(element::f32, shape_b); + auto b = backend->create_tensor(element::Type_t::f32, shape_b); copy_data(b, vector{}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -96,20 +96,20 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_3x2_2x0) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{2, 0}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_r{3, 0}; auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto b = backend->create_tensor(element::f32, shape_b); + auto b = backend->create_tensor(element::Type_t::f32, shape_b); copy_data(b, vector{}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -119,19 +119,19 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_3x2_2x0) NGRAPH_TEST(${BACKEND_NAME}, 
matmul_2x2_2x2) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); Shape shape_r{2, 2}; auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{5, 6, 7, 8}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -143,17 +143,17 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x3_3x3) Shape shape_in1{2, 3}; Shape shape_in2{3, 3}; Shape shape_out{2, 3}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, false, false); auto f = make_shared(matmul, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::f32, shape_in1); - shared_ptr b = backend->create_tensor(element::f32, shape_in2); - shared_ptr result = backend->create_tensor(element::f32, shape_out); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1); + shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out); copy_data(a, vector{1.f, 2.f, 3.f, 4.f, 5.f, 6.f}); copy_data(b, vector{1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f}); @@ -170,17 +170,17 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x3_3x3_int64) Shape shape_in1{2, 3}; Shape shape_in2{3, 3}; Shape shape_out{2, 3}; - auto A = make_shared(element::i64, shape_in1); - auto B = make_shared(element::i64, shape_in2); + auto A = make_shared(element::Type_t::i64, shape_in1); + auto B = make_shared(element::Type_t::i64, shape_in2); auto matmul = make_shared(A, B, false, false); auto f = make_shared(matmul, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::i64, shape_in1); - shared_ptr b = backend->create_tensor(element::i64, shape_in2); - shared_ptr result = backend->create_tensor(element::i64, shape_out); + shared_ptr a = backend->create_tensor(element::Type_t::i64, shape_in1); + shared_ptr b = backend->create_tensor(element::Type_t::i64, shape_in2); + shared_ptr result = backend->create_tensor(element::Type_t::i64, shape_out); copy_data(a, vector{1, 2, 3, 4, 5, 6}); copy_data(b, vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); @@ -197,17 +197,17 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_3x2_3x3_transpose) Shape shape_in1{3, 2}; Shape shape_in2{3, 3}; Shape shape_out{2, 3}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, true, false); auto f = make_shared(matmul, 
ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::f32, shape_in1); - shared_ptr b = backend->create_tensor(element::f32, shape_in2); - shared_ptr result = backend->create_tensor(element::f32, shape_out); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1); + shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out); copy_data(a, vector{1.f, 4.f, 2.f, 5.f, 3.f, 6.f}); copy_data(b, vector{1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f}); @@ -224,17 +224,17 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_3x2_2x3_transpose) Shape shape_in1{3, 2}; Shape shape_in2{2, 3}; Shape shape_out{2, 2}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, true, true); auto f = make_shared(matmul, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::f32, shape_in1); - shared_ptr b = backend->create_tensor(element::f32, shape_in2); - shared_ptr result = backend->create_tensor(element::f32, shape_out); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1); + shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out); copy_data(a, vector{1.f, 4.f, 2.f, 5.f, 3.f, 6.f}); copy_data(b, vector{1.f, 3.f, 5.f, 2.f, 4.f, 6.f}); @@ -251,17 +251,17 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x3x2_3x3_transpose) Shape shape_in1{2, 3, 2}; Shape shape_in2{3, 3}; Shape shape_out{2, 2, 3}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, true, false); auto f = make_shared(matmul, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::f32, shape_in1); - shared_ptr b = backend->create_tensor(element::f32, shape_in2); - shared_ptr result = backend->create_tensor(element::f32, shape_out); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1); + shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out); copy_data(a, vector{1.f, 4.f, 2.f, 5.f, 3.f, 6.f, 3.f, 2.f, 1.f, 4.f, 5.f, 6.f}); copy_data(b, vector{1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f}); @@ -279,16 +279,16 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2_2) Shape shape_in1{2}; Shape shape_in2{2}; Shape shape_out{}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, false, false); auto f = make_shared(matmul, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - shared_ptr a = backend->create_tensor(element::f32, shape_in1); - shared_ptr b = backend->create_tensor(element::f32, shape_in2); - shared_ptr result = 
backend->create_tensor(element::f32, shape_out); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1); + shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out); copy_data(a, vector{1.f, 2.f}); copy_data(b, vector{1.f, 2.f}); @@ -304,16 +304,16 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x2x3_2x1x3_transpose) Shape shape_in1{2, 2, 3}; Shape shape_in2{2, 1, 3}; Shape shape_out{2, 2, 1}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, false, true); auto f = make_shared(matmul, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - shared_ptr a = backend->create_tensor(element::f32, shape_in1); - shared_ptr b = backend->create_tensor(element::f32, shape_in2); - shared_ptr result = backend->create_tensor(element::f32, shape_out); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1); + shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out); vector in1_vec(shape_size(shape_in1)); vector in2_vec(shape_size(shape_in2)); @@ -336,16 +336,16 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x2x3_2x1x3_transpose_int64) Shape shape_in1{2, 2, 3}; Shape shape_in2{2, 1, 3}; Shape shape_out{2, 2, 1}; - auto A = make_shared(element::i64, shape_in1); - auto B = make_shared(element::i64, shape_in2); + auto A = make_shared(element::Type_t::i64, shape_in1); + auto B = make_shared(element::Type_t::i64, shape_in2); auto matmul = make_shared(A, B, false, true); auto f = make_shared(matmul, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - shared_ptr a = backend->create_tensor(element::i64, shape_in1); - shared_ptr b = backend->create_tensor(element::i64, shape_in2); - shared_ptr result = backend->create_tensor(element::i64, shape_out); + shared_ptr a = backend->create_tensor(element::Type_t::i64, shape_in1); + shared_ptr b = backend->create_tensor(element::Type_t::i64, shape_in2); + shared_ptr result = backend->create_tensor(element::Type_t::i64, shape_out); vector in1_vec(shape_size(shape_in1)); vector in2_vec(shape_size(shape_in2)); @@ -367,16 +367,16 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x2x3_2x3x1_int64) Shape shape_in1{2, 2, 3}; Shape shape_in2{2, 3, 1}; Shape shape_out{2, 2, 1}; - auto A = make_shared(element::i64, shape_in1); - auto B = make_shared(element::i64, shape_in2); + auto A = make_shared(element::Type_t::i64, shape_in1); + auto B = make_shared(element::Type_t::i64, shape_in2); auto matmul = make_shared(A, B, false, false); auto f = make_shared(matmul, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - shared_ptr a = backend->create_tensor(element::i64, shape_in1); - shared_ptr b = backend->create_tensor(element::i64, shape_in2); - shared_ptr result = backend->create_tensor(element::i64, shape_out); + shared_ptr a = backend->create_tensor(element::Type_t::i64, shape_in1); + shared_ptr b = backend->create_tensor(element::Type_t::i64, shape_in2); + shared_ptr result = backend->create_tensor(element::Type_t::i64, shape_out); vector in1_vec(shape_size(shape_in1)); vector in2_vec(shape_size(shape_in2)); @@ -398,17 +398,17 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_1x2x3_1x4x3x2) Shape shape_in1{1, 2, 
3}; Shape shape_in2{1, 4, 3, 2}; Shape shape_out{1, 4, 2, 2}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, false, false); auto f = make_shared(matmul, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::f32, shape_in1); - shared_ptr b = backend->create_tensor(element::f32, shape_in2); - shared_ptr result = backend->create_tensor(element::f32, shape_out); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1); + shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out); vector in1_vec(shape_size(shape_in1)); vector in2_vec(shape_size(shape_in2)); @@ -455,8 +455,8 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_1_3_x_3_false_false_param) std::vector inputs_b{1, 2, 3}; std::vector expected_result{14.}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, transpose_a, transpose_b); auto f = make_shared(matmul, ParameterVector{A, B}); @@ -481,8 +481,8 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_3_1_x_3_true_false_param) std::vector inputs_b{1, 2, 3}; std::vector expected_result{14.}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, transpose_a, transpose_b); auto f = make_shared(matmul, ParameterVector{A, B}); @@ -508,8 +508,8 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_3_x_3_1_false_false_param) std::vector inputs_b{1, 2, 3}; std::vector expected_result{14.}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, transpose_a, transpose_b); auto f = make_shared(matmul, ParameterVector{A, B}); @@ -534,8 +534,8 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_3_x_1_3_false_true_param) std::vector inputs_b{1, 2, 3}; std::vector expected_result{14.}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, transpose_a, transpose_b); auto f = make_shared(matmul, ParameterVector{A, B}); diff --git a/ngraph/test/backend/max_pool.in.cpp b/ngraph/test/backend/max_pool.in.cpp new file mode 100644 index 00000000000000..e310c06b32d83c --- /dev/null +++ b/ngraph/test/backend/max_pool.in.cpp @@ -0,0 +1,187 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
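The max_pool.in.cpp file added here exercises op::v1::MaxPool through the shared TestCase harness rather than the older compile/call_with_validate pattern. A reduced sketch of one such case with the output-shape arithmetic spelled out; test::INTERPRETER_Engine stands in for the engine the real file selects via the ENGINE_CLASS_NAME(${BACKEND_NAME}) macro and is an assumption here:

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"

using namespace ngraph;

void max_pool_floor_example()
{
    // 1x1x3x3 input, 2x2 window, stride 1, no padding:
    // out_dim = (3 - 2) / 1 + 1 = 2 with FLOOR rounding, so the output is 1x1x2x2.
    Shape in_shape{1, 1, 3, 3};
    Shape out_shape{1, 1, 2, 2};

    auto A = std::make_shared<op::Parameter>(element::Type_t::f32, in_shape);
    auto pool = std::make_shared<op::v1::MaxPool>(A,
                                                  Strides{1, 1}, // strides
                                                  Shape{0, 0},   // pads_begin
                                                  Shape{0, 0},   // pads_end
                                                  Shape{2, 2},   // kernel
                                                  op::RoundingType::FLOOR,
                                                  op::PadType::NOTSET);
    auto f = std::make_shared<Function>(pool, ParameterVector{A});

    // The generated NGRAPH_TEST cases below do the same through the TestEngine alias.
    auto test_case = test::TestCase<test::INTERPRETER_Engine>(f);
    test_case.add_input<float>({1, 2, 3, 4, 5, 6, 7, 8, 9});
    test_case.add_expected_output<float>(out_shape, {5, 6, 8, 9}); // max of each 2x2 window
    test_case.run();
}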
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +// clang-format off +#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS +#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS +#endif + +#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS +#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS +#endif +// clang-format on + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "util/engine/test_engines.hpp" +#include "util/test_case.hpp" +#include "util/test_control.hpp" + +using namespace std; +using namespace ngraph; + +static string s_manifest = "${MANIFEST}"; +using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); + +NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_floor) +{ + Shape in_shape{1, 1, 3, 3}; + Shape out_shape{1, 1, 2, 2}; + const Strides& strides{1, 1}; + const Shape& pads_begin{0, 0}; + const Shape& pads_end{0, 0}; + const Shape& kernel{2, 2}; + const op::RoundingType rounding_type = op::RoundingType::FLOOR; + const op::PadType pad_type = op::PadType::NOTSET; + + auto A = make_shared(element::f32, in_shape); + auto maxPool = make_shared( + A, strides, pads_begin, pads_end, kernel, rounding_type, pad_type); + auto f = make_shared(maxPool, ParameterVector{A}); + + std::vector a{1, 2, 3, 4, 5, 6, 7, 8, 9}; + std::vector result{5, 6, 8, 9}; + + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(out_shape, result); + test_case.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_ceil) +{ + Shape in_shape{1, 1, 4, 4}; + Shape out_shape{1, 1, 2, 2}; + const Strides& strides{1, 1}; + const Shape& pads_begin{0, 0}; + const Shape& pads_end{0, 0}; + const Shape& kernel{3, 3}; + const op::RoundingType rounding_type = op::RoundingType::CEIL; + const op::PadType pad_type = op::PadType::NOTSET; + + auto A = make_shared(element::f32, in_shape); + auto maxPool = make_shared( + A, strides, pads_begin, pads_end, kernel, rounding_type, pad_type); + auto f = make_shared(maxPool, ParameterVector{A}); + + std::vector a{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; + std::vector result{11, 12, 15, 16}; + + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(out_shape, result); + test_case.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_pad) +{ + Shape in_shape{1, 1, 2, 2}; + Shape out_shape{1, 1, 3, 3}; + const Strides& strides{1, 1}; + const Shape& pads_begin{1, 1}; + const Shape& pads_end{1, 1}; + const Shape& kernel{2, 2}; + const op::RoundingType rounding_type = op::RoundingType::CEIL; + const op::PadType pad_type = op::PadType::NOTSET; + + auto A = make_shared(element::f32, in_shape); + auto maxPool = make_shared( + A, strides, pads_begin, pads_end, kernel, rounding_type, pad_type); + auto f = make_shared(maxPool, ParameterVector{A}); + + std::vector a{1, 2, 3, 4}; + std::vector result{1, 2, 2, 3, 4, 4, 3, 4, 4}; + + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(out_shape, result); + test_case.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, 
max_pool_2d_same_upper) +{ + Shape in_shape{1, 1, 3, 3}; + Shape out_shape{1, 1, 3, 3}; + const Strides& strides{1, 1}; + const Shape& pads_begin{0, 0}; + const Shape& pads_end{0, 0}; + const Shape& kernel{2, 2}; + const op::RoundingType rounding_type = op::RoundingType::CEIL; + const op::PadType pad_type = op::PadType::SAME_UPPER; + + auto A = make_shared(element::f32, in_shape); + auto maxPool = make_shared( + A, strides, pads_begin, pads_end, kernel, rounding_type, pad_type); + auto f = make_shared(maxPool, ParameterVector{A}); + + std::vector a{1, 2, 3, 4, 5, 6, 7, 8, 9}; + std::vector result{5, 6, 6, 8, 9, 9, 8, 9, 9}; + + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(out_shape, result); + test_case.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, max_pool_3d) +{ + Shape in_shape{1, 1, 2, 2, 2}; + Shape out_shape{1, 1, 2, 2, 1}; + const Strides& strides{1, 1, 1}; + const Shape& pads_begin{0, 0, 0}; + const Shape& pads_end{0, 0, 0}; + const Shape& kernel{1, 1, 2}; + const op::RoundingType rounding_type = op::RoundingType::CEIL; + const op::PadType pad_type = op::PadType::VALID; + + auto A = make_shared(element::f32, in_shape); + auto maxPool = make_shared( + A, strides, pads_begin, pads_end, kernel, rounding_type, pad_type); + auto f = make_shared(maxPool, ParameterVector{A}); + + std::vector a{1, 2, 3, 4, 5, 6, 7, 8}; + std::vector result{2, 4, 6, 8}; + + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(out_shape, result); + test_case.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_same_lower) +{ + Shape in_shape{1, 1, 3, 3}; + Shape out_shape{1, 1, 3, 3}; + const Strides& strides{1, 1}; + const Shape& pads_begin{0, 0}; + const Shape& pads_end{0, 0}; + const Shape& kernel{2, 2}; + const op::RoundingType rounding_type = op::RoundingType::CEIL; + const op::PadType pad_type = op::PadType::SAME_LOWER; + + auto A = make_shared(element::f32, in_shape); + auto maxPool = make_shared( + A, strides, pads_begin, pads_end, kernel, rounding_type, pad_type); + auto f = make_shared(maxPool, ParameterVector{A}); + + std::vector a{1, 2, 3, 4, 5, 6, 7, 8, 9}; + std::vector result{1, 2, 3, 4, 5, 6, 7, 8, 9}; + + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(out_shape, result); + test_case.run(); +} diff --git a/ngraph/test/backend/maximum.in.cpp b/ngraph/test/backend/maximum.in.cpp index e24a1b6320e5ac..fb668b3664e7a1 100644 --- a/ngraph/test/backend/maximum.in.cpp +++ b/ngraph/test/backend/maximum.in.cpp @@ -51,18 +51,18 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, maximum) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0.5, 2, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 4, 8, 0, 0, 1, 1.5}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = 
backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -73,18 +73,18 @@ NGRAPH_TEST(${BACKEND_NAME}, maximum) NGRAPH_TEST(${BACKEND_NAME}, maximum_int32) { Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); - auto B = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); + auto B = make_shared(element::Type_t::i32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); copy_data(a, vector{0x40000140, 0x40000001, -8, 17}); - auto b = backend->create_tensor(element::i32, shape); + auto b = backend->create_tensor(element::Type_t::i32, shape); copy_data(b, vector{0x40000170, 0x40000000, 4, 8}); - auto result = backend->create_tensor(element::i32, shape); + auto result = backend->create_tensor(element::Type_t::i32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -94,18 +94,18 @@ NGRAPH_TEST(${BACKEND_NAME}, maximum_int32) NGRAPH_TEST(${BACKEND_NAME}, maximum_int64) { Shape shape{2, 2, 2}; - auto A = make_shared(element::i64, shape); - auto B = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); + auto B = make_shared(element::Type_t::i64, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i64, shape); + auto a = backend->create_tensor(element::Type_t::i64, shape); copy_data(a, vector{1, 8, -8, 17, -5, 67635216, 2, 17179887632}); - auto b = backend->create_tensor(element::i64, shape); + auto b = backend->create_tensor(element::Type_t::i64, shape); copy_data(b, vector{1, 2, 4, 8, 0, 18448, 1, 280592}); - auto result = backend->create_tensor(element::i64, shape); + auto result = backend->create_tensor(element::Type_t::i64, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); diff --git a/ngraph/test/backend/minimum.in.cpp b/ngraph/test/backend/minimum.in.cpp index fcd18dc6b57350..cb48daaf8b5242 100644 --- a/ngraph/test/backend/minimum.in.cpp +++ b/ngraph/test/backend/minimum.in.cpp @@ -48,8 +48,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, minimum) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); std::vector a{1, 8, -8, 17, -0.5, 0.5, 2, 1}; @@ -64,8 +64,8 @@ NGRAPH_TEST(${BACKEND_NAME}, minimum) NGRAPH_TEST(${BACKEND_NAME}, minimum_int32) { Shape shape{2, 2, 2}; - auto A = make_shared(element::i32, shape); - auto B = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); + auto B = make_shared(element::Type_t::i32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); std::vector a{1, 8, -8, 17, -5, 67635216, 2, 1}; @@ -80,8 +80,8 @@ NGRAPH_TEST(${BACKEND_NAME}, minimum_int32) NGRAPH_TEST(${BACKEND_NAME}, minimum_int64) { Shape shape{2, 2, 2}; - auto A = make_shared(element::i64, shape); - auto B = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); + auto B = 
make_shared(element::Type_t::i64, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); std::vector a{1, 8, -8, 17, -5, 67635216, 2, 17179887632}; diff --git a/ngraph/test/backend/multiple_backends.in.cpp b/ngraph/test/backend/multiple_backends.in.cpp index e97d7560f2676f..515ba2cf217b37 100644 --- a/ngraph/test/backend/multiple_backends.in.cpp +++ b/ngraph/test/backend/multiple_backends.in.cpp @@ -35,12 +35,12 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, multiple_backends) { Shape shape{2, 2}; - auto A1 = make_shared(element::f32, shape); - auto B1 = make_shared(element::f32, shape); + auto A1 = make_shared(element::Type_t::f32, shape); + auto B1 = make_shared(element::Type_t::f32, shape); auto f = make_shared(A1 + B1, ParameterVector{A1, B1}); - auto A2 = make_shared(element::f32, shape); - auto B2 = make_shared(element::f32, shape); + auto A2 = make_shared(element::Type_t::f32, shape); + auto B2 = make_shared(element::Type_t::f32, shape); auto g = make_shared(A2 * B2, ParameterVector{A2, B2}); auto backend1 = runtime::Backend::create("${BACKEND_NAME}"); @@ -48,13 +48,13 @@ NGRAPH_TEST(${BACKEND_NAME}, multiple_backends) auto backend2 = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a1 = backend1->create_tensor(element::f32, shape); - shared_ptr b1 = backend1->create_tensor(element::f32, shape); - shared_ptr result1 = backend1->create_tensor(element::f32, shape); + shared_ptr a1 = backend1->create_tensor(element::Type_t::f32, shape); + shared_ptr b1 = backend1->create_tensor(element::Type_t::f32, shape); + shared_ptr result1 = backend1->create_tensor(element::Type_t::f32, shape); - shared_ptr a2 = backend2->create_tensor(element::f32, shape); - shared_ptr b2 = backend2->create_tensor(element::f32, shape); - shared_ptr result2 = backend2->create_tensor(element::f32, shape); + shared_ptr a2 = backend2->create_tensor(element::Type_t::f32, shape); + shared_ptr b2 = backend2->create_tensor(element::Type_t::f32, shape); + shared_ptr result2 = backend2->create_tensor(element::Type_t::f32, shape); copy_data(a1, test::NDArray({{1, 2}, {3, 4}}).get_vector()); copy_data(b1, test::NDArray({{5, 6}, {7, 8}}).get_vector()); diff --git a/ngraph/test/backend/multiple_result.in.cpp b/ngraph/test/backend/multiple_result.in.cpp index f9128a5bf93f7f..57361900135b2b 100644 --- a/ngraph/test/backend/multiple_result.in.cpp +++ b/ngraph/test/backend/multiple_result.in.cpp @@ -34,9 +34,9 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, multiple_result) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto A_add_B = make_shared(A, B); auto A_add_B_mul_C = make_shared(A_add_B, C); @@ -44,15 +44,15 @@ NGRAPH_TEST(${BACKEND_NAME}, multiple_result) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{5, 6, 7, 8}); - auto c = backend->create_tensor(element::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); 
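The multiple_result hunk that spans this point builds one Function with two result nodes and reads both outputs from a single call_with_validate. A reduced sketch of that multi-output pattern under the same assumptions as the earlier sketches (copy_data() from the test utilities, "INTERPRETER" as an example backend name):

#include <memory>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "util/test_tools.hpp" // assumed location of copy_data()

using namespace ngraph;

void multiple_result_example()
{
    Shape shape{2, 2};
    auto A = std::make_shared<op::Parameter>(element::Type_t::f32, shape);
    auto B = std::make_shared<op::Parameter>(element::Type_t::f32, shape);
    auto C = std::make_shared<op::Parameter>(element::Type_t::f32, shape);

    auto A_add_B = std::make_shared<op::v1::Add>(A, B);
    auto A_add_B_mul_C = std::make_shared<op::v1::Multiply>(A_add_B, C);

    // Listing several nodes as results gives the Function several outputs.
    auto f = std::make_shared<Function>(NodeVector{A_add_B, A_add_B_mul_C},
                                        ParameterVector{A, B, C});

    auto backend = runtime::Backend::create("INTERPRETER");
    auto a = backend->create_tensor(element::Type_t::f32, shape);
    copy_data(a, std::vector<float>{1, 2, 3, 4});
    auto b = backend->create_tensor(element::Type_t::f32, shape);
    copy_data(b, std::vector<float>{5, 6, 7, 8});
    auto c = backend->create_tensor(element::Type_t::f32, shape);
    copy_data(c, std::vector<float>{9, 10, 11, 12});

    auto r0 = backend->create_tensor(element::Type_t::f32, shape);
    auto r1 = backend->create_tensor(element::Type_t::f32, shape);

    auto handle = backend->compile(f);
    // One output tensor per result node, in the same order as the results.
    handle->call_with_validate({r0, r1}, {a, b, c});
    // r0 = A + B = {6, 8, 10, 12}; r1 = (A + B) * C = {54, 80, 110, 144}
}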
copy_data(c, vector{9, 10, 11, 12}); - auto r0 = backend->create_tensor(element::f32, shape); - auto r1 = backend->create_tensor(element::f32, shape); + auto r0 = backend->create_tensor(element::Type_t::f32, shape); + auto r1 = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({r0, r1}, {a, b, c}); diff --git a/ngraph/test/backend/multiply.in.cpp b/ngraph/test/backend/multiply.in.cpp index 75bd095480576c..bea292e9d0efbf 100644 --- a/ngraph/test/backend/multiply.in.cpp +++ b/ngraph/test/backend/multiply.in.cpp @@ -48,8 +48,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, multiply) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); std::vector a{1, 2, 3, 4}; @@ -64,8 +64,8 @@ NGRAPH_TEST(${BACKEND_NAME}, multiply) NGRAPH_TEST(${BACKEND_NAME}, multiply_overload) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(A * B, ParameterVector{A, B}); std::vector a{1, 2, 3, 4}; diff --git a/ngraph/test/backend/negative.in.cpp b/ngraph/test/backend/negative.in.cpp index 791461caacf15a..d3b45010644623 100644 --- a/ngraph/test/backend/negative.in.cpp +++ b/ngraph/test/backend/negative.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, negative) { Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); std::vector a{1, -2, 0, -4.75f, 8.75f, -8.75f}; @@ -60,7 +60,7 @@ NGRAPH_TEST(${BACKEND_NAME}, negative) NGRAPH_TEST(${BACKEND_NAME}, negative_i32) { auto shape_a = Shape{2, 5}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); auto relu = make_shared(A); auto shape_rt = Shape{2, 5}; auto f = make_shared(relu, ParameterVector{A}); @@ -76,7 +76,7 @@ NGRAPH_TEST(${BACKEND_NAME}, negative_i32) NGRAPH_TEST(${BACKEND_NAME}, negative_f32) { auto shape_a = Shape{2, 5}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); auto relu = make_shared(A); auto shape_rt = Shape{2, 5}; auto f = make_shared(relu, ParameterVector{A}); diff --git a/ngraph/test/backend/node_name.in.cpp b/ngraph/test/backend/node_name.in.cpp index 9424d6f7363589..2e30c0b0a39833 100644 --- a/ngraph/test/backend/node_name.in.cpp +++ b/ngraph/test/backend/node_name.in.cpp @@ -33,8 +33,8 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, node_name) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto C = A + B; C->set_friendly_name("a node name"); auto f = make_shared(C, ParameterVector{A, B}); @@ -42,9 +42,9 @@ NGRAPH_TEST(${BACKEND_NAME}, node_name) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::f32, shape); - shared_ptr b = backend->create_tensor(element::f32, 
shape); - shared_ptr result = backend->create_tensor(element::f32, shape); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{1, 2}, {3, 4}}).get_vector()); copy_data(b, test::NDArray({{5, 6}, {7, 8}}).get_vector()); diff --git a/ngraph/test/backend/non_max_suppression.in.cpp b/ngraph/test/backend/non_max_suppression.in.cpp index e258d272e41dcd..cd7220e911aa3f 100644 --- a/ngraph/test/backend/non_max_suppression.in.cpp +++ b/ngraph/test/backend/non_max_suppression.in.cpp @@ -57,14 +57,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_center_point_box_format) const auto boxes_shape = Shape{1, 6, 4}; const auto scores_shape = Shape{1, 1, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -78,12 +79,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_center_point_box_format) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{3, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{3, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{3, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{3, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -120,14 +121,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_flipped_coordinates) const auto boxes_shape = Shape{1, 6, 4}; const auto scores_shape = Shape{1, 1, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto 
scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -141,12 +143,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_flipped_coordinates) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{3, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{3, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{3, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{3, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -184,14 +186,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_identical_boxes) const auto boxes_shape = Shape{1, 10, 4}; const auto scores_shape = Shape{1, 1, 10}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -205,12 +208,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_identical_boxes) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{1, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{1, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{1, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{1, 3}); + auto valid_outputs = 
backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -247,14 +250,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_limit_output_size) const auto boxes_shape = Shape{1, 6, 4}; const auto scores_shape = Shape{1, 1, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -268,12 +272,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_limit_output_size) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{2, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{2, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{2, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -308,14 +312,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_single_box) const auto boxes_shape = Shape{1, 1, 4}; const auto scores_shape = Shape{1, 1, 1}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + 
op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -329,12 +334,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_single_box) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{1, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{1, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{1, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{1, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -371,14 +376,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_suppress_by_IOU) const auto boxes_shape = Shape{1, 6, 4}; const auto scores_shape = Shape{1, 1, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -392,12 +398,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_suppress_by_IOU) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{3, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{3, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{3, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{3, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = 
backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -434,14 +440,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_suppress_by_IOU_and_scores) const auto boxes_shape = Shape{1, 6, 4}; const auto scores_shape = Shape{1, 1, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -455,12 +462,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_suppress_by_IOU_and_scores) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{2, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{2, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{2, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -499,14 +506,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_two_batches) const auto boxes_shape = Shape{2, 6, 4}; const auto scores_shape = Shape{2, 1, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, 
{0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -520,12 +528,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_two_batches) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{4, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{4, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{4, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{4, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -564,14 +572,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_two_classes) const auto boxes_shape = Shape{1, 6, 4}; const auto scores_shape = Shape{1, 2, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -585,12 +594,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_two_classes) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{4, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{4, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{4, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{4, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); diff --git 
a/ngraph/test/backend/non_zero.in.cpp b/ngraph/test/backend/non_zero.in.cpp index 774513f61967a7..f74c0e8dae126f 100644 --- a/ngraph/test/backend/non_zero.in.cpp +++ b/ngraph/test/backend/non_zero.in.cpp @@ -29,14 +29,14 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, non_zero) { PartialShape p_shape = PartialShape::dynamic(); - auto p = make_shared(element::f32, p_shape); - auto non_zero = make_shared(p, element::i32); + auto p = make_shared(element::Type_t::f32, p_shape); + auto non_zero = make_shared(p, element::Type_t::i32); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); auto cfun = backend->compile(fun); - auto input = backend->create_tensor(element::f32, Shape{3, 2}); + auto input = backend->create_tensor(element::Type_t::f32, Shape{3, 2}); copy_data(input, vector{0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 3.0f}); std::vector expected_result{2, 2, 0, 1}; @@ -45,7 +45,7 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero) auto result = make_shared(); cfun->call_with_validate({result}, {input}); - EXPECT_EQ(result->get_element_type(), element::i32); + EXPECT_EQ(result->get_element_type(), element::Type_t::i32); EXPECT_EQ(result->get_shape(), expected_output_shape); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result); @@ -54,8 +54,8 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero) NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_1s) { PartialShape p_shape = PartialShape::dynamic(); - auto p = make_shared(element::i32, p_shape); - auto non_zero = make_shared(p, element::i64); + auto p = make_shared(element::Type_t::i32, p_shape); + auto non_zero = make_shared(p, element::Type_t::i64); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -63,7 +63,7 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_1s) Shape input_shape{3, 2}; vector input_data(shape_size(input_shape), 1); - auto input = backend->create_tensor(element::i32, input_shape); + auto input = backend->create_tensor(element::Type_t::i32, input_shape); copy_data(input, input_data); std::vector expected_result{0, 0, 1, 1, 2, 2, 0, 1, 0, 1, 0, 1}; @@ -72,7 +72,7 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_1s) auto result = make_shared(); cfun->call_with_validate({result}, {input}); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_output_shape); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result); @@ -81,8 +81,8 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_1s) NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_0s) { PartialShape p_shape = PartialShape::dynamic(); - auto p = make_shared(element::i32, p_shape); - auto non_zero = make_shared(p, element::i64); + auto p = make_shared(element::Type_t::i32, p_shape); + auto non_zero = make_shared(p, element::Type_t::i64); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -90,7 +90,7 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_0s) Shape input_shape{3, 2}; vector input_data(shape_size(input_shape), 0); - auto input = backend->create_tensor(element::i32, input_shape); + auto input = backend->create_tensor(element::Type_t::i32, input_shape); copy_data(input, input_data); Shape expected_output_shape{input_shape.size(), 0}; @@ -98,7 +98,7 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_0s) auto result = make_shared(); 
cfun->call_with_validate({result}, {input}); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_output_shape); auto result_data = read_vector(result); ASSERT_EQ(result_data.data(), nullptr); diff --git a/ngraph/test/backend/normalize_l2.in.cpp b/ngraph/test/backend/normalize_l2.in.cpp index 77e0415e632fe7..4d15baf8e969ca 100644 --- a/ngraph/test/backend/normalize_l2.in.cpp +++ b/ngraph/test/backend/normalize_l2.in.cpp @@ -41,8 +41,8 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_add) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{0, 1}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::ADD), @@ -51,9 +51,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_add) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -64,8 +64,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_add) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_add) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{0}, vector{}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{0}, vector{}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::ADD), @@ -74,9 +74,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_add) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -87,8 +87,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_add) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_add) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{}, vector{0}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{}, vector{0}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::ADD), @@ -97,9 +97,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_add) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ 
-110,8 +110,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_add) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_add) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{}, vector{1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{}, vector{1}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::ADD), @@ -120,9 +120,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_add) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -135,8 +135,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_add) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_max) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{0, 1}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::ADD), @@ -145,9 +145,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_max) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -158,8 +158,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_max) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_max) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{0}, vector{}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{0}, vector{}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::MAX), @@ -168,9 +168,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_max) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -181,8 +181,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_max) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_max) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{}, vector{0}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{}, vector{0}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::MAX), @@ -191,9 +191,9 @@ 
NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_max) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -204,8 +204,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_max) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_max) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{}, vector{1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{}, vector{1}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::MAX), @@ -214,9 +214,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_max) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/numeric.in.cpp b/ngraph/test/backend/numeric.in.cpp index 63f444b646d09f..a95febf5d14a16 100644 --- a/ngraph/test/backend/numeric.in.cpp +++ b/ngraph/test/backend/numeric.in.cpp @@ -31,8 +31,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, numeric_float_nan) { Shape shape{5}; - auto A = op::Constant::create(element::f32, shape, {-2.5f, 25.5f, 2.25f, NAN, 6.0f}); - auto B = op::Constant::create(element::f32, shape, {10.0f, 5.0f, 2.25f, 10.0f, NAN}); + auto A = op::Constant::create(element::Type_t::f32, shape, {-2.5f, 25.5f, 2.25f, NAN, 6.0f}); + auto B = op::Constant::create(element::Type_t::f32, shape, {10.0f, 5.0f, 2.25f, 10.0f, NAN}); auto f = make_shared(make_shared(A, B), ParameterVector{}); auto test_case = test::TestCase(f); @@ -43,8 +43,8 @@ NGRAPH_TEST(${BACKEND_NAME}, numeric_float_nan) NGRAPH_TEST(${BACKEND_NAME}, numeric_double_nan) { Shape shape{5}; - auto A = op::Constant::create(element::f64, shape, {-2.5f, 25.5f, 2.25f, NAN, 6.0f}); - auto B = op::Constant::create(element::f64, shape, {10.0f, 5.0f, 2.25f, 10.0f, NAN}); + auto A = op::Constant::create(element::Type_t::f64, shape, {-2.5f, 25.5f, 2.25f, NAN, 6.0f}); + auto B = op::Constant::create(element::Type_t::f64, shape, {10.0f, 5.0f, 2.25f, 10.0f, NAN}); auto f = make_shared(make_shared(A, B), ParameterVector{}); auto test_case = test::TestCase(f); @@ -55,8 +55,10 @@ NGRAPH_TEST(${BACKEND_NAME}, numeric_double_nan) NGRAPH_TEST(${BACKEND_NAME}, numeric_float_inf) { Shape shape{5}; - auto A = op::Constant::create(element::f32, shape, {-2.5f, 25.5f, 2.25f, INFINITY, 6.0f}); - auto B = op::Constant::create(element::f32, shape, {10.0f, 5.0f, 2.25f, 10.0f, -INFINITY}); + auto A = + op::Constant::create(element::Type_t::f32, shape, {-2.5f, 25.5f, 2.25f, INFINITY, 6.0f}); + auto B = + op::Constant::create(element::Type_t::f32, shape, {10.0f, 5.0f, 2.25f, 10.0f, -INFINITY}); auto f = make_shared(make_shared(A, B), ParameterVector{}); auto test_case = 
test::TestCase(f); @@ -67,8 +69,10 @@ NGRAPH_TEST(${BACKEND_NAME}, numeric_float_inf) NGRAPH_TEST(${BACKEND_NAME}, numeric_double_inf) { Shape shape{5}; - auto A = op::Constant::create(element::f64, shape, {-2.5f, 25.5f, 2.25f, INFINITY, 6.0f}); - auto B = op::Constant::create(element::f64, shape, {10.0f, 5.0f, 2.25f, 10.0f, -INFINITY}); + auto A = + op::Constant::create(element::Type_t::f64, shape, {-2.5f, 25.5f, 2.25f, INFINITY, 6.0f}); + auto B = + op::Constant::create(element::Type_t::f64, shape, {10.0f, 5.0f, 2.25f, 10.0f, -INFINITY}); auto f = make_shared(make_shared(A, B), ParameterVector{}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/one_hot.in.cpp b/ngraph/test/backend/one_hot.in.cpp index 47192df718f117..93e54b6059bde8 100644 --- a/ngraph/test/backend/one_hot.in.cpp +++ b/ngraph/test/backend/one_hot.in.cpp @@ -36,12 +36,12 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_2_in_3) { Shape shape_a{}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); int axis = 0; Shape shape_r{3}; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -54,12 +54,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_2_in_3) NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_1_in_3) { Shape shape_a{}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); int axis = 0; Shape shape_r{3}; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -72,12 +72,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_1_in_3) NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_0_in_3) { Shape shape_a{}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{3}; int axis = 0; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -90,12 +90,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_0_in_3) NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_0) { Shape shape_a{8}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{3, 8}; int axis = 0; - auto 
depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -109,12 +109,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_0) NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_1) { Shape shape_a{8}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{8, 3}; int axis = 1; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -128,12 +128,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_1) NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_1_barely_oob) { Shape shape_a{8}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{8, 3}; int axis = 1; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -148,12 +148,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_1_barely_oob) NGRAPH_TEST(${BACKEND_NAME}, one_hot_matrix_0) { Shape shape_a{3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{3, 3, 3}; int axis = 0; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -169,12 +169,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_many_categories) // Imagenet has roughly 20,000 categories constexpr uint32_t category_count = 20000; Shape shape_a{6}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{6, category_count}; int axis = 1; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, 
{shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -194,24 +194,24 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_many_categories) NGRAPH_TEST(${BACKEND_NAME}, one_hot_on_off_float) { Shape shape_a{3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{3, 3, 3}; int axis = 0; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::f32, {}, {2.5}); - auto off_value = op::Constant::create(element::f32, {}, {0.5}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::f32, {}, {2.5}); + auto off_value = op::Constant::create(element::Type_t::f32, {}, {0.5}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{ 0, 1, 1, 2, 1, 0, 0, 2, 1, }); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/pad.in.cpp b/ngraph/test/backend/pad.in.cpp index 7ffbd97a093c00..99bc1a77e35fa1 100644 --- a/ngraph/test/backend/pad.in.cpp +++ b/ngraph/test/backend/pad.in.cpp @@ -33,11 +33,11 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_1d) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {4}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {5}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {4}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {5}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -46,9 +46,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_1d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{15}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{15}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -61,11 +61,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_1d) NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {4}); - const auto pads_end = 
op::Constant::create(element::i64, Shape{1}, {-2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {4}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {-2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -74,9 +74,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{8}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{8}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -88,11 +88,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d) NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d_check_limits) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {4}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-7}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {4}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {-7}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -101,9 +101,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d_check_limits) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{3}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{3}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -114,11 +114,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d_check_limits) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -127,9 +127,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = 
backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{11}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{11}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -140,11 +140,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {-3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -153,9 +153,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{5}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -166,11 +166,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg_bigger_than_tensor) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-7}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {-7}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -179,9 +179,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg_bigger_than_tensor) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -191,11 +191,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg_bigger_than_tensor) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto 
pads_begin = op::Constant::create(element::i64, Shape{1}, {-2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {-2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -204,9 +204,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{7}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{7}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -217,11 +217,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg_bigger_than_tensor) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {-7}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {-7}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -230,9 +230,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg_bigger_than_tensor) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{2}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{2}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -242,11 +242,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg_bigger_than_tensor) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d) { const Shape data_shape{3, 4}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, 3}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -255,9 +255,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d) auto backend = 
runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12})); - auto result = backend->create_tensor(element::f32, Shape{6, 9}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{6, 9}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -275,11 +275,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d_with_neg) { const Shape data_shape{3, 4}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, -1}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {2, -1}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -288,9 +288,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d_with_neg) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12})); - auto result = backend->create_tensor(element::f32, Shape{6, 5}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{6, 5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -308,11 +308,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d_with_neg) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -321,9 +321,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{11}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{11}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -335,11 +335,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, 
data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {-3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -348,9 +348,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{5}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -361,11 +361,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg_bigger_than_tensor) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-7}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {-7}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -374,9 +374,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg_bigger_than_tensor) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -387,11 +387,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg_bigger_than_tensor) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {-2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {-2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, 
op::PadMode::REFLECT), @@ -400,9 +400,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{7}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{7}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -414,11 +414,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg_bigger_than_tensor) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {-7}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {-7}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -427,9 +427,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg_bigger_than_tensor) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{2}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{2}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -440,11 +440,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg_bigger_than_tensor) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_multi_reflect) { const Shape data_shape{3}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {10}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {9}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {10}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {9}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -453,9 +453,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_multi_reflect) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3})); - auto result = backend->create_tensor(element::f32, Shape{22}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{22}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -468,11 +468,11 @@ 
NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_multi_reflect) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d) { const Shape data_shape{3, 4}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, 3}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -481,10 +481,10 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, test::NDArray({{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}).get_vector()); - auto result = backend->create_tensor(element::f32, Shape{6, 9}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{6, 9}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -502,11 +502,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d_with_neg) { const Shape data_shape{3, 4}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, -1}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {2, -1}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -515,10 +515,10 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d_with_neg) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, test::NDArray({{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}).get_vector()); - auto result = backend->create_tensor(element::f32, Shape{6, 5}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{6, 5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -536,11 +536,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d_with_neg) NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d) { const Shape data_shape{2, 3}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {1, -1}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {2, 0}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {9}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {1, -1}); + const 
auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 0}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {9}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -549,9 +549,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, test::NDArray({{1, 2, 3}, {4, 5, 6}}).get_vector()); - auto result = backend->create_tensor(element::f32, Shape{5, 2}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{5, 2}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -564,11 +564,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d) NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d_all_negative) { const Shape data_shape{3, 3}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {-1, -1}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {-1, -1}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {9}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {-1, -1}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {-1, -1}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {9}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -577,9 +577,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d_all_negative) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, test::NDArray({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}).get_vector()); - auto result = backend->create_tensor(element::f32, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -591,11 +591,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d_all_negative) NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x0) { const Shape data_shape{0, 0}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, 3}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {3, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {3, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -604,8 +604,8 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x0) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); - auto result = backend->create_tensor(element::f32, Shape{5, 5}); + auto a = 
backend->create_tensor(element::Type_t::f32, data_shape); + auto result = backend->create_tensor(element::Type_t::f32, Shape{5, 5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -622,11 +622,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x0) NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x3) { const Shape data_shape{0, 3}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, 1}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {3, 1}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 1}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {3, 1}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -635,8 +635,8 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x3) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); - auto result = backend->create_tensor(element::f32, Shape{5, 5}); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); + auto result = backend->create_tensor(element::Type_t::f32, Shape{5, 5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -653,11 +653,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x3) NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_3x0) { const Shape data_shape{3, 0}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {1, 3}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 3}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -666,8 +666,8 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_3x0) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); - auto result = backend->create_tensor(element::f32, Shape{5, 5}); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); + auto result = backend->create_tensor(element::Type_t::f32, Shape{5, 5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -684,11 +684,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_3x0) NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_4d_1x2x2x2) { const Shape data_shape{1, 2, 2, 2}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{4}, {0, 0, 1, 1}); - const auto pads_end = op::Constant::create(element::i64, Shape{4}, {0, 0, 1, 1}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {42}); + const auto pads_begin = 
op::Constant::create(element::Type_t::i64, Shape{4}, {0, 0, 1, 1}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{4}, {0, 0, 1, 1}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {42}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -697,7 +697,7 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_4d_1x2x2x2) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); // clang-format off copy_data(a, test::NDArray( { @@ -713,7 +713,7 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_4d_1x2x2x2) } }).get_vector()); // clang-format on - auto result = backend->create_tensor(element::f32, Shape{1, 2, 4, 4}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 2, 4, 4}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -742,11 +742,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_4d_1x2x2x2) NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_4d) { const Shape data_shape{1, 3, 2, 2}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{4}, {0, -1, 1, 1}); - const auto pads_end = op::Constant::create(element::i64, Shape{4}, {0, -1, 1, 1}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {42}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{4}, {0, -1, 1, 1}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{4}, {0, -1, 1, 1}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {42}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -755,7 +755,7 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_4d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); // clang-format off copy_data(a, test::NDArray( { @@ -776,7 +776,7 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_4d) }).get_vector()); // clang-format on - auto result = backend->create_tensor(element::f32, Shape{1, 1, 4, 4}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1, 4, 4}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -803,11 +803,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_2channel_2image_asym) { const Shape data_shape{2, 2, 4, 4}; const auto window_movement_strides = Strides{2, 2}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0}); - const auto pads_end = op::Constant::create(element::i64, Shape{4}, {0, 0, 2, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {42}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{4}, {0, 0, 0, 0}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{4}, {0, 0, 2, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {42}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -816,7 
+816,7 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_2channel_2image_asym) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, test::NDArray({{{{0, 1, 0, 2}, // img 0 chan 0 {0, 3, 2, 0}, @@ -839,7 +839,7 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_2channel_2image_asym) {1, 0, 0, 0}}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, Shape{2, 2, 6, 6}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{2, 2, 6, 6}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -879,11 +879,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_2channel_2image_asym) NGRAPH_TEST(${BACKEND_NAME}, pad_symmetric) { const Shape data_shape{2, 3}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {1, 2}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::SYMMETRIC), @@ -892,9 +892,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_symmetric) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, test::NDArray({{1, 2, 3}, {4, 5, 6}}).get_vector()); - auto result = backend->create_tensor(element::f32, Shape{4, 7}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{4, 7}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/parameter_as_output.in.cpp b/ngraph/test/backend/parameter_as_output.in.cpp index b2b84e0e875678..898c24691ff671 100644 --- a/ngraph/test/backend/parameter_as_output.in.cpp +++ b/ngraph/test/backend/parameter_as_output.in.cpp @@ -31,14 +31,14 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, parameter_as_output) { Shape shape{3, 4}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(A, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::f32, shape); - shared_ptr result = backend->create_tensor(element::f32, shape); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape); vector expected{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; vector zero(shape_size(shape), 0); diff --git a/ngraph/test/backend/partial_slice.in.cpp b/ngraph/test/backend/partial_slice.in.cpp index 61a322f9b31128..4416bb21631df5 100644 --- a/ngraph/test/backend/partial_slice.in.cpp +++ b/ngraph/test/backend/partial_slice.in.cpp @@ -49,7 +49,7 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, partial_slice_static) { Shape shape_x{2, 3, 2}; - auto x = 
make_shared(element::f32, shape_x); + auto x = make_shared(element::Type_t::f32, shape_x); AxisVector axes{0, 1}; vector lower_bounds{1, 0}; vector upper_bounds{2, 2}; @@ -61,10 +61,10 @@ NGRAPH_TEST(${BACKEND_NAME}, partial_slice_static) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto t_x = backend->create_tensor(element::f32, shape_x); + auto t_x = backend->create_tensor(element::Type_t::f32, shape_x); vector v_x{0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f}; copy_data(t_x, v_x); - auto t_r = backend->create_tensor(element::f32, Shape{1, 2, 2}); + auto t_r = backend->create_tensor(element::Type_t::f32, Shape{1, 2, 2}); auto handle = backend->compile(f); handle->call_with_validate({t_r}, {t_x}); @@ -76,7 +76,7 @@ NGRAPH_TEST(${BACKEND_NAME}, partial_slice_static) NGRAPH_TEST(${BACKEND_NAME}, partial_slice_partial_shape) { auto pshape_x = PartialShape{Dimension::dynamic(), 3, Dimension::dynamic()}; - auto x = make_shared(element::f32, pshape_x); + auto x = make_shared(element::Type_t::f32, pshape_x); AxisVector axes{0, 1}; vector lower_bounds{1, 0}; vector upper_bounds{2, 2}; @@ -89,10 +89,10 @@ NGRAPH_TEST(${BACKEND_NAME}, partial_slice_partial_shape) // Create some tensors for input/output Shape shape_x{2, 3, 2}; - auto t_x = backend->create_tensor(element::f32, shape_x); + auto t_x = backend->create_tensor(element::Type_t::f32, shape_x); vector v_x{0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f}; copy_data(t_x, v_x); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({t_r}, {t_x}); @@ -104,7 +104,7 @@ NGRAPH_TEST(${BACKEND_NAME}, partial_slice_partial_shape) NGRAPH_TEST(${BACKEND_NAME}, partial_slice_unkown_rank) { auto pshape_x = PartialShape::dynamic(); - auto x = make_shared(element::f32, pshape_x); + auto x = make_shared(element::Type_t::f32, pshape_x); AxisVector axes{0, 1}; vector lower_bounds{1, 0}; vector upper_bounds{2, 2}; @@ -117,10 +117,10 @@ NGRAPH_TEST(${BACKEND_NAME}, partial_slice_unkown_rank) // Create some tensors for input/output Shape shape_x{2, 3, 2}; - auto t_x = backend->create_tensor(element::f32, shape_x); + auto t_x = backend->create_tensor(element::Type_t::f32, shape_x); vector v_x{0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f}; copy_data(t_x, v_x); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({t_r}, {t_x}); diff --git a/ngraph/test/backend/power.in.cpp b/ngraph/test/backend/power.in.cpp index 91ed81d89a68e2..9c0ea5bea0d8e6 100644 --- a/ngraph/test/backend/power.in.cpp +++ b/ngraph/test/backend/power.in.cpp @@ -48,8 +48,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, power) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); std::vector a{1, 2, 3, 5}; diff --git a/ngraph/test/backend/quantize_dequantize.in.cpp b/ngraph/test/backend/quantize_dequantize.in.cpp index 0da1e807c03f48..98c7779cbb844d 100644 --- 
a/ngraph/test/backend/quantize_dequantize.in.cpp +++ b/ngraph/test/backend/quantize_dequantize.in.cpp @@ -34,8 +34,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::u8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::u8; typedef float input_c_type; typedef uint8_t output_c_type; @@ -67,8 +67,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_zero_offset) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::u8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::u8; typedef float input_c_type; typedef uint8_t output_c_type; @@ -100,8 +100,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_axes) Shape scale_offset_shape{4}; AxisSet quantization_axes{0}; - auto input_type = element::f32; - auto output_type = element::u8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::u8; typedef float input_c_type; typedef uint8_t output_c_type; @@ -134,8 +134,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int8) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -168,8 +168,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int8_zero_offset) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -202,8 +202,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int32) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i32; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i32; typedef float input_c_type; typedef int32_t output_c_type; @@ -236,8 +236,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int32_zero_offset) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i32; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i32; typedef float input_c_type; typedef int32_t output_c_type; @@ -270,8 +270,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_clamp_uint8) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::u8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::u8; typedef float input_c_type; typedef uint8_t output_c_type; @@ -302,8 +302,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_clamp_int8) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -335,8 +335,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_clamp_int32) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f64; - auto output_type = element::i32; + auto input_type = element::Type_t::f64; + auto output_type = element::Type_t::i32; // TODO: fails with input due to 32 bits typedef double input_c_type; @@ -369,8 +369,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_TOWARD_ZERO) Shape 
scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -401,8 +401,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_TOWARD_INFINITY) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -433,8 +433,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_UPWARD) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -465,8 +465,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_DOWNWARD) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -497,8 +497,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_TOWARD_EVEN) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -529,8 +529,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_TOWARD_INFINITY) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -566,8 +566,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_TOWARD_ZERO) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -603,8 +603,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_UP) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -635,8 +635,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_DOWN) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -667,8 +667,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_dynamic_offset) Shape scale_offset_shape = {}; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::u8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::u8; typedef float input_c_type; typedef uint8_t output_c_type; diff --git a/ngraph/test/backend/range.in.cpp b/ngraph/test/backend/range.in.cpp index 8aa970796528bd..5fa671bf6fa786 100644 --- a/ngraph/test/backend/range.in.cpp +++ b/ngraph/test/backend/range.in.cpp @@ -42,9 +42,9 
@@ struct RangeTest NGRAPH_TEST(${BACKEND_NAME}, range) { // Create a graph for f(start,stop,step) = Range(start,stop,step). - auto start = make_shared(element::i32, Shape{}); - auto stop = make_shared(element::i32, Shape{}); - auto step = make_shared(element::i32, Shape{}); + auto start = make_shared(element::Type_t::i32, Shape{}); + auto stop = make_shared(element::Type_t::i32, Shape{}); + auto step = make_shared(element::Type_t::i32, Shape{}); auto range = make_shared(start, stop, step); ASSERT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); @@ -55,7 +55,7 @@ NGRAPH_TEST(${BACKEND_NAME}, range) auto ex = backend->compile(f); - auto t_r = backend->create_dynamic_tensor(element::i32, PartialShape::dynamic()); + auto t_r = backend->create_dynamic_tensor(element::Type_t::i32, PartialShape::dynamic()); std::vector> int32_tests = { RangeTest{0, 10, 1, Shape{10}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}}, @@ -65,9 +65,9 @@ NGRAPH_TEST(${BACKEND_NAME}, range) for (auto& test : int32_tests) { - auto t_start = backend->create_tensor(element::i32, Shape{}); - auto t_stop = backend->create_tensor(element::i32, Shape{}); - auto t_step = backend->create_tensor(element::i32, Shape{}); + auto t_start = backend->create_tensor(element::Type_t::i32, Shape{}); + auto t_stop = backend->create_tensor(element::Type_t::i32, Shape{}); + auto t_step = backend->create_tensor(element::Type_t::i32, Shape{}); copy_data(t_start, std::vector{test.start}); copy_data(t_stop, std::vector{test.stop}); @@ -75,7 +75,7 @@ NGRAPH_TEST(${BACKEND_NAME}, range) ex->call_with_validate({t_r}, {t_start, t_stop, t_step}); - ASSERT_EQ(t_r->get_element_type(), element::i32); + ASSERT_EQ(t_r->get_element_type(), element::Type_t::i32); ASSERT_EQ(t_r->get_shape(), test.expected_result_shape); auto results = read_vector(t_r); diff --git a/ngraph/test/backend/reduce_max.in.cpp b/ngraph/test/backend/reduce_max.in.cpp index efd3bc68b24bc3..8a4022af26f4b8 100644 --- a/ngraph/test/backend/reduce_max.in.cpp +++ b/ngraph/test/backend/reduce_max.in.cpp @@ -31,8 +31,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, reduce_max_to_scalar) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -48,8 +48,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_to_scalar_int8) { Shape shape{2, 2}; - auto A = make_shared(element::i8, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::i8, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -65,9 +65,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_to_scalar_int8) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_columns) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -83,9 +83,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_columns) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows) { 
Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -101,9 +101,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_int32) { Shape shape_a{3, 2}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -119,9 +119,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_zero) { Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -140,18 +140,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_zero_int32) { Shape shape_a{3, 0}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); copy_data(result, vector({3, 3, 3})); int32_t minval = std::numeric_limits::has_infinity @@ -168,18 +168,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_cols_zero) { // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). 
Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3})); auto handle = backend->compile(f); @@ -192,18 +192,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_cols_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_vector_zero) { Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -214,18 +214,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_vector_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_to_scalar_zero_by_zero) { Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -236,9 +236,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_to_scalar_zero_by_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -255,9 +255,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_matrix_most_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_matrix_least_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3}; - auto axes = make_shared(element::i32, Shape{}, 2); + auto axes = make_shared(element::Type_t::i32, Shape{}, 2); auto f = 
make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -274,9 +274,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_matrix_least_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_vector) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -293,9 +293,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_vector) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -312,19 +312,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar_int32) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -334,19 +334,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar_double) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f64, shape_a); + auto A = make_shared(element::Type_t::f64, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f64, shape_a); + auto a = backend->create_tensor(element::Type_t::f64, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::f64, shape_rt); + auto result = backend->create_tensor(element::Type_t::f64, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -356,18 +356,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar_double) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_eliminate_zero_dim) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 
1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. @@ -385,17 +385,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_eliminate_zero_dim) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_to_scalar) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -405,17 +405,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_to_scalar_int8) { Shape shape{2, 2}; - auto A = make_shared(element::i8, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::i8, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape); + auto a = backend->create_tensor(element::Type_t::i8, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i8, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::i8, Shape{1, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -425,18 +425,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_to_scalar_int8) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_columns) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -446,18 +446,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_columns) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows) { Shape 
shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -467,18 +467,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_int32) { Shape shape_a{3, 2}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -488,18 +488,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_zero) { Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3, 3})); auto handle = backend->compile(f); @@ -513,18 +513,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_zero_int32) { Shape shape_a{3, 0}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, 
shape_rt); copy_data(result, vector({3, 3, 3})); int32_t minval = std::numeric_limits::has_infinity @@ -541,18 +541,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_cols_zero) { // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3})); auto handle = backend->compile(f); @@ -565,18 +565,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_cols_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_vector_zero) { Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -587,18 +587,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_vector_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_to_scalar_zero_by_zero) { Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -609,19 +609,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_to_scalar_zero_by_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 3, 3}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = 
backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -633,19 +633,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_matrix_most_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_matrix_least_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3, 1}; - auto axes = make_shared(element::i32, Shape{}, 2); + auto axes = make_shared(element::Type_t::i32, Shape{}, 2); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -657,19 +657,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_matrix_least_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_vector) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1, 3}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -681,19 +681,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_vector) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); 
auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -704,19 +704,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar_int32) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -726,19 +726,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar_double) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f64, shape_a); + auto A = make_shared(element::Type_t::f64, shape_a); Shape shape_rt{1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f64, shape_a); + auto a = backend->create_tensor(element::Type_t::f64, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::f64, shape_rt); + auto result = backend->create_tensor(element::Type_t::f64, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -748,18 +748,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar_double) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_eliminate_zero_dim) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. 
@@ -776,8 +776,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_eliminate_zero_dim) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_columns_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -785,9 +785,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_columns_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -796,8 +796,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_columns_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -805,9 +805,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -816,8 +816,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_columns_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -825,9 +825,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_columns_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -836,8 +836,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_columns_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = 
make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -845,11 +845,11 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); EXPECT_TRUE(test::all_close_f((vector{2, 4, 6}), read_vector(result))); -} \ No newline at end of file +} diff --git a/ngraph/test/backend/reduce_mean.in.cpp b/ngraph/test/backend/reduce_mean.in.cpp index 242f6907ea7b0d..fc268aa88d8d3a 100644 --- a/ngraph/test/backend/reduce_mean.in.cpp +++ b/ngraph/test/backend/reduce_mean.in.cpp @@ -33,17 +33,17 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_to_scalar) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, Shape{}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -54,17 +54,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_to_scalar_int8) { Shape shape{2, 2}; - auto A = make_shared(element::i8, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::i8, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape); + auto a = backend->create_tensor(element::Type_t::i8, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i8, Shape{}); + auto result = backend->create_tensor(element::Type_t::i8, Shape{}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -74,18 +74,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_to_scalar_int8) NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_columns) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = 
backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -96,18 +96,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_columns) NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -118,18 +118,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows) NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows_int32) { Shape shape_a{3, 2}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -141,17 +141,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_to_scalar) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -162,17 +162,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_to_scalar_int8) { Shape shape{2, 2}; - auto A = make_shared(element::i8, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::i8, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create 
some tensors for input/output - auto a = backend->create_tensor(element::i8, shape); + auto a = backend->create_tensor(element::Type_t::i8, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i8, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::i8, Shape{1, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -182,18 +182,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_to_scalar_int8) NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_columns) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -204,18 +204,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_columns) NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_rows) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -226,18 +226,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_rows) NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_rows_int32) { Shape shape_a{3, 2}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -248,8 +248,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_rows_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_columns_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = 
make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -257,9 +257,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_columns_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -269,8 +269,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_columns_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -278,9 +278,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -290,8 +290,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_columns_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -299,9 +299,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_columns_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -311,8 +311,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_columns_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_rows_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -320,9 +320,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_rows_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = 
backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/reduce_min.in.cpp b/ngraph/test/backend/reduce_min.in.cpp index ca95bacaf67b21..31ed9a00de6c37 100644 --- a/ngraph/test/backend/reduce_min.in.cpp +++ b/ngraph/test/backend/reduce_min.in.cpp @@ -33,17 +33,17 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, reduce_min_to_scalar) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, Shape{}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -54,17 +54,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_to_scalar_int8) { Shape shape{2, 2}; - auto A = make_shared(element::i8, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::i8, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape); + auto a = backend->create_tensor(element::Type_t::i8, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i8, Shape{}); + auto result = backend->create_tensor(element::Type_t::i8, Shape{}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -74,18 +74,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_to_scalar_int8) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_columns) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -96,18 +96,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_columns) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = 
make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -118,18 +118,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows_int32) { Shape shape_a{3, 2}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -139,18 +139,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows_zero) { Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3, 3})); auto handle = backend->compile(f); @@ -165,18 +165,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_cols_zero) { // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). 
Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3})); auto handle = backend->compile(f); @@ -189,18 +189,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_cols_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_vector_zero) { Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -211,18 +211,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_vector_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_to_scalar_zero_by_zero) { Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -233,19 +233,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_to_scalar_zero_by_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = 
backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -257,19 +257,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_matrix_most_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_matrix_least_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3}; - auto axes = make_shared(element::i32, Shape{}, 2); + auto axes = make_shared(element::Type_t::i32, Shape{}, 2); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -281,19 +281,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_matrix_least_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_vector) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -304,19 +304,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_vector) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_scalar) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -327,19 +327,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_scalar_int32) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{}; 
- auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -349,18 +349,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_eliminate_zero_dim) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. @@ -378,17 +378,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_eliminate_zero_dim) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_to_scalar) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -399,17 +399,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_to_scalar_int8) { Shape shape{2, 2}; - auto A = make_shared(element::i8, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::i8, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape); + auto a = backend->create_tensor(element::Type_t::i8, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i8, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::i8, Shape{1, 1}); auto handle = 
backend->compile(f); handle->call_with_validate({result}, {a}); @@ -419,18 +419,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_to_scalar_int8) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_columns) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -441,18 +441,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_columns) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -463,18 +463,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows_int32) { Shape shape_a{3, 2}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -484,18 +484,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows_zero) { Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, 
shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3, 3})); auto handle = backend->compile(f); @@ -510,18 +510,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_cols_zero) { // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3})); auto handle = backend->compile(f); @@ -534,18 +534,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_cols_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_vector_zero) { Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -556,18 +556,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_vector_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_to_scalar_zero_by_zero) { Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -578,19 +578,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_to_scalar_zero_by_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 3, 3}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = 
make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -602,19 +602,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_matrix_most_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_matrix_least_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3, 1}; - auto axes = make_shared(element::i32, Shape{}, 2); + auto axes = make_shared(element::Type_t::i32, Shape{}, 2); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -626,19 +626,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_matrix_least_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_vector) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1, 3}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -649,19 +649,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_vector) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_scalar) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -672,19 +672,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_scalar_int32) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -694,18 +694,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_eliminate_zero_dim) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. 
@@ -722,8 +722,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_eliminate_zero_dim) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_columns_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -731,9 +731,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_columns_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -743,8 +743,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_columns_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -752,9 +752,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -764,8 +764,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_columns_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -773,9 +773,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_columns_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -785,8 +785,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_columns_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = 
make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -794,9 +794,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/reduce_prod.in.cpp b/ngraph/test/backend/reduce_prod.in.cpp index 46d7427b1d4a06..87df2a5753566a 100644 --- a/ngraph/test/backend/reduce_prod.in.cpp +++ b/ngraph/test/backend/reduce_prod.in.cpp @@ -33,17 +33,17 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, reduce_product_to_scalar) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, Shape{}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -53,18 +53,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_columns) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -74,18 +74,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_columns) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = 
backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -95,18 +95,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows_zero) { Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3, 3})); auto handle = backend->compile(f); @@ -118,18 +118,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_cols_zero) { // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3})); auto handle = backend->compile(f); @@ -140,18 +140,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_cols_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_vector_zero) { Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -162,18 +162,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_vector_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_to_scalar_zero_by_zero) { Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = 
backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -184,19 +184,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_to_scalar_zero_by_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -215,19 +215,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_matrix_most_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_matrix_least_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3}; - auto axes = make_shared(element::i32, Shape{}, 2); + auto axes = make_shared(element::Type_t::i32, Shape{}, 2); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -246,19 +246,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_matrix_least_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_vector) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -272,19 +272,19 @@ 
NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_vector) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_scalar) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -298,18 +298,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_eliminate_zero_dim) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. 
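The reduce_prod hunks receive the same treatment. The zero-dimension cases above also document the identity behaviour: reducing a product over an axis of size zero yields 1 in every output element, which is why the tests pre-fill the result tensor with 3s before the call. A minimal sketch of that case under the new enum follows; as before, the template arguments, the copy_data helper and the include paths are assumptions based on the test sources, not content of this patch.

#include "ngraph/ngraph.hpp"
#include "runtime/backend.hpp"    // include path assumed
#include "util/test_tools.hpp"    // copy_data test helper (assumed)

using namespace std;
using namespace ngraph;

int main()
{
    // Reducing over the zero-sized axis 1 of a {3, 0, 2} tensor: the multiplicative
    // identity 1 must overwrite every element of the pre-poisoned output.
    Shape shape_a{3, 0, 2};
    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, vector<int32_t>{1});
    auto f =
        make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

    auto backend = runtime::Backend::create("INTERPRETER");
    auto a = backend->create_tensor(element::Type_t::f32, shape_a);   // zero elements
    copy_data(a, vector<float>{});
    auto result = backend->create_tensor(element::Type_t::f32, Shape{3, 2});
    copy_data(result, vector<float>{3, 3, 3, 3, 3, 3});               // poison the output

    backend->compile(f)->call_with_validate({result}, {a});
    // read_vector<float>(result) == {1, 1, 1, 1, 1, 1}
    return 0;
}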
@@ -323,18 +323,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_eliminate_zero_dim) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_2d_to_scalar_int32) { Shape shape_a{3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -344,17 +344,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_2d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_to_scalar_int32) { Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::i32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i32, Shape{}); + auto result = backend->create_tensor(element::Type_t::i32, Shape{}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -364,17 +364,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_to_scalar_int8) { Shape shape{2, 2}; - auto A = make_shared(element::i8, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::i8, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape); + auto a = backend->create_tensor(element::Type_t::i8, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i8, Shape{}); + auto result = backend->create_tensor(element::Type_t::i8, Shape{}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -386,17 +386,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_to_scalar_int8) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_to_scalar) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); 
copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -406,18 +406,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_columns) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -427,18 +427,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_columns) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -448,18 +448,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows_zero) { Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3, 3})); auto handle = backend->compile(f); @@ -471,18 +471,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_cols_zero) { // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). 
Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3})); auto handle = backend->compile(f); @@ -493,18 +493,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_cols_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_vector_zero) { Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -515,18 +515,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_vector_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_to_scalar_zero_by_zero) { Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -537,19 +537,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_to_scalar_zero_by_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 3, 3}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = 
backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -568,19 +568,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_matrix_most_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_matrix_least_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3, 1}; - auto axes = make_shared(element::i32, Shape{}, 2); + auto axes = make_shared(element::Type_t::i32, Shape{}, 2); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -599,19 +599,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_matrix_least_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_vector) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1, 3}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -625,19 +625,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_vector) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_scalar) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -651,18 +651,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, 
reduce_product_keep_3d_eliminate_zero_dim) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. @@ -676,18 +676,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_eliminate_zero_dim) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_2d_to_scalar_int32) { Shape shape_a{3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{1, 1}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -697,17 +697,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_2d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_to_scalar_int32) { Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::i32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i32, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::i32, Shape{1, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -717,17 +717,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_to_scalar_int8) { Shape shape{2, 2}; - auto A = make_shared(element::i8, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::i8, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape); + auto a = backend->create_tensor(element::Type_t::i8, shape); copy_data(a, 
vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i8, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::i8, Shape{1, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -738,8 +738,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_to_scalar_int8) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_columns_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -747,9 +747,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_columns_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -758,8 +758,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_columns_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -767,9 +767,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -778,8 +778,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_columns_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -787,9 +787,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_columns_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -798,8 +798,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_columns_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows_dynamic) { - 
auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -807,9 +807,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/reduce_sum.in.cpp b/ngraph/test/backend/reduce_sum.in.cpp index 9d49ae671d1d34..0f93b0c1efdf00 100644 --- a/ngraph/test/backend/reduce_sum.in.cpp +++ b/ngraph/test/backend/reduce_sum.in.cpp @@ -43,17 +43,17 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_to_scalar) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, Shape{}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -63,8 +63,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_large_1d_to_scalar) { Shape shape{1000000}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -79,9 +79,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_large_1d_to_scalar) v_a[i] = static_cast(random_generator() % 255); r += static_cast(v_a[i]); } - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, v_a); - auto result = backend->create_tensor(element::f32, Shape{}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -93,18 +93,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_large_1d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_columns) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = 
backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -114,9 +114,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_columns) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_6d) { Shape shape_a{2, 6, 4, 5, 7, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2, 4, 5, 3}; - auto axes = make_shared(element::i32, Shape{2}, vector{1, 4}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{1, 4}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -124,10 +124,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_6d) auto backend_ref = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a_wrk = backend_wrk->create_tensor(element::f32, shape_a); - auto a_ref = backend_ref->create_tensor(element::f32, shape_a); - auto result_wrk = backend_wrk->create_tensor(element::f32, shape_rt); - auto result_ref = backend_ref->create_tensor(element::f32, shape_rt); + auto a_wrk = backend_wrk->create_tensor(element::Type_t::f32, shape_a); + auto a_ref = backend_ref->create_tensor(element::Type_t::f32, shape_a); + auto result_wrk = backend_wrk->create_tensor(element::Type_t::f32, shape_rt); + auto result_ref = backend_ref->create_tensor(element::Type_t::f32, shape_rt); vector inp_data(shape_size(shape_a)); iota(inp_data.begin(), inp_data.end(), 1.f); @@ -145,18 +145,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_6d) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -166,18 +166,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows_zero) { Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3, 3})); auto handle = 
backend->compile(f); @@ -189,18 +189,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_cols_zero) { // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3})); auto handle = backend->compile(f); @@ -211,18 +211,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_cols_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_vector_zero) { Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -233,18 +233,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_vector_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_to_scalar_zero_by_zero) { Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -255,19 +255,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_to_scalar_zero_by_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, 
vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -286,19 +286,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_matrix_most_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_matrix_least_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3}; - auto axes = make_shared(element::i32, Shape{}, 2); + auto axes = make_shared(element::Type_t::i32, Shape{}, 2); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -317,19 +317,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_matrix_least_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_vector) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -342,19 +342,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_vector) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_scalar) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -367,19 +367,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_scalar) 
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_scalar_int32) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{0x40000001, 10, 19, 4, 13, 22, 7, 16, 25, 2, 11, 20, 5, 14, 23, 8, 17, 26, 3, 12, 21, 6, 15, 24, 9, 18, 27}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -391,18 +391,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_eliminate_zero_dim) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. @@ -416,18 +416,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_eliminate_zero_dim) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_eliminate_zero_dim_int32) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{3, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. 
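For reduce_sum the pattern is identical, including the integer-typed variants (summing over a zero-sized axis fills the output with the additive identity 0). A short sketch of an int32 sum-to-scalar with the scoped enum is given below; the data is illustrative rather than taken from the tests, and the op::v1::ReduceSum template argument, the copy_data/read_vector helpers and the include paths are assumptions based on the test sources.

#include "ngraph/ngraph.hpp"
#include "runtime/backend.hpp"    // include path assumed
#include "util/test_tools.hpp"    // copy_data / read_vector test helpers (assumed)

using namespace std;
using namespace ngraph;

int main()
{
    // Illustrative data only: sum a 3x3 int32 matrix over both axes -> scalar 45.
    Shape shape_a{3, 3};
    auto A = make_shared<op::Parameter>(element::Type_t::i32, shape_a);
    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
    auto f =
        make_shared<Function>(make_shared<op::v1::ReduceSum>(A, axes, false), ParameterVector{A});

    auto backend = runtime::Backend::create("INTERPRETER");
    auto a = backend->create_tensor(element::Type_t::i32, shape_a);
    copy_data(a, vector<int32_t>{1, 2, 3, 4, 5, 6, 7, 8, 9});
    auto result = backend->create_tensor(element::Type_t::i32, Shape{});   // scalar output

    backend->compile(f)->call_with_validate({result}, {a});
    // read_vector<int32_t>(result) == {45}
    return 0;
}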
@@ -441,18 +441,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_eliminate_zero_dim_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_5d_to_scalar) { Shape shape_a{3, 3, 3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{5}, vector{0, 1, 2, 3, 4}); + auto axes = + make_shared(element::Type_t::i32, Shape{5}, vector{0, 1, 2, 3, 4}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, std::vector(std::pow(3, 5), 1)); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -462,18 +463,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_5d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_5d_to_scalar_int32) { Shape shape_a{3, 3, 3, 3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{5}, vector{0, 1, 2, 3, 4}); + auto axes = + make_shared(element::Type_t::i32, Shape{5}, vector{0, 1, 2, 3, 4}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, std::vector(std::pow(3, 5), 1)); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -483,18 +485,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_5d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_2d_to_scalar_int8) { Shape shape_a{3, 3}; - auto A = make_shared(element::i8, shape_a); + auto A = make_shared(element::Type_t::i8, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape_a); + auto a = backend->create_tensor(element::Type_t::i8, shape_a); copy_data(a, std::vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); - auto result = backend->create_tensor(element::i8, shape_rt); + auto result = backend->create_tensor(element::Type_t::i8, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -505,17 +507,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_trivial_in_double) { Shape shape{4, 3}; Shape rshape{3}; - auto A = make_shared(element::f64, shape); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f64, shape); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f64, shape); + auto 
a = backend->create_tensor(element::Type_t::f64, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result = backend->create_tensor(element::f64, rshape); + auto result = backend->create_tensor(element::Type_t::f64, rshape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -533,10 +535,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_acc) return; } Shape shape_a{10, 10, 10, 30}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{10}; - auto axes = make_shared(element::i32, Shape{3}, vector{1, 2, 3}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{1, 2, 3}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -568,10 +570,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_acc_double) return; } Shape shape_a{10, 10, 20, 300}; - auto A = make_shared(element::f64, shape_a); + auto A = make_shared(element::Type_t::f64, shape_a); Shape shape_rt{10}; - auto axes = make_shared(element::i32, Shape{3}, vector{1, 2, 3}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{1, 2, 3}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -601,10 +603,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_simple_float) return; } Shape shape_a{20}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -632,10 +634,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_simple_double) return; } Shape shape_a{20}; - auto A = make_shared(element::f64, shape_a); + auto A = make_shared(element::Type_t::f64, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -676,9 +678,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_simple_double) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_dynamic) { // Create a graph for f(x,axes:int32) = Sum(x,Convert(axes)). 
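The comment above sketches the graph that the reduce_sum_dynamic hunk builds next: both inputs are Parameters with dynamic partial shapes, the int32 axes are converted to int64, and reduced dimensions are dropped (keep_dims = false). A minimal illustration of that construction under the new enum spelling follows; the template arguments (op::Parameter, op::Convert, op::v1::ReduceSum, Function) are assumptions, since the rendered diff omits them.

// Illustrative sketch only; not part of the diff. Assumes the usual nGraph headers.
#include "ngraph/ngraph.hpp"
using namespace ngraph;

std::shared_ptr<Function> make_dynamic_reduce_sum()
{
    auto x = std::make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
    auto axes = std::make_shared<op::Parameter>(element::Type_t::i32, PartialShape{Dimension::dynamic()});
    auto axes_i64 = std::make_shared<op::Convert>(axes, element::Type_t::i64);
    auto sum = std::make_shared<op::v1::ReduceSum>(x, axes_i64, false); // keep_dims = false
    // The output rank stays dynamic until concrete shapes are fed at call time.
    return std::make_shared<Function>(NodeVector{sum}, ParameterVector{x, axes});
}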
- auto x = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto axes_i64 = make_shared(axes, element::i64); + auto x = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto axes_i64 = make_shared(axes, element::Type_t::i64); auto sum = make_shared(x, axes_i64, false); ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic()); @@ -689,7 +692,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_dynamic) auto ex = backend->compile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); std::vector x_shapes{ Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}}; @@ -707,8 +710,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_dynamic) for (size_t i = 0; i < x_shapes.size(); i++) { - auto t_x = backend->create_tensor(element::f32, x_shapes[i]); - auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()}); + auto t_x = backend->create_tensor(element::Type_t::f32, x_shapes[i]); + auto t_axes = backend->create_tensor(element::Type_t::i32, Shape{axeses[i].size()}); copy_data(t_x, inputs[i]); copy_data(t_axes, axeses[i]); @@ -726,8 +729,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_inf) { Shape shape{7, 4}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -736,7 +739,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_inf) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{-infi, 0, 0, infi}, {infi, 100, -100, -infi}, @@ -746,7 +749,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_inf) {infi, infi, infi, -infi}, {infi, std::nanf(""), 42, infi}}) .get_vector()); - auto result = backend->create_tensor(element::f32, Shape{7}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{7}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -766,17 +769,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_inf) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_to_scalar) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -786,8 +789,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, 
reduce_sum_keep_large_1d_to_scalar) { Shape shape{1000000}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -802,9 +805,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_large_1d_to_scalar) v_a[i] = static_cast(random_generator() % 255); r += static_cast(v_a[i]); } - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, v_a); - auto result = backend->create_tensor(element::f32, Shape{1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -816,18 +819,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_large_1d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_columns) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -837,9 +840,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_columns) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_6d) { Shape shape_a{2, 6, 4, 5, 7, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2, 1, 4, 5, 1, 3}; - auto axes = make_shared(element::i32, Shape{2}, vector{1, 4}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{1, 4}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -847,10 +850,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_6d) auto backend_ref = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a_wrk = backend_wrk->create_tensor(element::f32, shape_a); - auto a_ref = backend_ref->create_tensor(element::f32, shape_a); - auto result_wrk = backend_wrk->create_tensor(element::f32, shape_rt); - auto result_ref = backend_ref->create_tensor(element::f32, shape_rt); + auto a_wrk = backend_wrk->create_tensor(element::Type_t::f32, shape_a); + auto a_ref = backend_ref->create_tensor(element::Type_t::f32, shape_a); + auto result_wrk = backend_wrk->create_tensor(element::Type_t::f32, shape_rt); + auto result_ref = backend_ref->create_tensor(element::Type_t::f32, shape_rt); vector inp_data(shape_size(shape_a)); iota(inp_data.begin(), inp_data.end(), 1.f); @@ -868,18 +871,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_6d) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = 
make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -889,18 +892,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows_zero) { Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3, 3})); auto handle = backend->compile(f); @@ -912,18 +915,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_cols_zero) { // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3})); auto handle = backend->compile(f); @@ -934,18 +937,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_cols_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_vector_zero) { Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -956,18 +959,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_vector_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_to_scalar_zero_by_zero) { 
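Every hunk in this file applies the same mechanical change: the element::f32 / element::i32 / element::f64 shorthands become scoped element::Type_t values, both in node constructors and in backend->create_tensor calls, while the graphs themselves are untouched. A before/after sketch of the pattern, modelled on the nearby reduce_sum_keep_matrix_rows test (keep_dims = true turns the {3, 2} input into a {3, 1} result); the template arguments are again assumed, not shown by the diff.

// Illustrative sketch only; not part of the diff. Same assumed headers/namespaces as above.
void build_keep_dims_row_sum()
{
    Shape shape_a{3, 2};
    auto A = std::make_shared<op::Parameter>(element::Type_t::f32, shape_a);                  // was element::f32
    auto axes = op::Constant::create(element::Type_t::i32, Shape{}, std::vector<int32_t>{1}); // was element::i32
    auto f = std::make_shared<Function>(std::make_shared<op::v1::ReduceSum>(A, axes, true),
                                        ParameterVector{A});
    auto backend = runtime::Backend::create("INTERPRETER");
    auto a = backend->create_tensor(element::Type_t::f32, shape_a);                           // was element::f32
    auto result = backend->create_tensor(element::Type_t::f32, Shape{3, 1});                  // keep_dims keeps axis 1 as size 1
}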
Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -978,19 +981,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_to_scalar_zero_by_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 3, 3}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1009,19 +1012,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_matrix_most_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_matrix_least_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3, 1}; - auto axes = make_shared(element::i32, Shape{}, 2); + auto axes = make_shared(element::Type_t::i32, Shape{}, 2); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1040,19 +1043,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_matrix_least_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_vector) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1, 3}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = 
backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1065,19 +1068,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_vector) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_scalar) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1090,19 +1093,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_scalar_int32) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{0x40000001, 10, 19, 4, 13, 22, 7, 16, 25, 2, 11, 20, 5, 14, 23, 8, 17, 26, 3, 12, 21, 6, 15, 24, 9, 18, 27}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1114,18 +1117,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_eliminate_zero_dim) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); // Overwrite the initial result vector to make sure we're not just 
coincidentally getting the // right value. @@ -1139,18 +1142,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_eliminate_zero_dim) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_eliminate_zero_dim_int32) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{3, 1, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. @@ -1164,18 +1167,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_eliminate_zero_dim_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_5d_to_scalar) { Shape shape_a{3, 3, 3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1, 1, 1, 1}; - auto axes = make_shared(element::i32, Shape{5}, vector{0, 1, 2, 3, 4}); + auto axes = + make_shared(element::Type_t::i32, Shape{5}, vector{0, 1, 2, 3, 4}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, std::vector(std::pow(3, 5), 1)); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1185,18 +1189,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_5d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_5d_to_scalar_int32) { Shape shape_a{3, 3, 3, 3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{1, 1, 1, 1, 1}; - auto axes = make_shared(element::i32, Shape{5}, vector{0, 1, 2, 3, 4}); + auto axes = + make_shared(element::Type_t::i32, Shape{5}, vector{0, 1, 2, 3, 4}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, std::vector(std::pow(3, 5), 1)); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1206,18 +1211,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_5d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_2d_to_scalar_int8) { Shape shape_a{3, 3}; - auto A = make_shared(element::i8, shape_a); + auto A = make_shared(element::Type_t::i8, shape_a); Shape shape_rt{1, 1}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = 
make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape_a); + auto a = backend->create_tensor(element::Type_t::i8, shape_a); copy_data(a, std::vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); - auto result = backend->create_tensor(element::i8, shape_rt); + auto result = backend->create_tensor(element::Type_t::i8, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1228,17 +1233,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_trivial_in_double) { Shape shape{4, 3}; Shape rshape{1, 3}; - auto A = make_shared(element::f64, shape); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f64, shape); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f64, shape); + auto a = backend->create_tensor(element::Type_t::f64, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result = backend->create_tensor(element::f64, rshape); + auto result = backend->create_tensor(element::Type_t::f64, rshape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1256,10 +1261,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_acc) return; } Shape shape_a{10, 10, 10, 30}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{10, 1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{1, 2, 3}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{1, 2, 3}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1291,10 +1296,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_acc_double) return; } Shape shape_a{10, 10, 20, 300}; - auto A = make_shared(element::f64, shape_a); + auto A = make_shared(element::Type_t::f64, shape_a); Shape shape_rt{10, 1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{1, 2, 3}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{1, 2, 3}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1324,10 +1329,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_simple_float) return; } Shape shape_a{20}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1355,10 +1360,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_simple_double) return; } Shape shape_a{20}; - auto A = make_shared(element::f64, shape_a); + auto A = make_shared(element::Type_t::f64, shape_a); Shape shape_rt{1}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1399,9 +1404,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_simple_double) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_dynamic) { // Create a graph for f(x,axes:int32) = Sum(x,Convert(axes)). 
- auto x = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto axes_i64 = make_shared(axes, element::i64); + auto x = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto axes_i64 = make_shared(axes, element::Type_t::i64); auto sum = make_shared(x, axes_i64, true); ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic()); @@ -1412,7 +1418,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_dynamic) auto ex = backend->compile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); std::vector x_shapes{ Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}}; @@ -1430,8 +1436,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_dynamic) for (size_t i = 0; i < x_shapes.size(); i++) { - auto t_x = backend->create_tensor(element::f32, x_shapes[i]); - auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()}); + auto t_x = backend->create_tensor(element::Type_t::f32, x_shapes[i]); + auto t_axes = backend->create_tensor(element::Type_t::i32, Shape{axeses[i].size()}); copy_data(t_x, inputs[i]); copy_data(t_axes, axeses[i]); @@ -1449,8 +1455,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_inf) { Shape shape{7, 4}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1459,7 +1465,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_inf) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{-infi, 0, 0, infi}, {infi, 100, -100, -infi}, @@ -1469,7 +1475,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_inf) {infi, infi, infi, -infi}, {infi, std::nanf(""), 42, infi}}) .get_vector()); - auto result = backend->create_tensor(element::f32, Shape{7, 1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{7, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1488,8 +1494,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_inf) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_columns_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -1497,9 +1503,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_columns_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); 
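The reduce_sum_keep_inf hunk above feeds rows containing plus and minus infinity as well as NaN, then checks each row of the result with isinf/isnan. That follows directly from IEEE-754 float arithmetic: a row that contains both +inf and -inf (or any NaN) sums to NaN, while a row with infinities of only one sign sums to that infinity. A standalone sketch of the arithmetic, in plain C++ with no nGraph dependency:

// Illustrative sketch only; not part of the diff.
#include <cmath>
#include <iostream>
#include <limits>

int main()
{
    const float inf = std::numeric_limits<float>::infinity();
    const float mixed[4] = {-inf, 0.f, 0.f, inf};         // adding +inf to -inf gives NaN
    const float one_sided[4] = {inf, 100.f, -100.f, 1.f}; // stays +inf
    float s_mixed = 0.f, s_one_sided = 0.f;
    for (float v : mixed) s_mixed += v;
    for (float v : one_sided) s_one_sided += v;
    std::cout << std::isnan(s_mixed) << ' ' << std::isinf(s_one_sided) << '\n'; // prints "1 1"
    return 0;
}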
handle->call_with_validate({result}, {a}); @@ -1508,8 +1514,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_columns_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -1517,9 +1523,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1528,8 +1534,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_columns_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1537,9 +1543,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_columns_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1548,8 +1554,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_columns_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1557,9 +1563,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/region_yolo.in.cpp b/ngraph/test/backend/region_yolo.in.cpp index 8d520c4929acc1..74fb92423910a4 100644 --- a/ngraph/test/backend/region_yolo.in.cpp +++ b/ngraph/test/backend/region_yolo.in.cpp @@ -45,7 +45,7 @@ NGRAPH_TEST(${BACKEND_NAME}, region_yolo_v2_caffe) Shape input_shape{batch, channels, height, width}; Shape output_shape{batch, 
channels * height * width}; - auto A = make_shared(element::f32, input_shape); + auto A = make_shared(element::Type_t::f32, input_shape); auto R = make_shared(A, coords, classes, num, true, mask, 1, 3); auto f = make_shared(R, ParameterVector{A}); @@ -71,7 +71,7 @@ NGRAPH_TEST(${BACKEND_NAME}, region_yolo_v3_mxnet) Shape shape{batch, channels, height, width}; const auto count = shape_size(shape); - const auto A = make_shared(element::f32, shape); + const auto A = make_shared(element::Type_t::f32, shape); const auto R = make_shared(A, coords, classes, num, false, mask, 1, 3); const auto f = make_shared(R, ParameterVector{A}); diff --git a/ngraph/test/backend/relu.in.cpp b/ngraph/test/backend/relu.in.cpp index e36f45240f56fa..00aa5d4e51d046 100644 --- a/ngraph/test/backend/relu.in.cpp +++ b/ngraph/test/backend/relu.in.cpp @@ -35,16 +35,16 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, relu_2Dfprop) { auto shape_a = Shape{2, 5}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); auto relu = make_shared(A); auto shape_rt = Shape{2, 5}; auto f = make_shared(relu, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); vector expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}; auto handle = backend->compile(f); @@ -55,16 +55,16 @@ NGRAPH_TEST(${BACKEND_NAME}, relu_2Dfprop) NGRAPH_TEST(${BACKEND_NAME}, relu_2Dfprop_i32) { auto shape_a = Shape{2, 5}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); auto relu = make_shared(A); auto shape_rt = Shape{2, 5}; auto f = make_shared(relu, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 8, -8, 17, -2, 1, 8, -8, 17, -1}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); vector expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}; auto handle = backend->compile(f); @@ -75,16 +75,16 @@ NGRAPH_TEST(${BACKEND_NAME}, relu_2Dfprop_i32) NGRAPH_TEST(${BACKEND_NAME}, relu_4Dfprop) { auto shape_a = Shape{2, 2, 2, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); auto relu = make_shared(A); auto shape_rt = Shape{2, 2, 2, 2}; auto f = make_shared(relu, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); vector expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1}; auto handle = backend->compile(f); @@ -95,17 +95,17 @@ NGRAPH_TEST(${BACKEND_NAME}, relu_4Dfprop) NGRAPH_TEST(${BACKEND_NAME}, fuse_max_with_constant_zero_input_as_relu) { auto shape_a = Shape{2, 5}; - auto A = op::Constant::create(element::f32, shape_a, {0, 0, 0, 0, 0, 0, 
0, 0, 0, 0}); - auto B = make_shared(element::f32, shape_a); + auto A = op::Constant::create(element::Type_t::f32, shape_a, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); + auto B = make_shared(element::Type_t::f32, shape_a); auto max = make_shared(A, B); auto shape_rt = Shape{2, 5}; auto f = make_shared(max, ParameterVector{B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto b = backend->create_tensor(element::f32, shape_a); + auto b = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(b, vector{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); vector expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}; auto handle = backend->compile(f); diff --git a/ngraph/test/backend/reorg_yolo.in.cpp b/ngraph/test/backend/reorg_yolo.in.cpp index 0389a2c4b25cc4..229407e8a85234 100644 --- a/ngraph/test/backend/reorg_yolo.in.cpp +++ b/ngraph/test/backend/reorg_yolo.in.cpp @@ -48,7 +48,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reorg_yolo_stride_2) { // in_shape [N,C,H,W] const auto in_shape = Shape{1, 8, 4, 4}; - auto p = make_shared(element::f32, in_shape); + auto p = make_shared(element::Type_t::f32, in_shape); size_t stride = 2; auto reorg_yolo = make_shared(p, Strides{stride}); auto fun = make_shared(OutputVector{reorg_yolo}, ParameterVector{p}); @@ -78,7 +78,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reorg_yolo_stride_3) { // in_shape [N,C,H,W] const auto in_shape = Shape{1, 9, 3, 3}; - auto p = make_shared(element::f32, in_shape); + auto p = make_shared(element::Type_t::f32, in_shape); size_t stride = 3; auto reorg_yolo = make_shared(p, Strides{stride}); auto fun = make_shared(OutputVector{reorg_yolo}, ParameterVector{p}); diff --git a/ngraph/test/backend/reshape.in.cpp b/ngraph/test/backend/reshape.in.cpp index 130629430b54e4..5034e8e20ecc3d 100644 --- a/ngraph/test/backend/reshape.in.cpp +++ b/ngraph/test/backend/reshape.in.cpp @@ -47,18 +47,18 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, reshape_t2v_012) { Shape shape_a{2, 2, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{12}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -70,18 +70,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_t2v_012) NGRAPH_TEST(${BACKEND_NAME}, reshape_t2s_012) { Shape shape_a{1, 1, 1}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output 
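The reshape tests in this file pass the target shape as a u64 Constant to v1::Reshape; the trailing bool is the special_zero flag. With special_zero = false (as in the hunks here) the pattern is taken literally, while with special_zero = true (the reshape_special_zero test further down) a 0 in the pattern means "copy that dimension from the input", so a {2, 2, 5, 5} tensor reshaped with pattern {0, 5, 0, 2} comes out as {2, 5, 5, 2}. A sketch with the same assumed template arguments as above:

// Illustrative sketch only; not part of the diff.
void build_special_zero_reshape()
{
    auto A = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 2, 5, 5});
    auto pattern = op::Constant::create(element::Type_t::u64, Shape{4}, std::vector<uint64_t>{0, 5, 0, 2});
    auto r = std::make_shared<op::v1::Reshape>(A, pattern, true); // special_zero = true
    // r->get_output_shape(0) == Shape{2, 5, 5, 2}: the zeros copied dims 0 and 2 of the input.
}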
- auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{6}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -92,18 +92,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_t2s_012) NGRAPH_TEST(${BACKEND_NAME}, reshape_t2s_120) { Shape shape_a{1, 1, 1}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{6}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -114,18 +114,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_t2s_120) NGRAPH_TEST(${BACKEND_NAME}, reshape_s2t) { Shape shape_a{}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{1, 1, 1, 1, 1, 1}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{42}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -136,18 +136,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_s2t) NGRAPH_TEST(${BACKEND_NAME}, reshape_s2t1) { Shape shape_a{}; - auto A = make_shared(element::boolean, shape_a); + auto A = make_shared(element::Type_t::boolean, shape_a); Shape shape_r{1}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape_a); + auto a = backend->create_tensor(element::Type_t::boolean, shape_a); copy_data(a, vector{42}); - auto result = backend->create_tensor(element::boolean, shape_r); + auto result = backend->create_tensor(element::Type_t::boolean, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -157,18 +157,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_s2t1) NGRAPH_TEST(${BACKEND_NAME}, reshape_v2m_col) { Shape shape_a{3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 1}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, 
shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -179,18 +179,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_v2m_col) NGRAPH_TEST(${BACKEND_NAME}, reshape_v2m_row) { Shape shape_a{3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{1, 3}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -201,18 +201,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_v2m_row) NGRAPH_TEST(${BACKEND_NAME}, reshape_v2t_middle) { Shape shape_a{3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{1, 3, 1}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -223,18 +223,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_v2t_middle) NGRAPH_TEST(${BACKEND_NAME}, reshape_m2m_same) { Shape shape_a{3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 3}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -247,9 +247,9 @@ NGRAPH_TEST(${BACKEND_NAME}, 
reshape_special_zero) { Shape shape_a{2, 2, 5, 5}; Shape shape_r{2, 5, 5, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); auto r = make_shared( - A, op::Constant::create(element::u64, {4}, Shape{0, 5, 0, 2}), true); + A, op::Constant::create(element::Type_t::u64, {4}, Shape{0, 5, 0, 2}), true); auto f = make_shared(r, ParameterVector{A}); vector a_data(shape_size(shape_a)); @@ -258,9 +258,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_special_zero) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, a_data); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -311,23 +311,23 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_special_zero) NGRAPH_TEST(${BACKEND_NAME}, reshape_6d) { Shape shape_a{2, 2, 3, 3, 2, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 2, 2, 4, 3, 2}; vector a_data(shape_size(shape_a)); iota(a_data.begin(), a_data.end(), 1.f); auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, a_data); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -338,7 +338,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_6d) NGRAPH_TEST(${BACKEND_NAME}, builder_reshape_1D_to_scalar) { const Shape input_shape{1}; - const auto input = make_shared(element::f32, input_shape); + const auto input = make_shared(element::Type_t::f32, input_shape); const auto reshape_builder = builder::opset1::reshape(input, Shape{}); auto function = make_shared(reshape_builder, ParameterVector{input}); @@ -353,7 +353,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_reshape_1D_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, builder_reshape_3D_to_scalar) { const Shape input_shape{1, 1, 1}; - const auto input = make_shared(element::f32, input_shape); + const auto input = make_shared(element::Type_t::f32, input_shape); const auto reshape_builder = builder::opset1::reshape(input, Shape{}); auto function = make_shared(reshape_builder, ParameterVector{input}); @@ -370,22 +370,22 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_reshape_3D_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reshape_shufflenet_5d) { Shape shape_a{1, 112, 56, 56}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{1, 4, 28, 56, 56}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_c{1, 28, 4, 56, 56}; - auto C = make_shared(element::f32, shape_c); + auto C = make_shared(element::Type_t::f32, shape_c); Shape shape_r{1, 112, 56, 56}; vector a_data(shape_size(shape_a)); iota(a_data.begin(), a_data.end(), 1.f); auto r0 = make_shared( - A, 
op::Constant::create(element::u64, {shape_b.size()}, shape_b), false); + A, op::Constant::create(element::Type_t::u64, {shape_b.size()}, shape_b), false); auto r1 = make_shared( - r0, op::Constant::create(element::u64, {shape_c.size()}, shape_c), false); + r0, op::Constant::create(element::Type_t::u64, {shape_c.size()}, shape_c), false); auto r2 = make_shared( - r1, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + r1, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r2, ParameterVector{A}); auto ref_func = clone_function(*f); diff --git a/ngraph/test/backend/reverse.in.cpp b/ngraph/test/backend/reverse.in.cpp index 90caa46a9b9d6b..e8e21cfe3418e6 100644 --- a/ngraph/test/backend/reverse.in.cpp +++ b/ngraph/test/backend/reverse.in.cpp @@ -32,18 +32,18 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, reverse_1d) { Shape shape{8}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX), + A, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -55,19 +55,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_1d) NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_0) { Shape shape{4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX), + A, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}).get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -80,19 +80,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_0) NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_1) { Shape shape{4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX), + A, op::Constant::create(element::Type_t::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}).get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = 
backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -105,20 +105,21 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_1) NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_1_mask) { Shape shape{4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( - make_shared(A, - op::Constant::create(element::boolean, {2}, {false, true}), - op::v1::Reverse::Mode::MASK), + make_shared( + A, + op::Constant::create(element::Type_t::boolean, {2}, {false, true}), + op::v1::Reverse::Mode::MASK), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}).get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -131,19 +132,20 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_1_mask) NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_01) { Shape shape{4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( - make_shared( - A, op::Constant::create(element::i64, {2}, {0, 1}), op::v1::Reverse::Mode::INDEX), + make_shared(A, + op::Constant::create(element::Type_t::i64, {2}, {0, 1}), + op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}).get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -156,20 +158,21 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_01) NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_01_mask) { Shape shape{4, 3}; - auto A = make_shared(element::f32, shape); - auto f = make_shared( - make_shared(A, - op::Constant::create(element::boolean, {2}, {true, true}), - op::v1::Reverse::Mode::MASK), - ParameterVector{A}); + auto A = make_shared(element::Type_t::f32, shape); + auto f = + make_shared(make_shared( + A, + op::Constant::create(element::Type_t::boolean, {2}, {true, true}), + op::v1::Reverse::Mode::MASK), + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}).get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -182,21 +185,21 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_01_mask) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_0) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::i64, {1}, 
{0}), op::v1::Reverse::Mode::INDEX), + A, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -211,21 +214,21 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_0) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_1) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX), + A, op::Constant::create(element::Type_t::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -240,21 +243,21 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_1) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_2) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::i64, {1}, {2}), op::v1::Reverse::Mode::INDEX), + A, op::Constant::create(element::Type_t::i64, {1}, {2}), op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -269,21 +272,22 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_2) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_01) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( - make_shared( - A, op::Constant::create(element::i64, {2}, {0, 1}), op::v1::Reverse::Mode::INDEX), + make_shared(A, + op::Constant::create(element::Type_t::i64, {2}, {0, 1}), + op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, 
{6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -298,21 +302,22 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_01) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_02) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( - make_shared( - A, op::Constant::create(element::i64, {2}, {0, 2}), op::v1::Reverse::Mode::INDEX), + make_shared(A, + op::Constant::create(element::Type_t::i64, {2}, {0, 2}), + op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -327,21 +332,22 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_02) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_12) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( - make_shared( - A, op::Constant::create(element::i64, {2}, {1, 2}), op::v1::Reverse::Mode::INDEX), + make_shared(A, + op::Constant::create(element::Type_t::i64, {2}, {1, 2}), + op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -356,21 +362,22 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_12) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_012) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( - make_shared( - A, op::Constant::create(element::i64, {3}, {0, 1, 2}), op::v1::Reverse::Mode::INDEX), + make_shared(A, + op::Constant::create(element::Type_t::i64, {3}, {0, 1, 2}), + op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -384,8 +391,9 @@ NGRAPH_TEST(${BACKEND_NAME}, 
reverse_3d_012) NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_incorrect_rev_axes_rank_index_mode) { - const auto Data = make_shared(element::f32, Shape{2, 2, 2}); - const auto Rev_Axes = make_shared(element::i64, Shape{1, 1}); // correct: 1D + const auto Data = make_shared(element::Type_t::f32, Shape{2, 2, 2}); + const auto Rev_Axes = + make_shared(element::Type_t::i64, Shape{1, 1}); // correct: 1D EXPECT_THROW(make_shared( make_shared(Data, Rev_Axes, op::v1::Reverse::Mode::INDEX), @@ -395,8 +403,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_incorrect_rev_axes_rank_index_mode) NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_incorrect_rev_axes_elems_mask_mode) { - const auto Data = make_shared(element::f32, Shape{2, 2, 2}); - const auto Rev_Axes = make_shared(element::boolean, Shape{2}); // correct: 3 + const auto Data = make_shared(element::Type_t::f32, Shape{2, 2, 2}); + const auto Rev_Axes = + make_shared(element::Type_t::boolean, Shape{2}); // correct: 3 EXPECT_THROW(make_shared(Data, Rev_Axes, op::v1::Reverse::Mode::MASK), ngraph::NodeValidationFailure); @@ -404,8 +413,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_incorrect_rev_axes_elems_mask_mode) NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_axes_out_of_bounds) { - const auto Data = make_shared(element::f32, Shape{2, 2, 2}); - const auto Rev_Axes = op::Constant::create(element::i64, Shape{2}, {1, 10}); + const auto Data = make_shared(element::Type_t::f32, Shape{2, 2, 2}); + const auto Rev_Axes = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 10}); EXPECT_THROW(make_shared(Data, Rev_Axes, op::v1::Reverse::Mode::INDEX), ngraph::NodeValidationFailure); @@ -413,8 +422,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_axes_out_of_bounds) NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_too_many_axes) { - const auto Data = make_shared(element::f32, Shape{2, 2, 2}); - const auto Rev_Axes = op::Constant::create(element::i64, Shape{4}, {0, 1, 2, 3}); + const auto Data = make_shared(element::Type_t::f32, Shape{2, 2, 2}); + const auto Rev_Axes = op::Constant::create(element::Type_t::i64, Shape{4}, {0, 1, 2, 3}); EXPECT_THROW(make_shared(Data, Rev_Axes, op::v1::Reverse::Mode::INDEX), ngraph::NodeValidationFailure); diff --git a/ngraph/test/backend/reverse_sequence.in.cpp b/ngraph/test/backend/reverse_sequence.in.cpp index 1fcca9cf820a07..aa76919bf4e693 100644 --- a/ngraph/test/backend/reverse_sequence.in.cpp +++ b/ngraph/test/backend/reverse_sequence.in.cpp @@ -34,8 +34,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n2c3h4w2) { Shape shape{2, 3, 4, 2}; Shape seq_len_shape{4}; - auto A = make_shared(element::i32, shape); - auto B = make_shared(element::i32, seq_len_shape); + auto A = make_shared(element::Type_t::i32, shape); + auto B = make_shared(element::Type_t::i32, seq_len_shape); size_t batch_axis = 2; size_t sequence_axis = 1; @@ -46,10 +46,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n2c3h4w2) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::i32, shape); - shared_ptr b = backend->create_tensor(element::i32, seq_len_shape); + shared_ptr a = backend->create_tensor(element::Type_t::i32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::i32, seq_len_shape); - shared_ptr result = backend->create_tensor(element::i32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::i32, shape); std::vector input{ 0, 0, 3, 0, 6, 0, 9, 0, 1, 0, 4, 0, 7, 0, 10, 0, 2, 0, 5, 0, 8, 0, 11, 0, @@ -74,9 +74,9 @@ NGRAPH_TEST(${BACKEND_NAME}, 
reverse_sequence_n2c3h4w2) NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n4c3h2w2) { Shape shape{4, 3, 2, 2}; - auto A = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); Shape seq_len_shape{4}; - auto B = make_shared(element::i32, seq_len_shape); + auto B = make_shared(element::Type_t::i32, seq_len_shape); size_t batch_axis = 0; size_t sequence_axis = 1; @@ -88,10 +88,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n4c3h2w2) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::i32, shape); - shared_ptr b = backend->create_tensor(element::i32, seq_len_shape); + shared_ptr a = backend->create_tensor(element::Type_t::i32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::i32, seq_len_shape); - shared_ptr result = backend->create_tensor(element::i32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::i32, shape); std::vector seq_lenghts{1, 2, 3, 3}; copy_data(b, seq_lenghts); @@ -114,9 +114,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n4c3h2w2) NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n4d2c3h2w2) { Shape shape{4, 2, 3, 2, 2}; - auto A = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); Shape seq_len_shape{4}; - auto B = make_shared(element::i32, seq_len_shape); + auto B = make_shared(element::Type_t::i32, seq_len_shape); size_t batch_axis = 0; size_t sequence_axis = 2; @@ -128,10 +128,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n4d2c3h2w2) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::i32, shape); - shared_ptr b = backend->create_tensor(element::i32, seq_len_shape); + shared_ptr a = backend->create_tensor(element::Type_t::i32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::i32, seq_len_shape); - shared_ptr result = backend->create_tensor(element::i32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::i32, shape); std::vector input{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -161,8 +161,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_negative_axes) { Shape shape{2, 3, 4, 2}; Shape seq_len_shape{4}; - auto A = make_shared(element::i32, shape); - auto B = make_shared(element::i32, seq_len_shape); + auto A = make_shared(element::Type_t::i32, shape); + auto B = make_shared(element::Type_t::i32, seq_len_shape); int64_t batch_axis = -2; int64_t sequence_axis = -3; @@ -173,10 +173,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_negative_axes) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::i32, shape); - shared_ptr b = backend->create_tensor(element::i32, seq_len_shape); + shared_ptr a = backend->create_tensor(element::Type_t::i32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::i32, seq_len_shape); - shared_ptr result = backend->create_tensor(element::i32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::i32, shape); std::vector input{ 0, 0, 3, 0, 6, 0, 9, 0, 1, 0, 4, 0, 7, 0, 10, 0, 2, 0, 5, 0, 8, 0, 11, 0, diff --git a/ngraph/test/backend/roi_pooling.in.cpp b/ngraph/test/backend/roi_pooling.in.cpp new file mode 100644 index 00000000000000..cd1814b202429a --- /dev/null +++ b/ngraph/test/backend/roi_pooling.in.cpp @@ 
-0,0 +1,216 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include "gtest/gtest.h"
+#include "ngraph/ngraph.hpp"
+#include "util/engine/test_engines.hpp"
+#include "util/test_case.hpp"
+#include "util/test_control.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+using namespace std;
+using namespace ngraph;
+
+static string s_manifest = "${MANIFEST}";
+using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
+
+NGRAPH_TEST(${BACKEND_NAME}, roi_pooling_1x1_max)
+{
+    const int H = 6;
+    const int W = 6;
+    const int image_size = H * W;
+    const int channels = 3;
+    const int num_rois = 3;
+
+    const int pooled_h = 1;
+    const int pooled_w = 1;
+    const float spatial_scale = 1.f;
+
+    Shape feat_maps_shape{1, channels, H, W};
+    Shape rois_shape{num_rois, 5};
+    Shape pooled_shape{pooled_h, pooled_w};
+    Shape output_shape{num_rois, channels, pooled_h, pooled_w};
+
+    const auto feat_maps = make_shared<op::Parameter>(element::Type_t::f32, feat_maps_shape);
+    const auto rois = make_shared<op::Parameter>(element::Type_t::f32, rois_shape);
+    const auto roi_pooling =
+        make_shared<op::v0::ROIPooling>(feat_maps, rois, pooled_shape, spatial_scale, "max");
+    const auto f = make_shared<Function>(roi_pooling, ParameterVector{feat_maps, rois});
+
+    vector<float> feat_maps_vect;
+    for (unsigned int i = 0; i < channels * image_size; i++)
+    {
+        feat_maps_vect.push_back(1.f * i / 10);
+    }
+
+    vector<float> rois_vect = {0, 1, 1, 2, 3, 0, 1, 1, 2, 3, 0, 1, 1, 2, 3};
+
+    const vector<float> expected_vect = {2.0f, 5.6f, 9.2f, 2.0f, 5.6f, 9.2f, 2.0f, 5.6f, 9.2f};
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<float>(feat_maps_shape, feat_maps_vect);
+    test_case.add_input<float>(rois_shape, rois_vect);
+    test_case.add_expected_output<float>(output_shape, expected_vect);
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, roi_pooling_2x2_max)
+{
+    const int H = 6;
+    const int W = 6;
+    const int image_size = H * W;
+    const int channels = 1;
+    const int num_rois = 3;
+
+    const int pooled_h = 2;
+    const int pooled_w = 2;
+    const float spatial_scale = 1.f;
+
+    Shape feat_maps_shape{1, channels, H, W};
+    Shape rois_shape{num_rois, 5};
+    Shape pooled_shape{pooled_h, pooled_w};
+    Shape output_shape{num_rois, channels, pooled_h, pooled_w};
+
+    const auto feat_maps = make_shared<op::Parameter>(element::Type_t::f32, feat_maps_shape);
+    const auto rois = make_shared<op::Parameter>(element::Type_t::f32, rois_shape);
+    const auto roi_pooling =
+        make_shared<op::v0::ROIPooling>(feat_maps, rois, pooled_shape, spatial_scale, "max");
+    const auto f = make_shared<Function>(roi_pooling, ParameterVector{feat_maps, rois});
+
+    vector<float> feat_maps_vect;
+    for (unsigned int i = 0; i < channels * image_size; i++)
+    {
+        feat_maps_vect.push_back(1.f * i / 10);
+    }
+
+    vector<float> rois_vect = {0, 1, 1, 3, 3, 0, 1, 2, 2, 4, 0, 0, 1, 4, 5};
+
+    const vector<float> expected_vect = {
+        1.4f, 1.5f, 2.0f, 2.1f, 1.9f, 2.0f, 2.5f, 2.6f, 2.0f, 2.2f, 3.2f, 3.4f};
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<float>(feat_maps_shape, feat_maps_vect);
+    test_case.add_input<float>(rois_shape, rois_vect);
+    test_case.add_expected_output<float>(output_shape, expected_vect);
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, roi_pooling_1x1_bilinear)
+{
+    const int H = 6;
+    const int W = 6;
+    const int image_size = H * W;
+    const int channels = 3;
+    const int num_rois = 2;
+
+    const int pooled_h = 1;
+    const int pooled_w = 1;
+    const float spatial_scale = 1.f;
+
+    Shape feat_maps_shape{1, channels, H, W};
+    Shape rois_shape{num_rois, 5};
+    Shape pooled_shape{pooled_h, pooled_w};
+    Shape output_shape{num_rois, channels, pooled_h, pooled_w};
+
+    const auto feat_maps = make_shared<op::Parameter>(element::Type_t::f32, feat_maps_shape);
+    const auto rois = make_shared<op::Parameter>(element::Type_t::f32, rois_shape);
+    const auto roi_pooling =
+        make_shared<op::v0::ROIPooling>(feat_maps, rois, pooled_shape, spatial_scale, "bilinear");
+    const auto f = make_shared<Function>(roi_pooling, ParameterVector{feat_maps, rois});
+
+    vector<float> feat_maps_vect;
+    for (unsigned int i = 0; i < channels * image_size; i++)
+    {
+        feat_maps_vect.push_back(1.f * i / 10);
+    }
+
+    vector<float> rois_vect = {0, 0.2, 0.2, 0.4, 0.4, 0, 0.2, 0.2, 0.6, 0.6};
+
+    const vector<float> expected_vect = {1.05f, 4.65f, 8.25f, 1.4f, 5.0f, 8.6f};
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<float>(feat_maps_shape, feat_maps_vect);
+    test_case.add_input<float>(rois_shape, rois_vect);
+    test_case.add_expected_output<float>(output_shape, expected_vect);
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, roi_pooling_2x2_bilinear)
+{
+    const int H = 8;
+    const int W = 8;
+    const int image_size = H * W;
+    const int channels = 1;
+    const int num_rois = 3;
+
+    const int pooled_h = 2;
+    const int pooled_w = 2;
+    const float spatial_scale = 1.f;
+
+    Shape feat_maps_shape{1, channels, H, W};
+    Shape rois_shape{num_rois, 5};
+    Shape pooled_shape{pooled_h, pooled_w};
+    Shape output_shape{num_rois, channels, pooled_h, pooled_w};
+
+    const auto feat_maps = make_shared<op::Parameter>(element::Type_t::f32, feat_maps_shape);
+    const auto rois = make_shared<op::Parameter>(element::Type_t::f32, rois_shape);
+    const auto roi_pooling =
+        make_shared<op::v0::ROIPooling>(feat_maps, rois, pooled_shape, spatial_scale, "bilinear");
+    const auto f = make_shared<Function>(roi_pooling, ParameterVector{feat_maps, rois});
+
+    vector<float> feat_maps_vect;
+    for (unsigned int i = 0; i < channels * image_size; i++)
+    {
+        feat_maps_vect.push_back(1.f * i / 10);
+    }
+
+    vector<float> rois_vect = {0.f,
+                               0.15f,
+                               0.2f,
+                               0.75f,
+                               0.8f,
+                               0.f,
+                               0.15f,
+                               0.2f,
+                               0.75f,
+                               0.8f,
+                               0.f,
+                               0.15f,
+                               0.2f,
+                               0.75f,
+                               0.8f};
+
+    const auto count = shape_size(output_shape);
+    const vector<float> expected_vect = {1.225f,
+                                         1.645f,
+                                         4.585f,
+                                         5.005f,
+                                         1.225f,
+                                         1.645f,
+                                         4.585f,
+                                         5.005f,
+                                         1.225f,
+                                         1.645f,
+                                         4.585f,
+                                         5.005f};
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<float>(feat_maps_shape, feat_maps_vect);
+    test_case.add_input<float>(rois_shape, rois_vect);
+    test_case.add_expected_output<float>(output_shape, expected_vect);
+    test_case.run();
+}
diff --git a/ngraph/test/backend/round.in.cpp b/ngraph/test/backend/round.in.cpp
index 3e23132ef35b2b..392492b4a53461 100644
--- a/ngraph/test/backend/round.in.cpp
+++ b/ngraph/test/backend/round.in.cpp
@@ -34,15 +34,15 @@ static string s_manifest = "${MANIFEST}";
 NGRAPH_TEST(${BACKEND_NAME}, round)
 {
     Shape shape{5};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto f = make_shared<Function>(
         make_shared<op::v5::Round>(A, op::v5::Round::RoundMode::HALF_TO_EVEN), ParameterVector{A});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto a =
backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0.9f, 2.5f, 2.3f, 1.5f, -4.5f}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -54,16 +54,16 @@ NGRAPH_TEST(${BACKEND_NAME}, round) NGRAPH_TEST(${BACKEND_NAME}, round_away_from_zero) { Shape shape{5}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared(A, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0.9f, 2.5f, 2.3f, 1.5f, -4.5f}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -75,13 +75,13 @@ NGRAPH_TEST(${BACKEND_NAME}, round_away_from_zero) NGRAPH_TEST(${BACKEND_NAME}, round_2D) { Shape shape{3, 5}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared(A, op::v5::Round::RoundMode::HALF_TO_EVEN), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0.1f, 0.5f, @@ -98,7 +98,7 @@ NGRAPH_TEST(${BACKEND_NAME}, round_2D) -2.2f, -2.5f, -2.8f}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -113,16 +113,16 @@ NGRAPH_TEST(${BACKEND_NAME}, round_int64) { // This tests large numbers that will not fit in a double Shape shape{3}; - auto A = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); auto f = make_shared( make_shared(A, op::v5::Round::RoundMode::HALF_TO_EVEN), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::i64, shape); + auto a = backend->create_tensor(element::Type_t::i64, shape); vector expected{0, 1, 0x4000000000000001}; copy_data(a, expected); - auto result = backend->create_tensor(element::i64, shape); + auto result = backend->create_tensor(element::Type_t::i64, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/select.in.cpp b/ngraph/test/backend/select.in.cpp index 1affb447e0320f..9530b3fceda1da 100644 --- a/ngraph/test/backend/select.in.cpp +++ b/ngraph/test/backend/select.in.cpp @@ -34,21 +34,21 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, select) { Shape shape{2, 2, 2}; - auto A = make_shared(element::boolean, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::boolean, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B, C), ParameterVector{A, B, C}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for 
input/output - auto a = backend->create_tensor(element::boolean, shape); + auto a = backend->create_tensor(element::Type_t::boolean, shape); copy_data(a, vector{0, 1, 1, 0, 0, 1, 0, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 3, 4, 5, 6, 7, 8}); - auto c = backend->create_tensor(element::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); copy_data(c, vector{11, 12, 13, 14, 15, 16, 17, 18}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); @@ -59,21 +59,21 @@ NGRAPH_TEST(${BACKEND_NAME}, select) NGRAPH_TEST(${BACKEND_NAME}, select_v1) { - auto A = make_shared(element::boolean, Shape{4}); - auto B = make_shared(element::f32, Shape{4}); - auto C = make_shared(element::f32, Shape{2, 4}); + auto A = make_shared(element::Type_t::boolean, Shape{4}); + auto B = make_shared(element::Type_t::f32, Shape{4}); + auto C = make_shared(element::Type_t::f32, Shape{2, 4}); auto f = make_shared(make_shared(A, B, C), ParameterVector{A, B, C}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, Shape{4}); + auto a = backend->create_tensor(element::Type_t::boolean, Shape{4}); copy_data(a, vector{0, 1, 1, 0}); - auto b = backend->create_tensor(element::f32, Shape{4}); + auto b = backend->create_tensor(element::Type_t::f32, Shape{4}); copy_data(b, vector{1, 2, 3, 4}); - auto c = backend->create_tensor(element::f32, Shape{2, 4}); + auto c = backend->create_tensor(element::Type_t::f32, Shape{2, 4}); copy_data(c, vector{11, 12, 13, 14, 15, 16, 17, 18}); - auto result = backend->create_tensor(element::f32, Shape{2, 4}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{2, 4}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); @@ -84,21 +84,21 @@ NGRAPH_TEST(${BACKEND_NAME}, select_v1) NGRAPH_TEST(${BACKEND_NAME}, select_double) { Shape shape{2, 2, 2}; - auto A = make_shared(element::boolean, shape); - auto B = make_shared(element::f64, shape); - auto C = make_shared(element::f64, shape); + auto A = make_shared(element::Type_t::boolean, shape); + auto B = make_shared(element::Type_t::f64, shape); + auto C = make_shared(element::Type_t::f64, shape); auto f = make_shared(make_shared(A, B, C), ParameterVector{A, B, C}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); + auto a = backend->create_tensor(element::Type_t::boolean, shape); copy_data(a, vector{0, 1, 1, 0, 0, 1, 0, 1}); - auto b = backend->create_tensor(element::f64, shape); + auto b = backend->create_tensor(element::Type_t::f64, shape); copy_data(b, vector{1, 2, 3, 4, 5, 6, 7, 8}); - auto c = backend->create_tensor(element::f64, shape); + auto c = backend->create_tensor(element::Type_t::f64, shape); copy_data(c, vector{11, 12, 13, 14, 15, 16, 17, 18}); - auto result = backend->create_tensor(element::f64, shape); + auto result = backend->create_tensor(element::Type_t::f64, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); diff --git a/ngraph/test/backend/shape_of.in.cpp b/ngraph/test/backend/shape_of.in.cpp index 05a1269f26965e..076efcce153445 100644 --- 
a/ngraph/test/backend/shape_of.in.cpp +++ b/ngraph/test/backend/shape_of.in.cpp @@ -35,14 +35,14 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_scalar_v0) Shape input_shape{}; Shape output_shape{0}; - auto A = std::make_shared(element::f32, input_shape); + auto A = std::make_shared(element::Type_t::f32, input_shape); auto f = std::make_shared(std::make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector{0}); - auto result = backend->create_tensor(element::i64, output_shape); + auto result = backend->create_tensor(element::Type_t::i64, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -55,18 +55,18 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_scalar_v3) Shape input_shape{}; Shape output_shape{0}; - auto A = std::make_shared(element::f32, input_shape); - auto f = - std::make_shared(OutputVector{std::make_shared(A), - std::make_shared(A, element::i32)}, - ParameterVector{A}); + auto A = std::make_shared(element::Type_t::f32, input_shape); + auto f = std::make_shared( + OutputVector{std::make_shared(A), + std::make_shared(A, element::Type_t::i32)}, + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector{0}); - auto result64 = backend->create_tensor(element::i64, output_shape); - auto result32 = backend->create_tensor(element::i32, output_shape); + auto result64 = backend->create_tensor(element::Type_t::i64, output_shape); + auto result32 = backend->create_tensor(element::Type_t::i32, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result64, result32}, {a}); @@ -81,14 +81,14 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_vector_v0) Shape input_shape{2}; Shape output_shape{1}; - auto A = std::make_shared(element::f32, input_shape); + auto A = std::make_shared(element::Type_t::f32, input_shape); auto f = std::make_shared(std::make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector(2, 0)); - auto result = backend->create_tensor(element::i64, output_shape); + auto result = backend->create_tensor(element::Type_t::i64, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -101,18 +101,18 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_vector_v3) Shape input_shape{2}; Shape output_shape{1}; - auto A = std::make_shared(element::f32, input_shape); - auto f = - std::make_shared(OutputVector{std::make_shared(A), - std::make_shared(A, element::i32)}, - ParameterVector{A}); + auto A = std::make_shared(element::Type_t::f32, input_shape); + auto f = std::make_shared( + OutputVector{std::make_shared(A), + std::make_shared(A, element::Type_t::i32)}, + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector{2, 0}); - auto result64 = backend->create_tensor(element::i64, output_shape); - auto result32 = backend->create_tensor(element::i32, output_shape); + auto result64 = 
backend->create_tensor(element::Type_t::i64, output_shape); + auto result32 = backend->create_tensor(element::Type_t::i32, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result64, result32}, {a}); @@ -127,14 +127,14 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_matrix_v0) Shape input_shape{2, 4}; Shape output_shape{2}; - auto A = std::make_shared(element::f32, input_shape); + auto A = std::make_shared(element::Type_t::f32, input_shape); auto f = std::make_shared(std::make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector(2 * 4, 0)); - auto result = backend->create_tensor(element::i64, output_shape); + auto result = backend->create_tensor(element::Type_t::i64, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -147,18 +147,18 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_matrix_v3) Shape input_shape{2, 4}; Shape output_shape{2}; - auto A = std::make_shared(element::f32, input_shape); - auto f = - std::make_shared(OutputVector{std::make_shared(A), - std::make_shared(A, element::i32)}, - ParameterVector{A}); + auto A = std::make_shared(element::Type_t::f32, input_shape); + auto f = std::make_shared( + OutputVector{std::make_shared(A), + std::make_shared(A, element::Type_t::i32)}, + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector(2 * 4, 0)); - auto result64 = backend->create_tensor(element::i64, output_shape); - auto result32 = backend->create_tensor(element::i32, output_shape); + auto result64 = backend->create_tensor(element::Type_t::i64, output_shape); + auto result32 = backend->create_tensor(element::Type_t::i32, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result64, result32}, {a}); @@ -173,14 +173,14 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_5d_v0) Shape input_shape{2, 4, 8, 16, 32}; Shape output_shape{5}; - auto A = std::make_shared(element::f32, input_shape); + auto A = std::make_shared(element::Type_t::f32, input_shape); auto f = std::make_shared(std::make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector(2 * 4 * 8 * 16 * 32, 0)); - auto result = backend->create_tensor(element::i64, output_shape); + auto result = backend->create_tensor(element::Type_t::i64, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -193,18 +193,18 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_5d_v3) Shape input_shape{2, 4, 8, 16, 32}; Shape output_shape{5}; - auto A = std::make_shared(element::f32, input_shape); - auto f = - std::make_shared(OutputVector{std::make_shared(A), - std::make_shared(A, element::i32)}, - ParameterVector{A}); + auto A = std::make_shared(element::Type_t::f32, input_shape); + auto f = std::make_shared( + OutputVector{std::make_shared(A), + std::make_shared(A, element::Type_t::i32)}, + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, 
input_shape); copy_data(a, vector(2 * 4 * 8 * 16 * 32, 0)); - auto result64 = backend->create_tensor(element::i64, output_shape); - auto result32 = backend->create_tensor(element::i32, output_shape); + auto result64 = backend->create_tensor(element::Type_t::i64, output_shape); + auto result32 = backend->create_tensor(element::Type_t::i32, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result64, result32}, {a}); diff --git a/ngraph/test/backend/sigmoid.in.cpp b/ngraph/test/backend/sigmoid.in.cpp index 23bdc501e54cfc..931359da6a45ec 100644 --- a/ngraph/test/backend/sigmoid.in.cpp +++ b/ngraph/test/backend/sigmoid.in.cpp @@ -42,14 +42,16 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, sigmoid_n1c1h2w2) { - auto input = make_shared(element::f32, Shape{1, 1, 2, 2}); + auto input = make_shared(element::Type_t::f32, Shape{1, 1, 2, 2}); auto sigmoid_node = make_shared(input); auto func = make_shared(sigmoid_node, ParameterVector{input}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - shared_ptr a = backend->create_tensor(element::f32, input->get_shape()); - shared_ptr result = backend->create_tensor(element::f32, input->get_shape()); + shared_ptr a = + backend->create_tensor(element::Type_t::f32, input->get_shape()); + shared_ptr result = + backend->create_tensor(element::Type_t::f32, input->get_shape()); float x1 = 1.0f; float x2 = 4.0f; @@ -67,14 +69,16 @@ NGRAPH_TEST(${BACKEND_NAME}, sigmoid_n1c1h2w2) NGRAPH_TEST(${BACKEND_NAME}, sigmoid_n1c1h4) { - auto input = make_shared(element::f32, Shape{1, 1, 4}); + auto input = make_shared(element::Type_t::f32, Shape{1, 1, 4}); auto sigmoid_node = make_shared(input); auto func = make_shared(sigmoid_node, ParameterVector{input}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - shared_ptr a = backend->create_tensor(element::f32, input->get_shape()); - shared_ptr result = backend->create_tensor(element::f32, input->get_shape()); + shared_ptr a = + backend->create_tensor(element::Type_t::f32, input->get_shape()); + shared_ptr result = + backend->create_tensor(element::Type_t::f32, input->get_shape()); float x1 = 1.0f; float x2 = 4.0f; diff --git a/ngraph/test/backend/sign.in.cpp b/ngraph/test/backend/sign.in.cpp index 20e3dbf3f89876..751801831df152 100644 --- a/ngraph/test/backend/sign.in.cpp +++ b/ngraph/test/backend/sign.in.cpp @@ -49,15 +49,15 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, sign) { Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, -2, 0, -4.8f, 4.8f, -0.0f}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/sin.in.cpp b/ngraph/test/backend/sin.in.cpp index f424bc5e5807ca..3b69a5cbdd6be8 100644 --- a/ngraph/test/backend/sin.in.cpp +++ b/ngraph/test/backend/sin.in.cpp @@ -49,16 +49,16 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, sin) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); 
auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f}; copy_data(a, input); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); EXPECT_TRUE(test::all_close_f(vector{0.00000000f, diff --git a/ngraph/test/backend/sinh.in.cpp b/ngraph/test/backend/sinh.in.cpp index b2dca5b21756f3..85a9e5caa648c0 100644 --- a/ngraph/test/backend/sinh.in.cpp +++ b/ngraph/test/backend/sinh.in.cpp @@ -49,16 +49,16 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, sinh) { Shape shape{6}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector input{1.0f, 0.0f, -0.0f, -1.0f, 5.0f, -5.0f}; copy_data(a, input); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return sinhf(x); }); diff --git a/ngraph/test/backend/slice.in.cpp b/ngraph/test/backend/slice.in.cpp index dcdb3fb955c6aa..ba8b352a3bfe82 100644 --- a/ngraph/test/backend/slice.in.cpp +++ b/ngraph/test/backend/slice.in.cpp @@ -35,7 +35,7 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, slice_scalar) { Shape shape_a{}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{}; auto r = make_shared(A, Coordinate{}, Coordinate{}); auto f = make_shared(make_shared(r), ParameterVector{A}); @@ -43,9 +43,9 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_scalar) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{312}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -56,7 +56,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_scalar) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix) { Shape shape_a{4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 2}; auto r = make_shared(A, Coordinate{0, 1}, Coordinate{3, 3}); auto f = make_shared(r, ParameterVector{A}); @@ -64,9 +64,9 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, 
shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -77,7 +77,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix) NGRAPH_TEST(${BACKEND_NAME}, slice_vector) { Shape shape_a{16}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{12}; auto r = make_shared(A, Coordinate{2}, Coordinate{14}); auto f = make_shared(r, ParameterVector{A}); @@ -85,9 +85,9 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_vector) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -99,8 +99,8 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_vector) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_overlap) { Shape shape_a{4, 4}; - auto A = make_shared(element::f32, shape_a); - auto B = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); + auto B = make_shared(element::Type_t::f32, shape_a); auto C = make_shared(A, B); Shape shape_r{2, 4}; auto D = make_shared(C, Coordinate{0, 0}, Coordinate{2, 4}); @@ -111,11 +111,11 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_overlap) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); - auto b = backend->create_tensor(element::f32, shape_a); + auto b = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(b, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -127,7 +127,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_overlap) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place) { Shape shape_a{4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 4}; auto D = make_shared(A, Coordinate{0, 0}, Coordinate{2, 4}); auto E = make_shared(A, Coordinate{2, 0}, Coordinate{4, 4}); @@ -137,9 +137,9 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -151,7 +151,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_twice) { Shape shape_a{4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{1, 4}; auto B 
= make_shared(A, Coordinate{0, 0}, Coordinate{2, 4}); auto D = make_shared(B, Coordinate{1, 0}, Coordinate{2, 4}); @@ -162,9 +162,9 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_twice) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -175,7 +175,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_twice) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_twice_overlap) { Shape shape_a{5, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 4}; auto B = make_shared(A, Coordinate{1, 0}, Coordinate{5, 4}); auto D = make_shared(B, Coordinate{1, 0}, Coordinate{3, 4}); @@ -186,10 +186,10 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_twice_overlap) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -201,7 +201,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_twice_overlap) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_with_transpose) { Shape shape_a{4, 5}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 4}; auto B = make_shared(A, Coordinate{1, 0}, Coordinate{4, 5}); auto C = builder::opset1::transpose(B); @@ -213,10 +213,10 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_with_transpose) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -228,7 +228,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_with_transpose) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_strided) { Shape shape_a{4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2}; auto r = make_shared(A, Coordinate{1, 0}, Coordinate{4, 4}, Strides{2, 3}); auto f = make_shared(r, ParameterVector{A}); @@ -236,9 +236,9 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_strided) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13, 14, 15}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -249,7 +249,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_strided) NGRAPH_TEST(${BACKEND_NAME}, slice_3d) { Shape shape_a{4, 4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto r = make_shared(A, Coordinate{1, 1, 1}, Coordinate{3, 3, 3}); auto f = make_shared(r, ParameterVector{A}); @@ -257,7 +257,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -265,7 +265,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d) 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -277,7 +277,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d) NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided) { Shape shape_a{4, 4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto r = make_shared(A, Coordinate{0, 0, 0}, Coordinate{4, 4, 4}, Strides{2, 2, 2}); auto f = make_shared(r, ParameterVector{A}); @@ -285,7 +285,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -293,7 +293,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided) 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -305,7 +305,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided) NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides) { Shape shape_a{4, 4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto r = make_shared(A, Coordinate{0, 0, 0}, Coordinate{4, 4, 4}, Strides{2, 2, 3}); auto f = make_shared(r, ParameterVector{A}); @@ -313,7 +313,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -321,7 +321,7 @@ 
NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides) 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -333,7 +333,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides) NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides_int64) { Shape shape_a{4, 4, 4}; - auto A = make_shared(element::i64, shape_a); + auto A = make_shared(element::Type_t::i64, shape_a); Shape shape_r{2, 2, 2}; auto r = make_shared(A, Coordinate{0, 0, 0}, Coordinate{4, 4, 4}, Strides{2, 2, 3}); auto f = make_shared(r, ParameterVector{A}); @@ -341,7 +341,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides_int64) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i64, shape_a); + auto a = backend->create_tensor(element::Type_t::i64, shape_a); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -349,7 +349,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides_int64) 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}); - auto result = backend->create_tensor(element::i64, shape_r); + auto result = backend->create_tensor(element::Type_t::i64, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -359,7 +359,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides_int64) NGRAPH_TEST(${BACKEND_NAME}, slice_3d_start_just_oob) { Shape shape_a{20, 10, 5}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{20, 0, 5}; auto r = make_shared(A, Coordinate{0, 10, 0}, Coordinate{20, 10, 5}, Strides{1, 1, 1}); @@ -368,10 +368,10 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_start_just_oob) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); vector a_data(20 * 10 * 5, 222.0f); copy_data(a, a_data); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/softmax.in.cpp b/ngraph/test/backend/softmax.in.cpp index 9645ca066e2a17..b649e5dfc45cc8 100644 --- a/ngraph/test/backend/softmax.in.cpp +++ b/ngraph/test/backend/softmax.in.cpp @@ -43,14 +43,14 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d) { Shape shape{2, 2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, 0), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{-10, -20, -30, -40, -50, -60, -1, -2, -3, -4, -5, -6}); - auto result = backend->create_tensor(element::f32, shape); + auto result = 
backend->create_tensor(element::Type_t::f32, shape);
     auto d0 = expf(-10) + expf(-1);
     auto d1 = expf(-20) + expf(-2);
@@ -80,14 +80,14 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d)
 NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d_double)
 {
     Shape shape{2, 2, 3};
-    auto A = make_shared(element::f64, shape);
+    auto A = make_shared(element::Type_t::f64, shape);
     auto f = make_shared(make_shared(A, 0), ParameterVector{A});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto a = backend->create_tensor(element::f64, shape);
+    auto a = backend->create_tensor(element::Type_t::f64, shape);
     copy_data(a, vector{-10, -20, -30, -40, -50, -60, -1, -2, -3, -4, -5, -6});
-    auto result = backend->create_tensor(element::f64, shape);
+    auto result = backend->create_tensor(element::Type_t::f64, shape);
     auto d0 = exp(-10) + exp(-1);
     auto d1 = exp(-20) + exp(-2);
@@ -117,14 +117,14 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d_double)
 NGRAPH_TEST(${BACKEND_NAME}, softmax_2d_axis_1)
 {
     Shape shape{2, 3};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A, 1), ParameterVector{A});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{-10, -20, -30, -40, -50, -60});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
     auto d0 = expf(-10) + expf(-20) + expf(-30);
     auto d1 = expf(-40) + expf(-50) + expf(-60);
@@ -143,14 +143,14 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_2d_axis_1)
 NGRAPH_TEST(${BACKEND_NAME}, softmax_2d_axis_0)
 {
     Shape shape{2, 3};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A, 0), ParameterVector{A});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{-10, -20, -30, -40, -50, -60});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
     auto d0 = expf(-10) + expf(-40);
     auto d1 = expf(-20) + expf(-50);
@@ -170,14 +170,14 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_2d_axis_0)
 NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d_trivial)
 {
     Shape shape{1, 2, 3};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A, 0), ParameterVector{A});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{-10, -20, -30, -40, -50, -60});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -188,16 +188,16 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d_trivial)
 NGRAPH_TEST(${BACKEND_NAME}, softmax_underflow)
 {
     Shape shape{2, 3};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A, 0), ParameterVector{A});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
     auto low = std::numeric_limits::lowest();
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{low, 1, 2, 3, 4, 5});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
     auto d0 = expf(low) + expf(3);
     auto d1 = expf(1) + expf(4);
@@ -213,16 +213,16 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_underflow)
 NGRAPH_TEST(${BACKEND_NAME}, softmax_overflow)
 {
     Shape shape{2, 3};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A, 0), ParameterVector{A});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
     auto high = std::numeric_limits::max();
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{high, 1, 2, 3, 4, 5});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
     auto d0 = expf(high - high) + expf(3 - high);
     auto d1 = expf(1) + expf(4);
diff --git a/ngraph/test/backend/split.in.cpp b/ngraph/test/backend/split.in.cpp
index 953295d07b110b..ce0642bd0413b5 100644
--- a/ngraph/test/backend/split.in.cpp
+++ b/ngraph/test/backend/split.in.cpp
@@ -28,8 +28,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
 NGRAPH_TEST(${BACKEND_NAME}, split_1d)
 {
-    const auto data = make_shared(element::i32, Shape{6});
-    const auto axis = op::Constant::create(element::i64, Shape{}, {0});
+    const auto data = make_shared(element::Type_t::i32, Shape{6});
+    const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     const auto tested_op = make_shared(data, axis, 3);
     const auto function = make_shared(tested_op, ParameterVector{data});
@@ -47,8 +47,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_1d)
 NGRAPH_TEST(${BACKEND_NAME}, split_2d_axis_0)
 {
     Shape shape{6, 2};
-    const auto data = make_shared(element::f32, shape);
-    const auto axis = op::Constant::create(element::i64, Shape{}, {0});
+    const auto data = make_shared(element::Type_t::f32, shape);
+    const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     const auto tested_op = make_shared(data, axis, 2);
     const auto function = make_shared(tested_op, ParameterVector{data});
@@ -67,8 +67,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_2d_axis_0)
 NGRAPH_TEST(${BACKEND_NAME}, split_2d_axis_1)
 {
     Shape shape{6, 2};
-    const auto data = make_shared(element::f32, shape);
-    const auto axis = op::Constant::create(element::i64, Shape{}, {1});
+    const auto data = make_shared(element::Type_t::f32, shape);
+    const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1});
     const auto tested_op = make_shared(data, axis, 2);
     const auto function = make_shared(tested_op, ParameterVector{data});
@@ -87,8 +87,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_2d_axis_1)
 NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_0)
 {
     Shape shape{2, 2, 3};
-    const auto data = make_shared(element::f32, shape);
-    const auto axis = op::Constant::create(element::i64, Shape{}, {0});
+    const auto data = make_shared(element::Type_t::f32, shape);
+    const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     const auto tested_op = make_shared(data, axis, 2);
     const auto function = make_shared(tested_op, ParameterVector{data});
@@ -107,8 +107,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_0)
 NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_1)
 {
     Shape shape{2, 8, 2};
-    const auto data = make_shared(element::f32, shape);
-    const auto axis =
op::Constant::create(element::i64, Shape{}, {1}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto tested_op = make_shared(data, axis, 4); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -129,8 +129,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_1) NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_2) { Shape shape{2, 1, 6}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {2}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {2}); const auto tested_op = make_shared(data, axis, 2); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -149,8 +149,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_2) NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_0) { Shape shape{3, 2, 3, 1}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {0}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {0}); const auto tested_op = make_shared(data, axis, 3); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -170,8 +170,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_0) NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_1) { Shape shape{2, 8, 2, 2}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto tested_op = make_shared(data, axis, 4); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -196,8 +196,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_1) NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_2) { Shape shape{2, 1, 6, 2}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {2}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {2}); const auto tested_op = make_shared(data, axis, 3); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -217,8 +217,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_2) NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_3) { Shape shape{2, 1, 2, 6}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {3}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {3}); const auto tested_op = make_shared(data, axis, 3); const auto function = make_shared(tested_op, ParameterVector{data}); diff --git a/ngraph/test/backend/sqrt.in.cpp b/ngraph/test/backend/sqrt.in.cpp index 6c4b85aa09f477..f6d5b6b17b26de 100644 --- a/ngraph/test/backend/sqrt.in.cpp +++ b/ngraph/test/backend/sqrt.in.cpp @@ -49,15 +49,15 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, sqrt) { Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + 
auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{16, 4, 81, 100, 10000, 0}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -67,15 +67,15 @@ NGRAPH_TEST(${BACKEND_NAME}, sqrt) NGRAPH_TEST(${BACKEND_NAME}, sqrt_negative_inputs) { Shape shape{4}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{-1, 4, -81, 100}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/strided_slice.in.cpp b/ngraph/test/backend/strided_slice.in.cpp index 192a0f505ef4fd..2a0b2e54d43410 100644 --- a/ngraph/test/backend/strided_slice.in.cpp +++ b/ngraph/test/backend/strided_slice.in.cpp @@ -44,10 +44,12 @@ void check_strided_slice_success(const element::Type& input_element_type, const std::vector& expected_values) { auto arg = std::make_shared(input_element_type, input_shape); - auto begin_op = make_shared(element::i64, Shape{begin_values.size()}); - auto end_op = make_shared(element::i64, Shape{end_values.size()}); + auto begin_op = + make_shared(element::Type_t::i64, Shape{begin_values.size()}); + auto end_op = + make_shared(element::Type_t::i64, Shape{end_values.size()}); auto strides_op = - make_shared(element::i64, Shape{strides_values.size()}); + make_shared(element::Type_t::i64, Shape{strides_values.size()}); std::vector input_values(shape_size(input_shape)); std::iota(input_values.begin(), input_values.end(), static_cast(0)); @@ -69,9 +71,10 @@ void check_strided_slice_success(const element::Type& input_element_type, auto ex = backend->compile(f); auto arg_tensor = backend->create_tensor(input_element_type, input_shape); - auto begin_tensor = backend->create_tensor(element::i64, Shape{begin_values.size()}); - auto end_tensor = backend->create_tensor(element::i64, Shape{end_values.size()}); - auto strides_tensor = backend->create_tensor(element::i64, Shape{strides_values.size()}); + auto begin_tensor = backend->create_tensor(element::Type_t::i64, Shape{begin_values.size()}); + auto end_tensor = backend->create_tensor(element::Type_t::i64, Shape{end_values.size()}); + auto strides_tensor = + backend->create_tensor(element::Type_t::i64, Shape{strides_values.size()}); copy_data(arg_tensor, input_values); copy_data(begin_tensor, begin_values); copy_data(end_tensor, end_values); @@ -103,8 +106,10 @@ void check_strided_slice_stride_optional_success(const element::Type& input_elem const std::vector& expected_values) { auto arg = std::make_shared(input_element_type, input_shape); - auto begin_op = make_shared(element::i64, Shape{begin_values.size()}); - auto end_op = make_shared(element::i64, Shape{end_values.size()}); + auto begin_op = + make_shared(element::Type_t::i64, Shape{begin_values.size()}); + auto end_op = + make_shared(element::Type_t::i64, Shape{end_values.size()}); std::vector input_values(shape_size(input_shape)); std::iota(input_values.begin(), input_values.end(), static_cast(0)); @@ 
-125,8 +130,8 @@ void check_strided_slice_stride_optional_success(const element::Type& input_elem auto ex = backend->compile(f); auto arg_tensor = backend->create_tensor(input_element_type, input_shape); - auto begin_tensor = backend->create_tensor(element::i64, Shape{begin_values.size()}); - auto end_tensor = backend->create_tensor(element::i64, Shape{end_values.size()}); + auto begin_tensor = backend->create_tensor(element::Type_t::i64, Shape{begin_values.size()}); + auto end_tensor = backend->create_tensor(element::Type_t::i64, Shape{end_values.size()}); copy_data(arg_tensor, input_values); copy_data(begin_tensor, begin_values); copy_data(end_tensor, end_values); @@ -150,7 +155,7 @@ void check_strided_slice_stride_optional_success(const element::Type& input_elem NGRAPH_TEST(${BACKEND_NAME}, strided_slice_0) { check_strided_slice_success( - element::u32, + element::Type_t::u32, Shape{2, 3, 4}, std::vector{1, 0}, std::vector{0, 0}, @@ -171,7 +176,7 @@ NGRAPH_TEST(${BACKEND_NAME}, strided_slice_0) NGRAPH_TEST(${BACKEND_NAME}, strided_slice_1) { check_strided_slice_success( - element::u32, + element::Type_t::u32, Shape{2, 4, 6, 8, 2, 2, 2}, std::vector{0, 0, 2, 7, 0, 0, 1}, std::vector{0, 4, 6, 3, 0, 0, 0}, @@ -201,7 +206,7 @@ NGRAPH_TEST(${BACKEND_NAME}, strided_slice_1) // expected output shape is Shape{1,4} NGRAPH_TEST(${BACKEND_NAME}, strided_slice_stride_optional) { - check_strided_slice_stride_optional_success(element::u32, + check_strided_slice_stride_optional_success(element::Type_t::u32, Shape{2, 3, 4}, std::vector{-1, -1, 0}, std::vector{0, 0, 0}, diff --git a/ngraph/test/backend/subtract.in.cpp b/ngraph/test/backend/subtract.in.cpp index 4d4b232f817423..ce2b205bfae909 100644 --- a/ngraph/test/backend/subtract.in.cpp +++ b/ngraph/test/backend/subtract.in.cpp @@ -51,18 +51,18 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, subtract) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{2, 4, 8, 16}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 4, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -72,18 +72,18 @@ NGRAPH_TEST(${BACKEND_NAME}, subtract) NGRAPH_TEST(${BACKEND_NAME}, subtract_overload) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(A - B, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{2, 4, 8, 16}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); 
copy_data(b, vector{1, 2, 4, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); diff --git a/ngraph/test/backend/tan.in.cpp b/ngraph/test/backend/tan.in.cpp index 93a3600be2b70f..abbe7c25c9dd25 100644 --- a/ngraph/test/backend/tan.in.cpp +++ b/ngraph/test/backend/tan.in.cpp @@ -49,16 +49,16 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, tan) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f}; copy_data(a, input); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); EXPECT_TRUE(test::all_close_f(vector{0.00000000f, diff --git a/ngraph/test/backend/tanh.in.cpp b/ngraph/test/backend/tanh.in.cpp index 404c0b6d6c4cf1..08e5db9a49c3d9 100644 --- a/ngraph/test/backend/tanh.in.cpp +++ b/ngraph/test/backend/tanh.in.cpp @@ -49,16 +49,16 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, tanh) { Shape shape{6}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector input{1.0f, 0.0f, -0.0f, -1.0f, 0.5f, -0.5f}; copy_data(a, input); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return tanhf(x); }); diff --git a/ngraph/test/backend/tile.in.cpp b/ngraph/test/backend/tile.in.cpp index d9b4b5d520e7a1..bf1c2b9d6769e7 100644 --- a/ngraph/test/backend/tile.in.cpp +++ b/ngraph/test/backend/tile.in.cpp @@ -39,9 +39,9 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, tile_3d_small_data_rank) { Shape shape_a{3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_re{3}; - auto repeats = make_shared(element::i64, shape_re, vector{2, 2, 1}); + auto repeats = make_shared(element::Type_t::i64, shape_re, vector{2, 2, 1}); Shape shape_r{2, 2, 3}; auto tile = make_shared(A, repeats); @@ -51,10 +51,10 @@ NGRAPH_TEST(${BACKEND_NAME}, tile_3d_small_data_rank) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -66,9 +66,9 @@ NGRAPH_TEST(${BACKEND_NAME}, 
tile_3d_small_data_rank) NGRAPH_TEST(${BACKEND_NAME}, tile_3d_few_repeats) { Shape shape_a{2, 1, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_re{2}; - auto repeats = make_shared(element::i64, shape_re, vector{2, 1}); + auto repeats = make_shared(element::Type_t::i64, shape_re, vector{2, 1}); Shape shape_r{2, 2, 3}; auto tile = make_shared(A, repeats); @@ -78,10 +78,10 @@ NGRAPH_TEST(${BACKEND_NAME}, tile_3d_few_repeats) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/topk.in.cpp b/ngraph/test/backend/topk.in.cpp index e61451b8bc10fb..288512b8db404a 100644 --- a/ngraph/test/backend/topk.in.cpp +++ b/ngraph/test/backend/topk.in.cpp @@ -64,14 +64,14 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_resnet50) Shape shape{128, 1000}; Shape rshape5{128, 5}; Shape rshape1{128, 1}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto B = make_shared(A, - op::Constant::create(element::i64, {}, {5}), + op::Constant::create(element::Type_t::i64, {}, {5}), 1, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); auto C = make_shared(A, - op::Constant::create(element::i64, {}, {1}), + op::Constant::create(element::Type_t::i64, {}, {1}), 1, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -86,7 +86,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_resnet50) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -97,10 +97,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_resnet50) } copy_data(a, data); - auto result5_value = backend->create_tensor(element::f32, rshape5); - auto result5_index = backend->create_tensor(element::i32, rshape5); - auto result1_value = backend->create_tensor(element::f32, rshape1); - auto result1_index = backend->create_tensor(element::i32, rshape1); + auto result5_value = backend->create_tensor(element::Type_t::f32, rshape5); + auto result5_index = backend->create_tensor(element::Type_t::i32, rshape5); + auto result1_value = backend->create_tensor(element::Type_t::f32, rshape1); + auto result1_index = backend->create_tensor(element::Type_t::i32, rshape1); auto exec = backend->compile(f); exec->call({result5_value, result5_index, result1_value, result1_index}, {a}); @@ -142,8 +142,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_none) { Shape shape{128, 1000}; Shape rshape{128, 5}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {5}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {5}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::NONE); @@ -154,7 +154,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_none) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = 
backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -165,8 +165,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_none) } copy_data(a, data); - auto result_value = backend->create_tensor(element::f32, rshape); - auto result_index = backend->create_tensor(element::i32, rshape); + auto result_value = backend->create_tensor(element::Type_t::f32, rshape); + auto result_index = backend->create_tensor(element::Type_t::i32, rshape); auto exec = backend->compile(f); exec->call({result_value, result_index}, {a}); @@ -196,8 +196,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_none) { Shape shape{128, 1000}; Shape rshape{128, 5}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {5}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {5}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::NONE); @@ -208,7 +208,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_none) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -219,8 +219,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_none) } copy_data(a, data); - auto result_value = backend->create_tensor(element::f32, rshape); - auto result_index = backend->create_tensor(element::i32, rshape); + auto result_value = backend->create_tensor(element::Type_t::f32, rshape); + auto result_index = backend->create_tensor(element::Type_t::i32, rshape); auto exec = backend->compile(f); exec->call({result_value, result_index}, {a}); @@ -250,8 +250,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_value) { Shape shape{128, 1000}; Shape rshape{128, 5}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {5}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {5}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -262,7 +262,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_value) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -273,8 +273,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_value) } copy_data(a, data); - auto result_value = backend->create_tensor(element::f32, rshape); - auto result_index = backend->create_tensor(element::i32, rshape); + auto result_value = backend->create_tensor(element::Type_t::f32, rshape); + auto result_index = backend->create_tensor(element::Type_t::i32, rshape); auto exec = backend->compile(f); exec->call({result_value, result_index}, {a}); @@ -300,8 +300,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_value) { Shape shape{128, 1000}; Shape rshape{128, 5}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {5}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {5}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, 
op::v1::TopK::SortType::SORT_VALUES); @@ -312,7 +312,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_value) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -323,8 +323,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_value) } copy_data(a, data); - auto result_value = backend->create_tensor(element::f32, rshape); - auto result_index = backend->create_tensor(element::i32, rshape); + auto result_value = backend->create_tensor(element::Type_t::f32, rshape); + auto result_index = backend->create_tensor(element::Type_t::i32, rshape); auto exec = backend->compile(f); exec->call({result_value, result_index}, {a}); @@ -354,8 +354,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_index) { Shape shape{128, 1000}; Shape rshape{128, 5}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {5}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {5}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_INDICES); @@ -366,7 +366,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_index) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -377,8 +377,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_index) } copy_data(a, data); - auto result_value = backend->create_tensor(element::f32, rshape); - auto result_index = backend->create_tensor(element::i32, rshape); + auto result_value = backend->create_tensor(element::Type_t::f32, rshape); + auto result_index = backend->create_tensor(element::Type_t::i32, rshape); auto exec = backend->compile(f); exec->call({result_value, result_index}, {a}); @@ -408,8 +408,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_index) { Shape shape{128, 1000}; Shape rshape{128, 5}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {5}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {5}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_INDICES); @@ -420,7 +420,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_index) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -431,8 +431,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_index) } copy_data(a, data); - auto result_value = backend->create_tensor(element::f32, rshape); - auto result_index = backend->create_tensor(element::i32, rshape); + auto result_value = backend->create_tensor(element::Type_t::f32, rshape); + auto result_index = backend->create_tensor(element::Type_t::i32, rshape); auto exec = backend->compile(f); exec->call({result_value, result_index}, {a}); @@ -462,8 +462,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_all) { Shape shape{6}; Shape rshape{6}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {6}); + 
auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {6}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -473,10 +473,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -491,8 +491,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_i32_max_all) { Shape shape{6}; Shape rshape{6}; - auto A = make_shared(element::i32, shape); - auto k = op::Constant::create(element::i64, {}, {6}); + auto A = make_shared(element::Type_t::i32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {6}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -502,10 +502,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_i32_max_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result0 = backend->create_tensor(element::i32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::i32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -519,8 +519,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_partial) { Shape shape{6}; Shape rshape{3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {3}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {3}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -530,10 +530,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -548,8 +548,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_one) { Shape shape{6}; Shape rshape{1}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ 
-559,10 +559,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_one) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -577,8 +577,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_all) { Shape shape{6}; Shape rshape{6}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {6}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {6}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -588,10 +588,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{6, 5, 4, 3, 2, 1}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -606,8 +606,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_partial) { Shape shape{6}; Shape rshape{3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {3}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {3}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -617,10 +617,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{6, 5, 4, 3, 2, 1}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -635,8 +635,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_one) { Shape shape{6}; Shape rshape{1}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -646,10 +646,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_one) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = 
backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{6, 5, 4, 3, 2, 1}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -664,8 +664,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_all) { Shape shape{2, 3, 2}; Shape rshape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {3}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {3}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -675,10 +675,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -694,21 +694,25 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_int64) { Shape shape{2, 3, 2}; Shape rshape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {3}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {3}); int64_t axis = 1; - auto B = make_shared( - A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, element::i64); + auto B = make_shared(A, + k, + axis, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, + element::Type_t::i64); auto f0 = make_shared(OutputVector{B->output(0)}, ParameterVector{A}); auto f1 = make_shared(OutputVector{B->output(1)}, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i64, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i64, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -724,8 +728,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_5d_max_partial) { Shape shape{2, 6, 3, 2, 4}; Shape rshape{2, 2, 3, 2, 4}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {2}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {2}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -735,7 +739,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_5d_max_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output 
- auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data( a, vector{ @@ -761,8 +765,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_5d_max_partial) 205., 277., 213., 285., 198., 270., 206., 278., 214., 286., 199., 271., 207., 279., 215., 287., 200., 272., 208., 280., 216., 288.}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -790,8 +794,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_partial) { Shape shape{2, 3, 2}; Shape rshape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {2}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {2}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -801,10 +805,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -820,8 +824,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_one) { Shape shape{2, 3, 2}; Shape rshape{2, 1, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -831,10 +835,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_one) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -849,8 +853,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_all) { Shape shape{2, 3, 2}; Shape rshape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {3}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {3}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -860,10 +864,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some 
tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -879,8 +883,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_partial) { Shape shape{2, 3, 2}; Shape rshape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {2}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {2}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -890,10 +894,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -909,8 +913,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_one) { Shape shape{2, 3, 2}; Shape rshape{2, 1, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -920,10 +924,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_one) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -938,8 +942,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_all) { Shape shape{4, 3}; Shape rshape{4, 3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {4}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {4}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -949,10 +953,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); 
copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -968,8 +972,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_partial) { Shape shape{4, 3}; Shape rshape{2, 3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {2}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {2}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -979,10 +983,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -998,8 +1002,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one) { Shape shape{4, 3}; Shape rshape{1, 3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -1009,10 +1013,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -1027,8 +1031,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one_with_equal_values) { Shape shape{2, 4}; Shape rshape{2, 1}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -1038,10 +1042,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one_with_equal_values) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 3, 2, 4, 1, 3, 3, 2}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto 
result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -1056,8 +1060,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_all) { Shape shape{4, 3}; Shape rshape{4, 3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {4}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {4}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -1067,10 +1071,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -1086,8 +1090,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_partial) { Shape shape{4, 3}; Shape rshape{2, 3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {2}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {2}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -1097,10 +1101,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -1115,8 +1119,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_one) { Shape shape{4, 3}; Shape rshape{1, 3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::NONE); @@ -1126,10 +1130,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_one) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = 
backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -1143,9 +1147,9 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_one) NGRAPH_TEST(${BACKEND_NAME}, topk_3d_large_input_max) { Shape shape{4, 8192, 5}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); - auto k = op::Constant::create(element::i64, {}, {10}); + auto k = op::Constant::create(element::Type_t::i64, {}, {10}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -1183,9 +1187,9 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_large_input_max) NGRAPH_TEST(${BACKEND_NAME}, topk_3d_large_input_min) { Shape shape{4, 8192, 5}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); - auto k = op::Constant::create(element::i64, {}, {10}); + auto k = op::Constant::create(element::Type_t::i64, {}, {10}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -1224,8 +1228,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_single_output) { Shape shape{2, 3, 2}; Shape rshape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {2}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {2}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -1234,9 +1238,9 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_single_output) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -1245,27 +1249,27 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_single_output) NGRAPH_TEST(${BACKEND_NAME}, topk_v1_invalid_strings) { - const auto data = make_shared(element::f32, Shape{1, 2, 3}); - const auto k = op::Constant::create(element::i64, Shape{}, {1}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto k = op::Constant::create(element::Type_t::i64, Shape{}, {1}); EXPECT_THROW(op::v1::TopK(data, k, 0, "max", "invalid_mode"), ngraph::CheckFailure); EXPECT_THROW(op::v1::TopK(data, k, 0, "invalid_sort", "index"), ngraph::CheckFailure); } NGRAPH_TEST(${BACKEND_NAME}, topk_v1_invalid_k) { - const auto data = make_shared(element::f32, Shape{1, 2, 3}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3}); // K must be a scalar - const auto k_non_scalar = op::Constant::create(element::i64, Shape{2}, {1, 2}); + const auto k_non_scalar = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); EXPECT_THROW(op::v1::TopK(data, k_non_scalar, 0, "max", "index"), ngraph::NodeValidationFailure); // K can only be i8, i32 or i64 - const auto k_float = op::Constant::create(element::f32, Shape{}, {1.0f}); + const auto k_float = op::Constant::create(element::Type_t::f32, Shape{}, {1.0f}); EXPECT_THROW(op::v1::TopK(data, k_float, 0, "max", "index"), ngraph::NodeValidationFailure); // the value of K must be positive - const auto k_negative = op::Constant::create(element::i8, 
Shape{}, {-1}); + const auto k_negative = op::Constant::create(element::Type_t::i8, Shape{}, {-1}); EXPECT_THROW(op::v1::TopK(data, k_negative, 0, "max", "index"), ngraph::NodeValidationFailure); } @@ -1299,8 +1303,8 @@ TYPED_TEST_P(topk_backend, topk_mode_sort_order) { const Shape shape{5}; const Shape rshape{3}; - const auto data = make_shared(element::f32, shape); - const auto k = op::Constant::create(element::i64, {}, {3}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto k = op::Constant::create(element::Type_t::i64, {}, {3}); const int64_t axis = 0; // helpers to reduce code verbosity diff --git a/ngraph/test/backend/transpose.in.cpp b/ngraph/test/backend/transpose.in.cpp index a7ebbf2a816680..000f86f27a24c9 100644 --- a/ngraph/test/backend/transpose.in.cpp +++ b/ngraph/test/backend/transpose.in.cpp @@ -33,9 +33,10 @@ NGRAPH_TEST(${BACKEND_NAME}, transpose) // Create a graph for f(x,perm) = Transpose(x,Convert(perm)). We'll do the permutation in // i32 and cast it to i64, just for fun (and to mirror the TensorFlow test I am porting here). // - auto x = make_shared(element::f32, PartialShape::dynamic()); - auto perm = make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto perm_i64 = make_shared(perm, element::i64); + auto x = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto perm = + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto perm_i64 = make_shared(perm, element::Type_t::i64); auto x_transpose = make_shared(x, perm_i64); @@ -45,7 +46,7 @@ NGRAPH_TEST(${BACKEND_NAME}, transpose) auto ex = backend->compile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); std::vector x_shapes{Shape{2, 3}, Shape{2, 3}, Shape{2, 2, 3}}; std::vector> perms{{0, 1}, {1, 0}, {2, 1, 0}}; @@ -58,8 +59,8 @@ NGRAPH_TEST(${BACKEND_NAME}, transpose) for (size_t i = 0; i < x_shapes.size(); i++) { - auto t_x = backend->create_tensor(element::f32, x_shapes[i]); - auto t_perm = backend->create_tensor(element::i32, Shape{perms[i].size()}); + auto t_x = backend->create_tensor(element::Type_t::f32, x_shapes[i]); + auto t_perm = backend->create_tensor(element::Type_t::i32, Shape{perms[i].size()}); copy_data(t_x, inputs[i]); copy_data(t_perm, perms[i]); diff --git a/ngraph/test/backend/unhandled_op.in.cpp b/ngraph/test/backend/unhandled_op.in.cpp index ad243408ae6d6d..d3264b54416a28 100644 --- a/ngraph/test/backend/unhandled_op.in.cpp +++ b/ngraph/test/backend/unhandled_op.in.cpp @@ -56,7 +56,7 @@ namespace NGRAPH_TEST(${BACKEND_NAME}, unhandled_op) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto unhandled = make_shared(A); auto f = make_shared(unhandled, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); diff --git a/ngraph/test/backend/validate_call.in.cpp b/ngraph/test/backend/validate_call.in.cpp index 97e908caa84ad5..5630d57bfeca0c 100644 --- a/ngraph/test/backend/validate_call.in.cpp +++ b/ngraph/test/backend/validate_call.in.cpp @@ -38,13 +38,13 @@ NGRAPH_TEST(${BACKEND_NAME}, validate_call_input_count) Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); - auto a = 
backend->create_tensor(element::f32, shape);
-    auto b = backend->create_tensor(element::f32, shape);
-    auto c = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
+    auto c = backend->create_tensor(element::Type_t::f32, shape);
     EXPECT_ANY_THROW(auto handle = backend->compile(f); handle->call_with_validate({c}, {a}));
 }
@@ -55,13 +55,13 @@ NGRAPH_TEST(${BACKEND_NAME}, validate_call_input_type)
     Shape shape{2, 2};
-    auto A = make_shared(element::f32, shape);
-    auto B = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto B = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A, B), ParameterVector{A, B});
-    auto a = backend->create_tensor(element::i32, shape);
-    auto b = backend->create_tensor(element::f32, shape);
-    auto c = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::i32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
+    auto c = backend->create_tensor(element::Type_t::f32, shape);
     EXPECT_ANY_THROW(auto handle = backend->compile(f); handle->call_with_validate({c}, {a, b}));
 }
@@ -72,13 +72,13 @@ NGRAPH_TEST(${BACKEND_NAME}, validate_call_input_shape)
     Shape shape{2, 2};
-    auto A = make_shared(element::f32, shape);
-    auto B = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto B = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A, B), ParameterVector{A, B});
-    auto a = backend->create_tensor(element::f32, {2, 3});
-    auto b = backend->create_tensor(element::f32, shape);
-    auto c = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, {2, 3});
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
+    auto c = backend->create_tensor(element::Type_t::f32, shape);
     EXPECT_ANY_THROW(auto handle = backend->compile(f); handle->call_with_validate({c}, {a, b}));
 }
@@ -89,14 +89,14 @@ NGRAPH_TEST(${BACKEND_NAME}, validate_call_output_count)
     Shape shape{2, 2};
-    auto A = make_shared(element::f32, shape);
-    auto B = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto B = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A, B), ParameterVector{A, B});
-    auto a = backend->create_tensor(element::f32, shape);
-    auto b = backend->create_tensor(element::f32, shape);
-    auto c = backend->create_tensor(element::f32, shape);
-    auto d = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
+    auto c = backend->create_tensor(element::Type_t::f32, shape);
+    auto d = backend->create_tensor(element::Type_t::f32, shape);
     EXPECT_ANY_THROW(auto handle = backend->compile(f); handle->call_with_validate({c, d}, {a, b}));
 }
@@ -107,13 +107,13 @@ NGRAPH_TEST(${BACKEND_NAME}, validate_call_output_type)
     Shape shape{2, 2};
-    auto A = make_shared(element::f32, shape);
-    auto B = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto B = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A, B), ParameterVector{A, B});
-    auto a = backend->create_tensor(element::i32, shape);
-    auto b = backend->create_tensor(element::f32, shape);
-    auto c = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::i32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
+    auto c = backend->create_tensor(element::Type_t::f32, shape);
     EXPECT_ANY_THROW(auto handle = backend->compile(f); handle->call_with_validate({a}, {b, c}));
 }
@@ -124,13 +124,13 @@ NGRAPH_TEST(${BACKEND_NAME}, validate_call_output_shape)
     Shape shape{2, 2};
-    auto A = make_shared(element::f32, shape);
-    auto B = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto B = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A, B), ParameterVector{A, B});
-    auto a = backend->create_tensor(element::f32, {2, 3});
-    auto b = backend->create_tensor(element::f32, shape);
-    auto c = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, {2, 3});
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
+    auto c = backend->create_tensor(element::Type_t::f32, shape);
     EXPECT_ANY_THROW(auto handle = backend->compile(f); handle->call_with_validate({a}, {c, b}));
 }
diff --git a/ngraph/test/backend_debug_api.cpp b/ngraph/test/backend_debug_api.cpp
index d9172c5a773e61..5124a3c429047d 100644
--- a/ngraph/test/backend_debug_api.cpp
+++ b/ngraph/test/backend_debug_api.cpp
@@ -33,18 +33,18 @@ using namespace ngraph;
 TEST(INTERPRETER, nan_check_input)
 {
     Shape shape{4};
-    auto A = make_shared(element::f32, shape);
-    auto B = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto B = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A, B), ParameterVector{A, B});
     shared_ptr backend = runtime::Backend::create("INTERPRETER");
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{2, 4, NAN, 16});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector{1, 2, 1, 8});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
     shared_ptr handle = backend->compile(f);
@@ -57,18 +57,18 @@ TEST(INTERPRETER, nan_check_input)
 TEST(INTERPRETER, nan_check_output)
 {
     Shape shape{4};
-    auto A = make_shared(element::f32, shape);
-    auto B = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto B = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A, B), ParameterVector{A, B});
     shared_ptr backend = runtime::Backend::create("INTERPRETER");
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{2, 4, 0, 16});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector{1, 2, 0, 8});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
     shared_ptr handle = backend->compile(f);
     shared_ptr ihandle =
diff --git a/ngraph/test/build_graph.cpp b/ngraph/test/build_graph.cpp
index b91dff59560211..c771382b4ec733 100644
--- a/ngraph/test/build_graph.cpp
+++ b/ngraph/test/build_graph.cpp
@@ -31,10 +31,10 @@ using namespace ngraph;
 TEST(build_graph, build_simple)
 {
     // Function with 4 parameters
-    auto arg0 = make_shared(element::f32, Shape{7,
3}); - auto arg1 = make_shared(element::f32, Shape{3}); - auto arg2 = make_shared(element::f32, Shape{32, 7}); - auto arg3 = make_shared(element::f32, Shape{32, 7}); + auto arg0 = make_shared(element::Type_t::f32, Shape{7, 3}); + auto arg1 = make_shared(element::Type_t::f32, Shape{3}); + auto arg2 = make_shared(element::Type_t::f32, Shape{32, 7}); + auto arg3 = make_shared(element::Type_t::f32, Shape{32, 7}); auto broadcast_1 = builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto b1 = builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto dot = make_shared(arg2, arg0); @@ -51,18 +51,18 @@ TEST(build_graph, literal) // float scalar from a float // auto float0 = FloatConstant::make(3.0); vector float_t{3.0}; - auto float0 = make_shared(element::f32, Shape{1}, float_t); + auto float0 = make_shared(element::Type_t::f32, Shape{1}, float_t); ASSERT_EQ(float0->get_vector(), std::vector{3.0}); - ASSERT_EQ(float0->get_element_type(), element::f32); + ASSERT_EQ(float0->get_element_type(), element::Type_t::f32); ASSERT_EQ(float0->get_shape(), Shape{1}); auto d = make_shared(float0, float0); ASSERT_EQ(d->input_values().at(0).get_node_shared_ptr(), float0); ASSERT_EQ(d->input_values().at(1).get_node_shared_ptr(), float0); vector int32{3}; - auto int32_0 = make_shared(element::i32, Shape{}, int32); + auto int32_0 = make_shared(element::Type_t::i32, Shape{}, int32); ASSERT_EQ(int32_0->get_vector(), std::vector{3}); - ASSERT_EQ(int32_0->get_element_type(), element::i32); + ASSERT_EQ(int32_0->get_element_type(), element::Type_t::i32); ASSERT_EQ(int32_0->get_shape(), Shape{}); } @@ -72,8 +72,8 @@ TEST(build_graph, tensor) // auto float0 = FloatConstant::make(3.0); Shape shape{2, 3}; vector float_t(shape_size(shape), 0); - auto float0 = make_shared(element::f32, shape, float_t); - ASSERT_EQ(float0->get_element_type(), element::f32); + auto float0 = make_shared(element::Type_t::f32, shape, float_t); + ASSERT_EQ(float0->get_element_type(), element::Type_t::f32); ASSERT_EQ(float0->get_shape(), shape); auto d = make_shared(float0, float0); ASSERT_EQ(d->input_values().at(0).get_node_shared_ptr(), float0); @@ -81,8 +81,8 @@ TEST(build_graph, tensor) Shape ishape{3, 5}; vector idata(shape_size(ishape), 0); - auto int32_0 = make_shared(element::i32, ishape, idata); - ASSERT_EQ(int32_0->get_element_type(), element::i32); + auto int32_0 = make_shared(element::Type_t::i32, ishape, idata); + ASSERT_EQ(int32_0->get_element_type(), element::Type_t::i32); ASSERT_EQ(int32_0->get_shape(), ishape); } @@ -90,10 +90,10 @@ TEST(build_graph, tensor) TEST(build_graph, function_undeclared_parameters) { // Function with 4 parameters - auto arg0 = make_shared(element::f32, Shape{7, 3}); - auto arg1 = make_shared(element::f32, Shape{3}); - auto arg2 = make_shared(element::f32, Shape{32, 7}); - auto arg3 = make_shared(element::f32, Shape{32, 7}); + auto arg0 = make_shared(element::Type_t::f32, Shape{7, 3}); + auto arg1 = make_shared(element::Type_t::f32, Shape{3}); + auto arg2 = make_shared(element::Type_t::f32, Shape{32, 7}); + auto arg3 = make_shared(element::Type_t::f32, Shape{32, 7}); auto broadcast_1 = builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto b1 = builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto dot = make_shared(arg2, arg0); @@ -121,10 +121,10 @@ TEST(build_graph, no_arg_construction) { // The ops // Parameters aren't converted yet - auto arg0 = make_shared(element::f32, Shape{7}); - auto arg1 = make_shared(element::f32, Shape{7}); - 
auto arg2 = make_shared(element::f32, Shape{7}); - auto arg3 = make_shared(element::f32, Shape{7}); + auto arg0 = make_shared(element::Type_t::f32, Shape{7}); + auto arg1 = make_shared(element::Type_t::f32, Shape{7}); + auto arg2 = make_shared(element::Type_t::f32, Shape{7}); + auto arg3 = make_shared(element::Type_t::f32, Shape{7}); auto add0 = make_shared(); auto abs0 = make_shared(); auto acos0 = make_shared(); @@ -142,13 +142,13 @@ TEST(build_graph, no_arg_construction) TEST(build_graph, multi_output_split_dynamic) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto split = make_shared(data, axis, 2); auto abs = make_shared(split->output(1)); EXPECT_TRUE(abs->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); - auto new_parameter = make_shared(element::f32, Shape{2, 4}); + auto new_parameter = make_shared(element::Type_t::f32, Shape{2, 4}); split->input(0).replace_source_output(new_parameter->output(0)); auto f = make_shared(abs, ParameterVector{new_parameter}); @@ -159,18 +159,18 @@ TEST(build_graph, multi_output_split_dynamic) TEST(build_graph, function_revalidate_and_infer) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto pattern = op::Constant::create(element::i64, Shape{6}, {1, 3, 16, 2, 2, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto pattern = op::Constant::create(element::Type_t::i64, Shape{6}, {1, 3, 16, 2, 2, 2}); auto r = make_shared(arg, pattern, true); auto relu = make_shared(r); auto f = make_shared(relu, ParameterVector{arg}); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(r->get_output_shape(0), (Shape{1, 3, 16, 2, 2, 2})); EXPECT_EQ(f->get_output_shape(0), (Shape{1, 3, 16, 2, 2, 2})); - auto new_pattern = op::Constant::create(element::i64, Shape{2}, {32, 12}); + auto new_pattern = op::Constant::create(element::Type_t::i64, Shape{2}, {32, 12}); r->input(1).replace_source_output(new_pattern->output(0)); f->validate_nodes_and_infer_types(); @@ -193,13 +193,13 @@ TEST(build_graph, default_output_checks) TEST(build_graph, build_graph_with_sink) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const = op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto assign = make_shared(crop, "v0"); @@ -214,13 +214,13 @@ TEST(build_graph, build_graph_with_sink) TEST(build_graph, build_graph_with_sink_output_ctor) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const = op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, 
read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto assign = make_shared(crop, "v0"); @@ -236,13 +236,13 @@ TEST(build_graph, build_graph_with_sink_output_ctor) TEST(build_graph, build_graph_with_add_sink) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const = op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto assign = make_shared(crop, "v0"); @@ -263,13 +263,13 @@ TEST(build_graph, build_graph_with_add_sink) TEST(build_graph, build_graph_with_wrong_remove_sink) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const = op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto assign = make_shared(crop, "v0"); @@ -287,13 +287,13 @@ TEST(build_graph, build_graph_with_wrong_remove_sink) TEST(build_graph, build_graph_with_remove_sink) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const = op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto assign = make_shared(crop, "v0"); @@ -313,13 +313,13 @@ TEST(build_graph, build_graph_with_remove_sink) TEST(build_graph, build_graph_with_add_result) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const = op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto res2 = make_shared(crop, "v0"); @@ -340,13 +340,13 @@ TEST(build_graph, build_graph_with_add_result) TEST(build_graph, 
build_graph_with_remove_result) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const = op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto res2 = make_shared(crop, "v0"); diff --git a/ngraph/test/builder.cpp b/ngraph/test/builder.cpp index 8658b0cbed8380..165508203434ec 100644 --- a/ngraph/test/builder.cpp +++ b/ngraph/test/builder.cpp @@ -26,14 +26,14 @@ shared_ptr make_reduce_result(function(const shared_ptr&, const AxisSet&)> func) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; auto f = make_shared(func(A, {0}), ParameterVector{A}); auto backend = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -44,14 +44,14 @@ shared_ptr make_reduce_result_true( function(const shared_ptr&, const AxisSet&, bool)> func) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; auto f = make_shared(func(A, {0}, true), ParameterVector{A}); auto backend = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -62,14 +62,14 @@ shared_ptr make_reduce_result_false( function(const shared_ptr&, const AxisSet&, bool)> func) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; auto f = make_shared(func(A, {0}, false), ParameterVector{A}); auto backend = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/builder_autobroadcast.cpp b/ngraph/test/builder_autobroadcast.cpp index a9b1bdf23a8436..ea412bcb5f39b2 100644 --- a/ngraph/test/builder_autobroadcast.cpp +++ b/ngraph/test/builder_autobroadcast.cpp @@ -26,7 +26,7 @@ using namespace ngraph; shared_ptr getParamFromShape(const Shape& shape) { - return 
make_shared(element::f32, shape); + return make_shared(element::Type_t::f32, shape); } inline const Shape& getShapeFromParam(const shared_ptr& node) @@ -217,8 +217,8 @@ TEST(autobroadcast, numpy_broadcast_for_matmul_op_2d) { const Shape lhs{3, 1, 4, 6}; const Shape rhs{6, 5}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const OutputVector result = builder::numpy_broadcast_for_matmul_operation(lhs_node, rhs_node); @@ -230,8 +230,8 @@ TEST(autobroadcast, numpy_broadcast_for_matmul_op_3d) { const Shape lhs{3, 1, 4, 6}; const Shape rhs{2, 6, 5}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const OutputVector result = builder::numpy_broadcast_for_matmul_operation(lhs_node, rhs_node); @@ -243,8 +243,8 @@ TEST(autobroadcast, numpy_broadcast_for_matmul_op_nop) { const Shape lhs{4, 6}; const Shape rhs{6, 5}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const OutputVector result = builder::numpy_broadcast_for_matmul_operation(lhs_node, rhs_node); @@ -257,8 +257,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_scalar) const Shape lhs{2, 3, 4, 5}; const Shape rhs{}; size_t start_match_axis{3}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const Output result = builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, start_match_axis); @@ -271,8 +271,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_1elem_tensor) const Shape lhs{2, 3, 4, 5}; const Shape rhs{1, 1, 1}; size_t start_match_axis{1}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const Output result = builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, start_match_axis); @@ -285,8 +285,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_1d) const Shape lhs{2, 3, 4, 5}; const Shape rhs{5}; size_t start_match_axis{3}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const Output result = builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, start_match_axis); @@ -299,8 +299,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_2d) const Shape lhs{2, 3, 4, 5}; const Shape rhs{4, 5}; size_t start_match_axis{2}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const Output result = builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, 
start_match_axis); @@ -313,8 +313,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_2d_inside) const Shape lhs{2, 3, 4, 5}; const Shape rhs{3, 4}; size_t start_match_axis{1}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const Output result = builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, start_match_axis); @@ -327,8 +327,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_1d_left) const Shape lhs{2, 3, 4, 5}; const Shape rhs{2}; size_t start_match_axis{0}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const Output result = builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, start_match_axis); @@ -340,8 +340,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_identical) { const Shape lhs{2, 3, 4, 5}; size_t start_match_axis{0}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, lhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, lhs); const Output result = builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, start_match_axis); diff --git a/ngraph/test/constant.cpp b/ngraph/test/constant.cpp index b11934ff342fdc..a0e20110e141e1 100644 --- a/ngraph/test/constant.cpp +++ b/ngraph/test/constant.cpp @@ -31,7 +31,7 @@ using namespace std; TEST(constant, boolean_string) { Shape shape{4}; - op::Constant c(element::boolean, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::boolean, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -49,7 +49,7 @@ TEST(constant, boolean_string) TEST(constant, boolean_string_broadcast) { Shape shape{4}; - op::Constant c(element::boolean, shape, vector{"1"}); + op::Constant c(element::Type_t::boolean, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -67,7 +67,7 @@ TEST(constant, boolean_string_broadcast) TEST(constant, boolean_vector) { Shape shape{4}; - op::Constant c(element::boolean, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::boolean, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -85,7 +85,7 @@ TEST(constant, boolean_vector) TEST(constant, boolean_vector_broadcast) { Shape shape{4}; - op::Constant c(element::boolean, shape, vector{1}); + op::Constant c(element::Type_t::boolean, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -107,7 +107,7 @@ TEST(constant, boolean_vector_broadcast) TEST(constant, float_string) { Shape shape{4}; - op::Constant c(element::f32, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::f32, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -125,7 +125,7 @@ TEST(constant, float_string) TEST(constant, float_string_broadcast) { Shape shape{4}; - op::Constant c(element::f32, shape, vector{"1"}); + op::Constant c(element::Type_t::f32, shape, vector{"1"}); auto v = c.get_vector(); 
ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -143,7 +143,7 @@ TEST(constant, float_string_broadcast) TEST(constant, float_vector) { Shape shape{4}; - op::Constant c(element::f32, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::f32, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -161,7 +161,7 @@ TEST(constant, float_vector) TEST(constant, float_vector_broadcast) { Shape shape{4}; - op::Constant c(element::f32, shape, vector{1}); + op::Constant c(element::Type_t::f32, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -183,7 +183,7 @@ TEST(constant, float_vector_broadcast) TEST(constant, double_string) { Shape shape{4}; - op::Constant c(element::f64, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::f64, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -201,7 +201,7 @@ TEST(constant, double_string) TEST(constant, double_string_broadcast) { Shape shape{4}; - op::Constant c(element::f64, shape, vector{"1"}); + op::Constant c(element::Type_t::f64, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -219,7 +219,7 @@ TEST(constant, double_string_broadcast) TEST(constant, double_vector) { Shape shape{4}; - op::Constant c(element::f64, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::f64, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -237,7 +237,7 @@ TEST(constant, double_vector) TEST(constant, double_vector_broadcast) { Shape shape{4}; - op::Constant c(element::f64, shape, vector{1}); + op::Constant c(element::Type_t::f64, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -259,7 +259,7 @@ TEST(constant, double_vector_broadcast) TEST(constant, int8_string) { Shape shape{4}; - op::Constant c(element::i8, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::i8, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -277,7 +277,7 @@ TEST(constant, int8_string) TEST(constant, int8_string_broadcast) { Shape shape{4}; - op::Constant c(element::i8, shape, vector{"1"}); + op::Constant c(element::Type_t::i8, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -295,7 +295,7 @@ TEST(constant, int8_string_broadcast) TEST(constant, int8_vector) { Shape shape{4}; - op::Constant c(element::i8, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::i8, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -313,7 +313,7 @@ TEST(constant, int8_vector) TEST(constant, int8_vector_broadcast) { Shape shape{4}; - op::Constant c(element::i8, shape, vector{1}); + op::Constant c(element::Type_t::i8, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -335,7 +335,7 @@ TEST(constant, int8_vector_broadcast) TEST(constant, int16_string) { Shape shape{4}; - op::Constant c(element::i16, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::i16, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -353,7 +353,7 @@ TEST(constant, int16_string) 
TEST(constant, int16_string_broadcast) { Shape shape{4}; - op::Constant c(element::i16, shape, vector{"1"}); + op::Constant c(element::Type_t::i16, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -371,7 +371,7 @@ TEST(constant, int16_string_broadcast) TEST(constant, int16_vector) { Shape shape{4}; - op::Constant c(element::i16, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::i16, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -389,7 +389,7 @@ TEST(constant, int16_vector) TEST(constant, int16_vector_broadcast) { Shape shape{4}; - op::Constant c(element::i16, shape, vector{1}); + op::Constant c(element::Type_t::i16, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -411,7 +411,7 @@ TEST(constant, int16_vector_broadcast) TEST(constant, int32_string) { Shape shape{4}; - op::Constant c(element::i32, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::i32, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -429,7 +429,7 @@ TEST(constant, int32_string) TEST(constant, int32_string_broadcast) { Shape shape{4}; - op::Constant c(element::i32, shape, vector{"1"}); + op::Constant c(element::Type_t::i32, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -447,7 +447,7 @@ TEST(constant, int32_string_broadcast) TEST(constant, int32_vector) { Shape shape{4}; - op::Constant c(element::i32, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::i32, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -465,7 +465,7 @@ TEST(constant, int32_vector) TEST(constant, int32_vector_broadcast) { Shape shape{4}; - op::Constant c(element::i32, shape, vector{1}); + op::Constant c(element::Type_t::i32, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -487,7 +487,7 @@ TEST(constant, int32_vector_broadcast) TEST(constant, int64_string) { Shape shape{4}; - op::Constant c(element::i64, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::i64, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -505,7 +505,7 @@ TEST(constant, int64_string) TEST(constant, int64_string_broadcast) { Shape shape{4}; - op::Constant c(element::i64, shape, vector{"1"}); + op::Constant c(element::Type_t::i64, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -523,7 +523,7 @@ TEST(constant, int64_string_broadcast) TEST(constant, int64_vector) { Shape shape{4}; - op::Constant c(element::i64, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::i64, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -541,7 +541,7 @@ TEST(constant, int64_vector) TEST(constant, int64_vector_broadcast) { Shape shape{4}; - op::Constant c(element::i64, shape, vector{1}); + op::Constant c(element::Type_t::i64, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -563,7 +563,7 @@ TEST(constant, int64_vector_broadcast) TEST(constant, uint8_string) { Shape shape{4}; - op::Constant c(element::u8, shape, vector{"1", "0", "1", "0"}); + 
op::Constant c(element::Type_t::u8, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -581,7 +581,7 @@ TEST(constant, uint8_string) TEST(constant, uint8_string_broadcast) { Shape shape{4}; - op::Constant c(element::u8, shape, vector{"1"}); + op::Constant c(element::Type_t::u8, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -599,7 +599,7 @@ TEST(constant, uint8_string_broadcast) TEST(constant, uint8_vector) { Shape shape{4}; - op::Constant c(element::u8, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::u8, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -617,7 +617,7 @@ TEST(constant, uint8_vector) TEST(constant, uint8_vector_broadcast) { Shape shape{4}; - op::Constant c(element::u8, shape, vector{1}); + op::Constant c(element::Type_t::u8, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -639,7 +639,7 @@ TEST(constant, uint8_vector_broadcast) TEST(constant, uint16_string) { Shape shape{4}; - op::Constant c(element::u16, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::u16, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -657,7 +657,7 @@ TEST(constant, uint16_string) TEST(constant, uint16_string_broadcast) { Shape shape{4}; - op::Constant c(element::u16, shape, vector{"1"}); + op::Constant c(element::Type_t::u16, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -675,7 +675,7 @@ TEST(constant, uint16_string_broadcast) TEST(constant, uint16_vector) { Shape shape{4}; - op::Constant c(element::u16, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::u16, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -693,7 +693,7 @@ TEST(constant, uint16_vector) TEST(constant, uint16_vector_broadcast) { Shape shape{4}; - op::Constant c(element::u16, shape, vector{1}); + op::Constant c(element::Type_t::u16, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -715,7 +715,7 @@ TEST(constant, uint16_vector_broadcast) TEST(constant, uint32_string) { Shape shape{4}; - op::Constant c(element::u32, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::u32, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -733,7 +733,7 @@ TEST(constant, uint32_string) TEST(constant, uint32_string_broadcast) { Shape shape{4}; - op::Constant c(element::u32, shape, vector{"1"}); + op::Constant c(element::Type_t::u32, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -751,7 +751,7 @@ TEST(constant, uint32_string_broadcast) TEST(constant, uint32_vector) { Shape shape{4}; - op::Constant c(element::u32, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::u32, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -769,7 +769,7 @@ TEST(constant, uint32_vector) TEST(constant, uint32_vector_broadcast) { Shape shape{4}; - op::Constant c(element::u32, shape, vector{1}); + op::Constant c(element::Type_t::u32, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), 
shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -791,7 +791,7 @@ TEST(constant, uint32_vector_broadcast) TEST(constant, uint64_string) { Shape shape{4}; - op::Constant c(element::u64, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::u64, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -809,7 +809,7 @@ TEST(constant, uint64_string) TEST(constant, uint64_string_broadcast) { Shape shape{4}; - op::Constant c(element::u64, shape, vector{"1"}); + op::Constant c(element::Type_t::u64, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -827,7 +827,7 @@ TEST(constant, uint64_string_broadcast) TEST(constant, uint64_vector) { Shape shape{4}; - op::Constant c(element::u64, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::u64, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -845,7 +845,7 @@ TEST(constant, uint64_vector) TEST(constant, uint64_vector_broadcast) { Shape shape{4}; - op::Constant c(element::u64, shape, vector{1}); + op::Constant c(element::Type_t::u64, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -867,7 +867,7 @@ TEST(constant, uint64_vector_broadcast) TEST(constant, bfloat16_string) { Shape shape{4}; - op::Constant c(element::bf16, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::bf16, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], bfloat16(1)); @@ -885,7 +885,7 @@ TEST(constant, bfloat16_string) TEST(constant, bfloat16_string_broadcast) { Shape shape{4}; - op::Constant c(element::bf16, shape, vector{"1"}); + op::Constant c(element::Type_t::bf16, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], bfloat16(1)); @@ -903,7 +903,7 @@ TEST(constant, bfloat16_string_broadcast) TEST(constant, bfloat16_vector) { Shape shape{4}; - op::Constant c(element::bf16, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::bf16, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], bfloat16(1)); @@ -921,7 +921,7 @@ TEST(constant, bfloat16_vector) TEST(constant, bfloat16_vector_broadcast) { Shape shape{4}; - op::Constant c(element::bf16, shape, vector{1}); + op::Constant c(element::Type_t::bf16, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], bfloat16(1)); @@ -943,7 +943,7 @@ TEST(constant, bfloat16_vector_broadcast) TEST(constant, float16_string) { Shape shape{4}; - op::Constant c(element::f16, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::f16, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], float16(1)); @@ -961,7 +961,7 @@ TEST(constant, float16_string) TEST(constant, float16_string_broadcast) { Shape shape{4}; - op::Constant c(element::f16, shape, vector{"1"}); + op::Constant c(element::Type_t::f16, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], float16(1)); @@ -979,7 +979,7 @@ TEST(constant, float16_string_broadcast) TEST(constant, float16_vector) { Shape shape{4}; - op::Constant c(element::f16, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::f16, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); 
ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], float16(1)); @@ -997,7 +997,7 @@ TEST(constant, float16_vector) TEST(constant, float16_vector_broadcast) { Shape shape{4}; - op::Constant c(element::f16, shape, vector{1}); + op::Constant c(element::Type_t::f16, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], float16(1)); @@ -1015,7 +1015,7 @@ TEST(constant, float16_vector_broadcast) TEST(constant, shared_data) { Shape shape{100, 200}; - auto c1 = make_shared(element::f16, shape, vector{123}); + auto c1 = make_shared(element::Type_t::f16, shape, vector{123}); auto c2 = static_pointer_cast(c1->clone_with_new_inputs({})); const int16_t* p1 = c1->get_data_ptr(); const int16_t* p2 = c2->get_data_ptr(); @@ -1368,7 +1368,7 @@ TEST(constant, construct_uniform) TEST(constant, bad_get_data_ptr) { - op::Constant c(element::f32, Shape{}, vector{1.0}); + op::Constant c(element::Type_t::f32, Shape{}, vector{1.0}); EXPECT_EQ(*c.get_data_ptr(), 1.0); try { diff --git a/ngraph/test/constant_folding.cpp b/ngraph/test/constant_folding.cpp index be87409a08a65e..a7b635aa20be5e 100644 --- a/ngraph/test/constant_folding.cpp +++ b/ngraph/test/constant_folding.cpp @@ -62,7 +62,7 @@ TEST(constant_folding, acosh) { expected.push_back(std::acosh(f)); } - auto constant = make_shared(element::f32, shape_in, values_in); + auto constant = make_shared(element::Type_t::f32, shape_in, values_in); auto acosh = make_shared(constant); acosh->set_friendly_name("test"); auto f = make_shared(acosh, ParameterVector{}); @@ -94,7 +94,7 @@ TEST(constant_folding, asinh) { expected.push_back(std::asinh(f)); } - auto constant = make_shared(element::f32, shape_in, values_in); + auto constant = make_shared(element::Type_t::f32, shape_in, values_in); auto asinh = make_shared(constant); asinh->set_friendly_name("test"); auto f = make_shared(asinh, ParameterVector{}); @@ -126,7 +126,7 @@ TEST(constant_folding, atanh) { expected.push_back(std::atanh(f)); } - auto constant = make_shared(element::f32, shape_in, values_in); + auto constant = make_shared(element::Type_t::f32, shape_in, values_in); auto atanh = make_shared(constant); atanh->set_friendly_name("test"); auto f = make_shared(atanh, ParameterVector{}); @@ -155,9 +155,9 @@ TEST(constant_folding, constant_squeeze) Shape axes_shape{1}; vector values_in{0, 1, 2, 3, 4, 5, 6, 7}; - auto constant = make_shared(element::f32, shape_in, values_in); + auto constant = make_shared(element::Type_t::f32, shape_in, values_in); vector values_axes{2}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto squeeze = make_shared(constant, constant_axes); squeeze->set_friendly_name("test"); auto f = make_shared(squeeze, ParameterVector{}); @@ -186,9 +186,9 @@ TEST(constant_folding, constant_unsqueeze) Shape axes_shape{2}; vector values_in{0, 1, 2, 3, 4, 5, 6, 7}; - auto constant = make_shared(element::f32, shape_in, values_in); + auto constant = make_shared(element::Type_t::f32, shape_in, values_in); vector values_axes{2, 3}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto unsqueeze = make_shared(constant, constant_axes); unsqueeze->set_friendly_name("test"); auto f = make_shared(unsqueeze, ParameterVector{}); @@ -213,11 +213,11 @@ TEST(constant_folding, constant_unsqueeze) 
TEST(constant_folding, constant_broadcast_v1) { vector values_in{0, 1}; - auto constant_in = make_shared(element::i32, Shape{2}, values_in); + auto constant_in = make_shared(element::Type_t::i32, Shape{2}, values_in); vector shape_in{2, 4}; - auto constant_shape = make_shared(element::i64, Shape{2}, shape_in); + auto constant_shape = make_shared(element::Type_t::i64, Shape{2}, shape_in); vector axes_in{0}; - auto constant_axes = make_shared(element::i64, Shape{1}, axes_in); + auto constant_axes = make_shared(element::Type_t::i64, Shape{1}, axes_in); auto broadcast_v1 = make_shared(constant_in, constant_shape, constant_axes); broadcast_v1->set_friendly_name("test"); auto f = make_shared(broadcast_v1, ParameterVector{}); @@ -242,9 +242,10 @@ TEST(constant_folding, constant_broadcast_v1) TEST(constant_folding, constant_broadcast_v1_with_target_shape) { vector values_in{1}; - auto constant_in = make_shared(element::i32, Shape{1, 1, 1, 1}, values_in); + auto constant_in = + make_shared(element::Type_t::i32, Shape{1, 1, 1, 1}, values_in); vector shape_in{1, 3, 1, 1}; - auto target_shape = make_shared(element::i64, Shape{4}, shape_in); + auto target_shape = make_shared(element::Type_t::i64, Shape{4}, shape_in); auto broadcast_v1 = make_shared(constant_in, target_shape); broadcast_v1->set_friendly_name("test"); auto f = make_shared(broadcast_v1, ParameterVector{}); @@ -269,9 +270,9 @@ TEST(constant_folding, constant_broadcast_v1_with_target_shape) TEST(constant_folding, constant_broadcast_v1_numpy) { vector values_in{0, 1}; - auto constant_in = make_shared(element::i32, Shape{2}, values_in); + auto constant_in = make_shared(element::Type_t::i32, Shape{2}, values_in); vector shape_in{4, 2}; - auto constant_shape = make_shared(element::i64, Shape{2}, shape_in); + auto constant_shape = make_shared(element::Type_t::i64, Shape{2}, shape_in); auto broadcast_v1 = make_shared(constant_in, constant_shape); broadcast_v1->set_friendly_name("test"); auto f = make_shared(broadcast_v1, ParameterVector{}); @@ -304,15 +305,15 @@ TEST(constant_folding, constant_unary_binary) vector values_g{1, 4}; vector values_h{0, 0, 1, 1}; vector values_i{0, 1}; - auto a = make_shared(element::i32, Shape{2, 2}, values_a); - auto b = make_shared(element::i32, Shape{2, 2}, values_b); - auto c = make_shared(element::i32, Shape{2, 2}, values_c); - auto d = make_shared(element::i32, Shape{2, 2}, values_d); - auto e = make_shared(element::i32, Shape{2}, values_e); - auto f = make_shared(element::i32, Shape{2}, values_f); - auto g = make_shared(element::i32, Shape{2}, values_g); - auto h = make_shared(element::boolean, Shape{2, 2}, values_h); - auto i = make_shared(element::boolean, Shape{2}, values_i); + auto a = make_shared(element::Type_t::i32, Shape{2, 2}, values_a); + auto b = make_shared(element::Type_t::i32, Shape{2, 2}, values_b); + auto c = make_shared(element::Type_t::i32, Shape{2, 2}, values_c); + auto d = make_shared(element::Type_t::i32, Shape{2, 2}, values_d); + auto e = make_shared(element::Type_t::i32, Shape{2}, values_e); + auto f = make_shared(element::Type_t::i32, Shape{2}, values_f); + auto g = make_shared(element::Type_t::i32, Shape{2}, values_g); + auto h = make_shared(element::Type_t::boolean, Shape{2, 2}, values_h); + auto i = make_shared(element::Type_t::boolean, Shape{2}, values_i); auto add = a + b; auto sub = a - b; @@ -434,8 +435,8 @@ TEST(constant_folding, const_convert) Shape input_shape{3, 4}; vector values_in{1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7}; - auto constant = op::Constant::create(element::f32, 
input_shape, values_in); - auto convert = make_shared(constant, element::u64); + auto constant = op::Constant::create(element::Type_t::f32, input_shape, values_in); + auto convert = make_shared(constant, element::Type_t::u64); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -450,7 +451,7 @@ TEST(constant_folding, const_convert) as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); - ASSERT_EQ(new_const->get_output_element_type(0), element::u64); + ASSERT_EQ(new_const->get_output_element_type(0), element::Type_t::u64); auto values_out = new_const->get_vector(); vector values_expected{1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7}; @@ -461,7 +462,7 @@ TEST(constant_folding, shape_of_v0) { Shape input_shape{3, 4, 0, 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -477,7 +478,7 @@ TEST(constant_folding, shape_of_v0) as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); - ASSERT_EQ(new_const->get_output_element_type(0), element::i64); + ASSERT_EQ(new_const->get_output_element_type(0), element::Type_t::i64); auto values_out = new_const->get_vector(); ASSERT_EQ((vector{3, 4, 0, 22, 608, 909, 3}), values_out); @@ -487,7 +488,7 @@ TEST(constant_folding, shape_of_v3) { Shape input_shape{3, 4, 0, 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -503,7 +504,7 @@ TEST(constant_folding, shape_of_v3) as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); - ASSERT_EQ(new_const->get_output_element_type(0), element::i64); + ASSERT_EQ(new_const->get_output_element_type(0), element::Type_t::i64); auto values_out = new_const->get_vector(); ASSERT_EQ((vector{3, 4, 0, 22, 608, 909, 3}), values_out); @@ -513,8 +514,8 @@ TEST(constant_folding, shape_of_i32_v3) { Shape input_shape{3, 4, 0, 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); - auto shape_of = make_shared(param, element::i32); + auto param = make_shared(element::Type_t::boolean, input_shape); + auto shape_of = make_shared(param, element::Type_t::i32); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -529,7 +530,7 @@ TEST(constant_folding, shape_of_i32_v3) as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); - ASSERT_EQ(new_const->get_output_element_type(0), element::i32); + ASSERT_EQ(new_const->get_output_element_type(0), element::Type_t::i32); auto values_out = new_const->get_vector(); ASSERT_EQ((vector{3, 4, 0, 22, 608, 909, 3}), values_out); @@ -539,7 +540,7 @@ TEST(constant_folding, shape_of_dynamic_v0) { PartialShape input_shape{3, 4, Dimension::dynamic(), 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = 
make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -564,7 +565,7 @@ TEST(constant_folding, shape_of_dynamic_v3) { PartialShape input_shape{3, 4, Dimension::dynamic(), 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -583,15 +584,15 @@ TEST(constant_folding, shape_of_dynamic_v3) ASSERT_TRUE(result_as_concat); ASSERT_EQ(result_as_concat->get_friendly_name(), "test"); ASSERT_EQ(result_as_concat->get_output_shape(0), Shape{7}); - ASSERT_EQ(result_as_concat->get_output_element_type(0), element::i64); + ASSERT_EQ(result_as_concat->get_output_element_type(0), element::Type_t::i64); } TEST(constant_folding, shape_of_dynamic_i32_v3) { PartialShape input_shape{3, 4, Dimension::dynamic(), 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); - auto shape_of = make_shared(param, element::i32); + auto param = make_shared(element::Type_t::boolean, input_shape); + auto shape_of = make_shared(param, element::Type_t::i32); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -609,7 +610,7 @@ TEST(constant_folding, shape_of_dynamic_i32_v3) ASSERT_TRUE(result_as_concat); ASSERT_EQ(result_as_concat->get_friendly_name(), "test"); ASSERT_EQ(result_as_concat->get_output_shape(0), Shape{7}); - ASSERT_EQ(result_as_concat->get_output_element_type(0), element::i32); + ASSERT_EQ(result_as_concat->get_output_element_type(0), element::Type_t::i32); } // We need to be sure that constant folding won't be calculated endlessly. @@ -617,7 +618,7 @@ TEST(constant_folding, shape_of_dynamic_double_folding_v0) { PartialShape input_shape{3, 4, Dimension::dynamic(), 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -643,7 +644,7 @@ TEST(constant_folding, shape_of_dynamic_double_folding_v3) { PartialShape input_shape{3, 4, Dimension::dynamic(), 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -671,7 +672,7 @@ TEST(constant_folding, shape_of_rank_dynamic_v0) { PartialShape input_shape{PartialShape::dynamic()}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -692,7 +693,7 @@ TEST(constant_folding, shape_of_rank_dynamic_v3) { PartialShape input_shape{PartialShape::dynamic()}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -714,7 +715,7 @@ void const_reverse(const element::Type& axes_elem_type) Shape input_shape{3, 3}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, 
input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); auto axes = op::Constant::create(axes_elem_type, {1}, {1}); auto convert = make_shared(constant, axes, op::v1::Reverse::Mode::INDEX); convert->set_friendly_name("test"); @@ -739,14 +740,14 @@ void const_reverse(const element::Type& axes_elem_type) TEST(constant_folding, const_reverse) { - for (auto&& axes_elem_type : {element::i8, - element::u8, - element::i16, - element::u16, - element::i32, - element::u32, - element::i64, - element::u64}) + for (auto&& axes_elem_type : {element::Type_t::i8, + element::Type_t::u8, + element::Type_t::i16, + element::Type_t::u16, + element::Type_t::i32, + element::Type_t::u32, + element::Type_t::i64, + element::Type_t::u64}) { const_reverse(axes_elem_type); } @@ -758,10 +759,10 @@ TEST(constant_folding, const_reduceprod) Shape output_shape{3}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -792,10 +793,10 @@ TEST(constant_folding, const_reduceprod_keepdims) Shape output_shape{3, 1}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -826,10 +827,10 @@ TEST(constant_folding, const_reducesum) Shape output_shape{3}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -860,10 +861,10 @@ TEST(constant_folding, const_reducesum_keepdims) Shape output_shape{3, 1}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -894,10 +895,10 @@ TEST(constant_folding, const_reducemax) Shape output_shape{3}; vector 
values_in{1, 2, 3, 4, 5, 6}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -928,10 +929,10 @@ TEST(constant_folding, const_reducemax_keepdims) Shape output_shape{3, 1}; vector values_in{1, 2, 3, 4, 5, 6}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -962,10 +963,10 @@ TEST(constant_folding, const_reducemin) Shape output_shape{3}; vector values_in{1, 2, 3, 4, 5, 6}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -996,10 +997,10 @@ TEST(constant_folding, const_reducemin_keepdims) Shape output_shape{3, 1}; vector values_in{1, 2, 3, 4, 5, 6}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1030,10 +1031,10 @@ TEST(constant_folding, const_reducemean) Shape output_shape{3}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1064,10 +1065,10 @@ TEST(constant_folding, const_reducemean_keepdims) Shape output_shape{3, 1}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = 
op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1097,8 +1098,8 @@ TEST(constant_folding, const_reduce_logical_and__no_keepdims) const Shape input_shape{3, 3}; const vector values_in{0, 1, 1, 0, 1, 0, 1, 1, 1}; - const auto data = op::Constant::create(element::boolean, input_shape, values_in); - const auto axes = op::Constant::create(element::i64, {1}, {1}); + const auto data = op::Constant::create(element::Type_t::boolean, input_shape, values_in); + const auto axes = op::Constant::create(element::Type_t::i64, {1}, {1}); const auto convert = make_shared(data, axes, false); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1130,8 +1131,8 @@ TEST(constant_folding, const_reduce_logical_and__keepdims) const Shape input_shape{3, 3}; const vector values_in{0, 1, 1, 0, 1, 0, 1, 1, 1}; - const auto data = op::Constant::create(element::boolean, input_shape, values_in); - const auto axes = op::Constant::create(element::i64, {1}, {1}); + const auto data = op::Constant::create(element::Type_t::boolean, input_shape, values_in); + const auto axes = op::Constant::create(element::Type_t::i64, {1}, {1}); const auto convert = make_shared(data, axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1165,8 +1166,8 @@ TEST(constant_folding, const_reduce_logical_and__keepdims_3d) const Shape input_shape{2, 2, 2}; const vector values_in{1, 1, 0, 0, 1, 0, 0, 1}; - const auto data = op::Constant::create(element::boolean, input_shape, values_in); - const auto axes = op::Constant::create(element::i64, {2}, {0, 2}); + const auto data = op::Constant::create(element::Type_t::boolean, input_shape, values_in); + const auto axes = op::Constant::create(element::Type_t::i64, {2}, {0, 2}); const auto convert = make_shared(data, axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1198,8 +1199,8 @@ TEST(constant_folding, const_reduce_logical_or__no_keepdims) const Shape input_shape{3, 3}; const vector values_in{1, 0, 0, 1, 0, 1, 0, 0, 0}; - const auto data = op::Constant::create(element::boolean, input_shape, values_in); - const auto axes = op::Constant::create(element::i64, {1}, {1}); + const auto data = op::Constant::create(element::Type_t::boolean, input_shape, values_in); + const auto axes = op::Constant::create(element::Type_t::i64, {1}, {1}); const auto convert = make_shared(data, axes, false); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1229,8 +1230,8 @@ TEST(constant_folding, const_reduce_logical_or__no_keepdims) TEST(constant_folding, const_concat) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); - auto constant1 = op::Constant::create(element::i32, Shape{2, 1}, vector{7, 8}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + auto constant1 = op::Constant::create(element::Type_t::i32, Shape{2, 1}, vector{7, 8}); auto concat = make_shared(NodeVector{constant0, constant1}, 1); concat->set_friendly_name("test"); auto f = make_shared(concat, ParameterVector{}); @@ -1255,8 +1256,10 @@ TEST(constant_folding, const_concat) TEST(constant_folding, const_concat_3d_single_elem) { - auto constant_1 = 
op::Constant::create(element::i32, Shape{1, 1, 1}, vector{1}); - auto constant_2 = op::Constant::create(element::i32, Shape{1, 1, 1}, vector{2}); + auto constant_1 = + op::Constant::create(element::Type_t::i32, Shape{1, 1, 1}, vector{1}); + auto constant_2 = + op::Constant::create(element::Type_t::i32, Shape{1, 1, 1}, vector{2}); auto concat = make_shared(NodeVector{constant_1, constant_2}, 0); concat->set_friendly_name("test"); auto f = make_shared(concat, ParameterVector{}); @@ -1282,10 +1285,12 @@ TEST(constant_folding, const_concat_3d_single_elem) TEST(constant_folding, const_concat_axis_2) { - auto constant_1 = - op::Constant::create(element::i32, Shape{3, 1, 2}, vector{1, 2, 3, 4, 5, 6}); - auto constant_2 = op::Constant::create( - element::i32, Shape{3, 1, 4}, vector{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); + auto constant_1 = op::Constant::create( + element::Type_t::i32, Shape{3, 1, 2}, vector{1, 2, 3, 4, 5, 6}); + auto constant_2 = + op::Constant::create(element::Type_t::i32, + Shape{3, 1, 4}, + vector{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); auto concat = make_shared(NodeVector{constant_1, constant_2}, 2); concat->set_friendly_name("test"); auto f = make_shared(concat, ParameterVector{}); @@ -1312,11 +1317,12 @@ TEST(constant_folding, const_concat_axis_2) TEST(constant_folding, const_concat_axis_1_bool_type) { auto constant_1 = - op::Constant::create(element::boolean, Shape{1, 1, 2}, vector{true, true}); + op::Constant::create(element::Type_t::boolean, Shape{1, 1, 2}, vector{true, true}); auto constant_2 = op::Constant::create( - element::boolean, Shape{1, 2, 2}, vector{true, false, true, false}); - auto constant_3 = op::Constant::create( - element::boolean, Shape{1, 3, 2}, vector{true, false, true, false, true, false}); + element::Type_t::boolean, Shape{1, 2, 2}, vector{true, false, true, false}); + auto constant_3 = op::Constant::create(element::Type_t::boolean, + Shape{1, 3, 2}, + vector{true, false, true, false, true, false}); auto concat = make_shared(NodeVector{constant_1, constant_2, constant_3}, 1); concat->set_friendly_name("test"); auto f = make_shared(concat, ParameterVector{}); @@ -1344,7 +1350,7 @@ TEST(constant_folding, const_concat_axis_1_bool_type) TEST(constant_folding, const_logical_not) { auto constant = - op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 1, 0, 0, 1, 1}); + op::Constant::create(element::Type_t::boolean, Shape{2, 3}, vector{0, 1, 0, 0, 1, 1}); auto logical_not = make_shared(constant); logical_not->set_friendly_name("test"); auto f = make_shared(logical_not, ParameterVector{}); @@ -1370,9 +1376,9 @@ TEST(constant_folding, const_logical_not) TEST(constant_folding, const_equal) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); auto constant1 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 2, 3, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 2, 3, 5, 6}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1398,9 +1404,9 @@ TEST(constant_folding, const_equal) TEST(constant_folding, const_not_equal) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); auto constant1 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 2, 3, 5, 6}); + 
op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 2, 3, 5, 6}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1426,9 +1432,9 @@ TEST(constant_folding, const_not_equal) TEST(constant_folding, const_greater) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); auto constant1 = - op::Constant::create(element::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1454,9 +1460,9 @@ TEST(constant_folding, const_greater) TEST(constant_folding, const_greater_eq) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); auto constant1 = - op::Constant::create(element::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1482,9 +1488,9 @@ TEST(constant_folding, const_greater_eq) TEST(constant_folding, const_less) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); auto constant1 = - op::Constant::create(element::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1510,9 +1516,9 @@ TEST(constant_folding, const_less) TEST(constant_folding, const_less_eq) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); auto constant1 = - op::Constant::create(element::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1537,10 +1543,10 @@ TEST(constant_folding, const_less_eq) TEST(constant_folding, const_or) { - auto constant0 = - op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 0, 1, 0, 1, 1}); - auto constant1 = - op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 1, 1, 1, 0, 1}); + auto constant0 = op::Constant::create( + element::Type_t::boolean, Shape{2, 3}, vector{0, 0, 1, 0, 1, 1}); + auto constant1 = op::Constant::create( + element::Type_t::boolean, Shape{2, 3}, vector{0, 1, 1, 1, 0, 1}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1565,10 +1571,10 @@ TEST(constant_folding, const_or) TEST(constant_folding, const_xor) { - auto constant0 = - op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 0, 1, 0, 1, 1}); - auto constant1 = - op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 1, 1, 1, 0, 1}); + auto constant0 = op::Constant::create( + element::Type_t::boolean, Shape{2, 3}, vector{0, 0, 1, 0, 1, 
1}); + auto constant1 = op::Constant::create( + element::Type_t::boolean, Shape{2, 3}, vector{0, 1, 1, 1, 0, 1}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1594,7 +1600,7 @@ TEST(constant_folding, const_xor) TEST(constant_folding, const_ceiling) { auto constant = op::Constant::create( - element::f32, Shape{2, 3}, vector{0.0f, 0.1f, -0.1f, -2.5f, 2.5f, 3.0f}); + element::Type_t::f32, Shape{2, 3}, vector{0.0f, 0.1f, -0.1f, -2.5f, 2.5f, 3.0f}); auto ceil = make_shared(constant); ceil->set_friendly_name("test"); auto f = make_shared(ceil, ParameterVector{}); @@ -1620,7 +1626,7 @@ TEST(constant_folding, const_ceiling) TEST(constant_folding, const_floor) { auto constant = op::Constant::create( - element::f32, Shape{2, 3}, vector{0.0f, 0.1f, -0.1f, -2.5f, 2.5f, 3.0f}); + element::Type_t::f32, Shape{2, 3}, vector{0.0f, 0.1f, -0.1f, -2.5f, 2.5f, 3.0f}); auto floor = make_shared(constant); floor->set_friendly_name("test"); auto f = make_shared(floor, ParameterVector{}); @@ -1646,12 +1652,12 @@ TEST(constant_folding, const_floor) TEST(constant_folding, const_gather_v1) { auto constant_data = op::Constant::create( - element::f32, + element::Type_t::f32, Shape{2, 5}, vector{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}); auto constant_indices = - op::Constant::create(element::i64, Shape{4}, vector{0, 3, 2, 2}); - auto constant_axis = op::Constant::create(element::i64, Shape{1}, vector{1}); + op::Constant::create(element::Type_t::i64, Shape{4}, vector{0, 3, 2, 2}); + auto constant_axis = op::Constant::create(element::Type_t::i64, Shape{1}, vector{1}); auto gather = make_shared(constant_data, constant_indices, constant_axis); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{}); @@ -1677,12 +1683,12 @@ TEST(constant_folding, const_gather_v1) TEST(constant_folding, const_gather_v1_scalar) { auto constant_data = op::Constant::create( - element::f32, + element::Type_t::f32, Shape{2, 5}, vector{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}); auto constant_indices = - op::Constant::create(element::i64, Shape{4}, vector{0, 3, 2, 2}); - auto constant_axis = op::Constant::create(element::i64, Shape{}, vector{1}); + op::Constant::create(element::Type_t::i64, Shape{4}, vector{0, 3, 2, 2}); + auto constant_axis = op::Constant::create(element::Type_t::i64, Shape{}, vector{1}); auto gather = make_shared(constant_data, constant_indices, constant_axis); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{}); @@ -1707,17 +1713,18 @@ TEST(constant_folding, const_gather_v1_scalar) TEST(constant_folding, const_gather_v1_subgraph) { - const auto A = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, Shape{1}); const float b_value = 3.21f; - const auto B_const = op::Constant::create(element::f32, {1}, {b_value}); - const auto C = make_shared(element::f32, Shape{1}); + const auto B_const = op::Constant::create(element::Type_t::f32, {1}, {b_value}); + const auto C = make_shared(element::Type_t::f32, Shape{1}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B_const, C}, axis); const vector indices{1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + 
op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{A, C}); @@ -1741,17 +1748,18 @@ TEST(constant_folding, const_gather_v1_subgraph) TEST(constant_folding, const_gather_v1_subgraph_neg_axis) { - const auto A = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, Shape{1}); const float b_value = 1.23f; - const auto B = make_shared(element::f32, Shape{1}); - const auto C_const = op::Constant::create(element::f32, {1}, {b_value}); + const auto B = make_shared(element::Type_t::f32, Shape{1}); + const auto C_const = op::Constant::create(element::Type_t::f32, {1}, {b_value}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C_const}, axis); const vector indices{-1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{A, B}); @@ -1775,16 +1783,17 @@ TEST(constant_folding, const_gather_v1_subgraph_neg_axis) TEST(constant_folding, const_gather_v1_subgraph_no_constant_input) { - const auto A = make_shared(element::f32, Shape{1}); - const auto B = make_shared(element::f32, Shape{1}); - const auto C = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, Shape{1}); + const auto B = make_shared(element::Type_t::f32, Shape{1}); + const auto C = make_shared(element::Type_t::f32, Shape{1}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C}, axis); const vector indices{1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{A, B, C}); @@ -1799,16 +1808,16 @@ TEST(constant_folding, const_gather_v1_subgraph_no_constant_input) TEST(constant_folding, const_gather_v1_subgraph_no_constant_input_scalar) { - const auto A = make_shared(element::f32, Shape{1}); - const auto B = make_shared(element::f32, Shape{1}); - const auto C = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, Shape{1}); + const auto B = make_shared(element::Type_t::f32, Shape{1}); + const auto C = make_shared(element::Type_t::f32, Shape{1}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C}, axis); const vector indices{1}; - const auto indices_const = op::Constant::create(element::i64, {}, indices); + const auto indices_const = op::Constant::create(element::Type_t::i64, {}, indices); const auto gather = make_shared(concat, indices_const, axis_const); auto f = 
make_shared(gather, ParameterVector{A, B, C}); @@ -1823,16 +1832,17 @@ TEST(constant_folding, const_gather_v1_subgraph_no_constant_input_scalar) TEST(constant_folding, const_gather_v1_subgraph_skip_if_non_zero_axis) { - const auto A = make_shared(element::f32, Shape{2, 2}); - const auto B = make_shared(element::f32, Shape{2, 2}); - const auto C = make_shared(element::f32, Shape{2, 2}); + const auto A = make_shared(element::Type_t::f32, Shape{2, 2}); + const auto B = make_shared(element::Type_t::f32, Shape{2, 2}); + const auto C = make_shared(element::Type_t::f32, Shape{2, 2}); const int64_t axis = 1; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C}, axis); const vector indices{1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); @@ -1846,16 +1856,17 @@ TEST(constant_folding, const_gather_v1_subgraph_skip_if_non_zero_axis) TEST(constant_folding, const_gather_v1_subgraph_skip_if_non_single_indices) { - const auto A = make_shared(element::f32, Shape{1}); - const auto B = make_shared(element::f32, Shape{1}); - const auto C = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, Shape{1}); + const auto B = make_shared(element::Type_t::f32, Shape{1}); + const auto C = make_shared(element::Type_t::f32, Shape{1}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C}, axis); const vector indices{0, 1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); @@ -1869,16 +1880,17 @@ TEST(constant_folding, const_gather_v1_subgraph_skip_if_non_single_indices) TEST(constant_folding, const_gather_v1_subgraph_skip_if_concat_output_shape_dynamic) { - const auto A = make_shared(element::f32, PartialShape::dynamic()); - const auto B = make_shared(element::f32, Shape{1}); - const auto C = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto B = make_shared(element::Type_t::f32, Shape{1}); + const auto C = make_shared(element::Type_t::f32, Shape{1}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C}, axis); const vector indices{1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); @@ -1892,16 +1904,17 @@ TEST(constant_folding, const_gather_v1_subgraph_skip_if_concat_output_shape_dyna TEST(constant_folding, 
const_gather_v1_subgraph_skip_if_not_single_input) { - const auto A = make_shared(element::f32, Shape{2}); - const auto B = make_shared(element::f32, Shape{1}); - const auto C = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, Shape{2}); + const auto B = make_shared(element::Type_t::f32, Shape{1}); + const auto C = make_shared(element::Type_t::f32, Shape{1}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C}, axis); const vector indices{1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); @@ -1918,10 +1931,10 @@ TEST(constant_folding, const_strided_slice) Shape shape_in{16}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; - auto constant = make_shared(element::i32, shape_in, values_in); - auto begin = op::Constant::create(element::i64, {1}, {2}); - auto end = op::Constant::create(element::i64, {1}, {15}); - auto stride = op::Constant::create(element::i64, {1}, {3}); + auto constant = make_shared(element::Type_t::i32, shape_in, values_in); + auto begin = op::Constant::create(element::Type_t::i64, {1}, {2}); + auto end = op::Constant::create(element::Type_t::i64, {1}, {15}); + auto stride = op::Constant::create(element::Type_t::i64, {1}, {3}); auto slice = make_shared( constant, begin, end, stride, std::vector{0}, std::vector{0}); slice->set_friendly_name("test"); @@ -1953,8 +1966,9 @@ TEST(constant_folding, constant_dyn_reshape) Shape shape_shape{3}; vector values_shape{2, 4, 1}; - auto constant_in = make_shared(element::f32, shape_in, values_in); - auto constant_shape = make_shared(element::i64, shape_shape, values_shape); + auto constant_in = make_shared(element::Type_t::f32, shape_in, values_in); + auto constant_shape = + make_shared(element::Type_t::i64, shape_shape, values_shape); auto dyn_reshape = make_shared(constant_in, constant_shape, false); dyn_reshape->set_friendly_name("test"); auto f = make_shared(dyn_reshape, ParameterVector{}); @@ -1988,9 +2002,11 @@ TEST(constant_folding, constant_dyn_reshape_shape_not_originally_constant) vector values_shape_a{1, 3, 0}; vector values_shape_b{1, 1, 1}; - auto constant_in = make_shared(element::f32, shape_in, values_in); - auto constant_shape_a = make_shared(element::i64, shape_shape, values_shape_a); - auto constant_shape_b = make_shared(element::i64, shape_shape, values_shape_b); + auto constant_in = make_shared(element::Type_t::f32, shape_in, values_in); + auto constant_shape_a = + make_shared(element::Type_t::i64, shape_shape, values_shape_a); + auto constant_shape_b = + make_shared(element::Type_t::i64, shape_shape, values_shape_b); auto dyn_reshape = make_shared( constant_in, std::make_shared(constant_shape_a, constant_shape_b), false); dyn_reshape->set_friendly_name("test"); @@ -2022,8 +2038,8 @@ TEST(constant_folding, constant_transpose) Shape shape_perm{2}; vector values_perm{1, 0}; - auto constant_in = make_shared(element::f64, shape_in, values_in); - auto constant_perm = make_shared(element::i64, shape_perm, values_perm); + auto constant_in = make_shared(element::Type_t::f64, shape_in, values_in); + auto constant_perm = 
make_shared(element::Type_t::i64, shape_perm, values_perm); auto transpose = make_shared(constant_in, constant_perm); transpose->set_friendly_name("test"); auto f = make_shared(transpose, ParameterVector{}); @@ -2097,9 +2113,9 @@ TEST(constant_folding, constant_v1_select) vector values_f{11, 12, 13, 14, 15, 16, 17, 18}; auto constant_selection = - make_shared(element::boolean, Shape{4}, values_selection); - auto constant_t = make_shared(element::i64, Shape{4}, values_t); - auto constant_f = make_shared(element::i64, Shape{2, 4}, values_f); + make_shared(element::Type_t::boolean, Shape{4}, values_selection); + auto constant_t = make_shared(element::Type_t::i64, Shape{4}, values_t); + auto constant_f = make_shared(element::Type_t::i64, Shape{2, 4}, values_f); auto select = make_shared(constant_selection, constant_t, constant_f); select->set_friendly_name("test"); auto f = make_shared(select, ParameterVector{}); @@ -2124,8 +2140,8 @@ TEST(constant_folding, constant_v1_select) TEST(constant_folding, constant_v1_split) { vector data{.1f, .2f, .3f, .4f, .5f, .6f}; - const auto const_data = op::Constant::create(element::f32, Shape{data.size()}, data); - const auto const_axis = op::Constant::create(element::i64, Shape{}, {0}); + const auto const_data = op::Constant::create(element::Type_t::f32, Shape{data.size()}, data); + const auto const_axis = op::Constant::create(element::Type_t::i64, Shape{}, {0}); const auto num_splits = 3; auto split_v1 = make_shared(const_data, const_axis, num_splits); @@ -2159,8 +2175,8 @@ TEST(constant_folding, constant_v1_split) TEST(constant_folding, constant_v1_split_specialized) { vector data{.1f, .2f, .3f, .4f, .5f, .6f}; - const auto const_data = op::Constant::create(element::f32, Shape{data.size()}, data); - const auto const_axis = op::Constant::create(element::i64, Shape{}, {0}); + const auto const_data = op::Constant::create(element::Type_t::f32, Shape{data.size()}, data); + const auto const_axis = op::Constant::create(element::Type_t::i64, Shape{}, {0}); const auto num_splits = 3; auto split_v1 = make_shared(const_data, const_axis, num_splits); @@ -2201,8 +2217,8 @@ TEST(constant_folding, constant_v1_split_axis_1_4_splits) 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}; - const auto const_data = op::Constant::create(element::i64, Shape{4, 4, 4}, data); - const auto const_axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto const_data = op::Constant::create(element::Type_t::i64, Shape{4, 4, 4}, data); + const auto const_axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto num_splits = 4; auto split_v1 = make_shared(const_data, const_axis, num_splits); @@ -2257,8 +2273,8 @@ TEST(constant_folding, constant_v1_split_axis_1_2_splits) 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}; - const auto const_data = op::Constant::create(element::i64, Shape{4, 4, 4}, data); - const auto const_axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto const_data = op::Constant::create(element::Type_t::i64, Shape{4, 4, 4}, data); + const auto const_axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto num_splits = 2; auto split_v1 = make_shared(const_data, const_axis, num_splits); @@ -2298,11 +2314,11 @@ TEST(constant_folding, constant_v1_variadic_split_axis_1_2_splits) 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}; - const auto const_data = op::Constant::create(element::i64, Shape{4, 4, 4}, data); - const auto const_axis = 
op::Constant::create(element::i16, Shape{}, {1}); + const auto const_data = op::Constant::create(element::Type_t::i64, Shape{4, 4, 4}, data); + const auto const_axis = op::Constant::create(element::Type_t::i16, Shape{}, {1}); vector values_lengths{3, 1}; - auto constant_lengths = - make_shared(element::i64, Shape{values_lengths.size()}, values_lengths); + auto constant_lengths = make_shared( + element::Type_t::i64, Shape{values_lengths.size()}, values_lengths); auto variadic_split_v1 = make_shared(const_data, const_axis, constant_lengths); @@ -2342,11 +2358,11 @@ TEST(constant_folding, constant_v1_variadic_split_axis_1_3_splits_neg_length) 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}; - const auto const_data = op::Constant::create(element::i64, Shape{4, 4, 4}, data); - const auto const_axis = op::Constant::create(element::i32, Shape{}, {1}); + const auto const_data = op::Constant::create(element::Type_t::i64, Shape{4, 4, 4}, data); + const auto const_axis = op::Constant::create(element::Type_t::i32, Shape{}, {1}); vector values_lengths{1, 1, -1}; - auto constant_lengths = - make_shared(element::i64, Shape{values_lengths.size()}, values_lengths); + auto constant_lengths = make_shared( + element::Type_t::i64, Shape{values_lengths.size()}, values_lengths); auto variadic_split_v1 = make_shared(const_data, const_axis, constant_lengths); @@ -2387,10 +2403,10 @@ TEST(constant_folding, constant_v1_one_hot) const float on_value = 1.123f; const float off_value = 0.321f; - const auto indices_const = op::Constant::create(element::i64, Shape{3}, indices); - const auto depth_const = op::Constant::create(element::i64, Shape{}, {3}); - const auto on_const = op::Constant::create(element::f32, Shape{}, {on_value}); - const auto off_const = op::Constant::create(element::f32, Shape{}, {off_value}); + const auto indices_const = op::Constant::create(element::Type_t::i64, Shape{3}, indices); + const auto depth_const = op::Constant::create(element::Type_t::i64, Shape{}, {3}); + const auto on_const = op::Constant::create(element::Type_t::f32, Shape{}, {on_value}); + const auto off_const = op::Constant::create(element::Type_t::f32, Shape{}, {off_value}); int64_t axis = 1; auto one_hot_v1 = @@ -2427,10 +2443,10 @@ TEST(constant_folding, constant_v1_one_hot_negative_axes) const int32_t on_value = 4; const int32_t off_value = 1; - const auto indices_const = op::Constant::create(element::i64, Shape{4}, indices); - const auto depth_const = op::Constant::create(element::i64, Shape{}, {3}); - const auto on_const = op::Constant::create(element::i32, Shape{}, {on_value}); - const auto off_const = op::Constant::create(element::i32, Shape{}, {off_value}); + const auto indices_const = op::Constant::create(element::Type_t::i64, Shape{4}, indices); + const auto depth_const = op::Constant::create(element::Type_t::i64, Shape{}, {3}); + const auto on_const = op::Constant::create(element::Type_t::i32, Shape{}, {on_value}); + const auto off_const = op::Constant::create(element::Type_t::i32, Shape{}, {off_value}); int64_t axis = -1; auto one_hot_v1 = @@ -2470,10 +2486,10 @@ TEST(constant_folding, constant_v1_one_hot_negative_axes_2) auto on_value = true; auto off_value = false; - const auto indices_const = op::Constant::create(element::i64, Shape{2, 2}, indices); - const auto depth_const = op::Constant::create(element::i64, Shape{}, {3}); - const auto on_const = op::Constant::create(element::boolean, Shape{}, {on_value}); - const auto off_const = op::Constant::create(element::boolean, Shape{}, {off_value}); + const 
auto indices_const = op::Constant::create(element::Type_t::i64, Shape{2, 2}, indices); + const auto depth_const = op::Constant::create(element::Type_t::i64, Shape{}, {3}); + const auto on_const = op::Constant::create(element::Type_t::boolean, Shape{}, {on_value}); + const auto off_const = op::Constant::create(element::Type_t::boolean, Shape{}, {off_value}); int64_t axis = -1; auto one_hot_v1 = @@ -2516,9 +2532,9 @@ TEST(constant_folding, constant_tile_1d) Shape shape_out{4}; vector values_in{0, 1}; - auto data = make_shared(element::i32, shape_in, values_in); + auto data = make_shared(element::Type_t::i32, shape_in, values_in); vector values_repeats{2}; - auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + auto repeats = make_shared(element::Type_t::i64, shape_repeats, values_repeats); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); @@ -2547,9 +2563,9 @@ TEST(constant_folding, constant_tile_3d_small_data_rank) Shape shape_out{2, 2, 4}; vector values_in{0, 1}; - auto data = make_shared(element::i32, shape_in, values_in); + auto data = make_shared(element::Type_t::i32, shape_in, values_in); vector values_repeats{2, 2, 2}; - auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + auto repeats = make_shared(element::Type_t::i64, shape_repeats, values_repeats); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); @@ -2578,9 +2594,9 @@ TEST(constant_folding, constant_tile_3d_few_repeats) Shape shape_out{2, 2, 3}; vector values_in{1, 2, 3, 4, 5, 6}; - auto data = make_shared(element::i32, shape_in, values_in); + auto data = make_shared(element::Type_t::i32, shape_in, values_in); vector values_repeats{2, 1}; - auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + auto repeats = make_shared(element::Type_t::i64, shape_repeats, values_repeats); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); @@ -2609,9 +2625,9 @@ TEST(constant_folding, constant_tile_1d_0_repeats) Shape shape_out{}; vector values_in{0, 1}; - auto data = make_shared(element::i32, shape_in, values_in); + auto data = make_shared(element::Type_t::i32, shape_in, values_in); vector values_repeats{0}; - auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + auto repeats = make_shared(element::Type_t::i64, shape_repeats, values_repeats); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); @@ -2640,9 +2656,9 @@ TEST(constant_folding, constant_tile_0_rank_data) Shape shape_out{4}; vector values_in{1}; - auto data = make_shared(element::i32, shape_in, values_in); + auto data = make_shared(element::Type_t::i32, shape_in, values_in); vector values_repeats{4}; - auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + auto repeats = make_shared(element::Type_t::i64, shape_repeats, values_repeats); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); @@ -2666,7 +2682,7 @@ TEST(constant_folding, constant_tile_0_rank_data) TEST(constant_folding, constant_non_zero_0D) { - auto data = op::Constant::create(element::i32, Shape{}, {1}); + auto data = op::Constant::create(element::Type_t::i32, Shape{}, {1}); auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, 
ParameterVector{}); @@ -2694,7 +2710,7 @@ TEST(constant_folding, constant_non_zero_0D) TEST(constant_folding, constant_non_zero_1D) { vector values_in{0, 1, 0, 1}; - auto data = make_shared(element::i32, Shape{4}, values_in); + auto data = make_shared(element::Type_t::i32, Shape{4}, values_in); auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2720,8 +2736,8 @@ TEST(constant_folding, constant_non_zero_1D) TEST(constant_folding, constant_non_zero_int32_output_type) { vector values_in{0, 1, 0, 1}; - auto data = make_shared(element::i32, Shape{4}, values_in); - auto non_zero = make_shared(data, element::i32); + auto data = make_shared(element::Type_t::i32, Shape{4}, values_in); + auto non_zero = make_shared(data, element::Type_t::i32); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2736,7 +2752,7 @@ TEST(constant_folding, constant_non_zero_int32_output_type) as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); - ASSERT_EQ(element::i32, new_const->get_element_type()); + ASSERT_EQ(element::Type_t::i32, new_const->get_element_type()); const auto values_out = new_const->get_vector(); const vector values_expected{1, 3}; @@ -2747,7 +2763,8 @@ TEST(constant_folding, constant_non_zero_int32_output_type) TEST(constant_folding, constant_non_zero_1D_all_indices) { const vector values_in{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}; - const auto data = make_shared(element::f32, Shape{values_in.size()}, values_in); + const auto data = + make_shared(element::Type_t::f32, Shape{values_in.size()}, values_in); const auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2773,7 +2790,7 @@ TEST(constant_folding, constant_non_zero_1D_all_indices) TEST(constant_folding, constant_non_zero_2D) { vector values_in{1, 0, 0, 0, 1, 0, 1, 1, 0}; - auto data = make_shared(element::i32, Shape{3, 3}, values_in); + auto data = make_shared(element::Type_t::i32, Shape{3, 3}, values_in); auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2799,7 +2816,7 @@ TEST(constant_folding, constant_non_zero_2D) TEST(constant_folding, DISABLED_constant_non_zero_2D_all_indices) { const vector values_in{1, 1, 1, 1, 1, 1, 1, 1, 1}; - const auto data = make_shared(element::i8, Shape{3, 3}, values_in); + const auto data = make_shared(element::Type_t::i8, Shape{3, 3}, values_in); const auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2825,7 +2842,7 @@ TEST(constant_folding, DISABLED_constant_non_zero_2D_all_indices) TEST(constant_folding, DISABLED_constant_non_zero_2D_all_zeros) { const vector values_in{0, 0, 0, 0, 0, 0}; - const auto data = make_shared(element::u8, Shape{2, 3}, values_in); + const auto data = make_shared(element::Type_t::u8, Shape{2, 3}, values_in); const auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2848,7 +2865,7 @@ TEST(constant_folding, DISABLED_constant_non_zero_2D_all_zeros) TEST(constant_folding, constant_non_zero_3D) { vector values_in{1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0}; - auto data = make_shared(element::i32, Shape{2, 3, 3}, values_in); + auto data = 
make_shared(element::Type_t::i32, Shape{2, 3, 3}, values_in); auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2878,12 +2895,12 @@ TEST(constant_folding, constant_scatter_elements_update_basic) const Shape indices_shape{2, 3}; const auto data_const = op::Constant::create( - element::f32, data_shape, std::vector(shape_size(data_shape), 0.f)); + element::Type_t::f32, data_shape, std::vector(shape_size(data_shape), 0.f)); const auto indices_const = - op::Constant::create(element::i32, indices_shape, {1, 0, 2, 0, 2, 1}); - const auto updates_const = - op::Constant::create(element::f32, indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}); - const auto axis_const = op::Constant::create(element::i64, Shape{}, {0}); + op::Constant::create(element::Type_t::i32, indices_shape, {1, 0, 2, 0, 2, 1}); + const auto updates_const = op::Constant::create( + element::Type_t::f32, indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}); + const auto axis_const = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto scatter_elem_updt = make_shared( data_const, indices_const, updates_const, axis_const); @@ -2912,12 +2929,12 @@ TEST(constant_folding, constant_scatter_elements_update_negative_axis) const Shape indices_shape{2, 3}; const auto data_const = op::Constant::create( - element::f32, data_shape, std::vector(shape_size(data_shape), 0.f)); + element::Type_t::f32, data_shape, std::vector(shape_size(data_shape), 0.f)); const auto indices_const = - op::Constant::create(element::i32, indices_shape, {1, 0, 2, 0, 2, 1}); - const auto updates_const = - op::Constant::create(element::f32, indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}); - const auto axis_const = op::Constant::create(element::i64, Shape{}, {-1}); + op::Constant::create(element::Type_t::i32, indices_shape, {1, 0, 2, 0, 2, 1}); + const auto updates_const = op::Constant::create( + element::Type_t::f32, indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}); + const auto axis_const = op::Constant::create(element::Type_t::i64, Shape{}, {-1}); auto scatter_elem_updt = make_shared( data_const, indices_const, updates_const, axis_const); @@ -2944,12 +2961,12 @@ TEST(constant_folding, constant_scatter_elements_update_1d_axis) const Shape indices_shape{2, 3}; const auto data_const = op::Constant::create( - element::f32, data_shape, std::vector(shape_size(data_shape), 0.f)); + element::Type_t::f32, data_shape, std::vector(shape_size(data_shape), 0.f)); const auto indices_const = - op::Constant::create(element::i32, indices_shape, {1, 0, 2, 0, 2, 1}); - const auto updates_const = - op::Constant::create(element::f32, indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}); - const auto axis_const = op::Constant::create(element::i64, Shape{1}, {0}); + op::Constant::create(element::Type_t::i32, indices_shape, {1, 0, 2, 0, 2, 1}); + const auto updates_const = op::Constant::create( + element::Type_t::f32, indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}); + const auto axis_const = op::Constant::create(element::Type_t::i64, Shape{1}, {0}); auto scatter_elem_updt = make_shared( data_const, indices_const, updates_const, axis_const); @@ -2976,12 +2993,12 @@ TEST(constant_folding, constant_scatter_elements_update_3d_i16) const Shape indices_shape{2, 2, 3}; const auto data_const = op::Constant::create( - element::i16, data_shape, std::vector(shape_size(data_shape), 0)); - const auto indices_const = - op::Constant::create(element::i16, indices_shape, {1, 0, 2, 0, 2, 1, 2, 2, 2, 0, 1, 0}); 
- const auto updates_const = - op::Constant::create(element::i16, indices_shape, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); - const auto axis_const = op::Constant::create(element::i64, Shape{}, {1}); + element::Type_t::i16, data_shape, std::vector(shape_size(data_shape), 0)); + const auto indices_const = op::Constant::create( + element::Type_t::i16, indices_shape, {1, 0, 2, 0, 2, 1, 2, 2, 2, 0, 1, 0}); + const auto updates_const = op::Constant::create( + element::Type_t::i16, indices_shape, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); + const auto axis_const = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto scatter_elem_updt = make_shared( data_const, indices_const, updates_const, axis_const); @@ -3009,10 +3026,10 @@ TEST(constant_folding, constant_scatter_elements_update_one_elem) const Shape indices_shape{1, 1, 1}; const auto input_data = std::vector(shape_size(data_shape), 0); - const auto data_const = op::Constant::create(element::i32, data_shape, input_data); - const auto indices_const = op::Constant::create(element::i32, indices_shape, {1}); - const auto updates_const = op::Constant::create(element::i32, indices_shape, {2}); - const auto axis_const = op::Constant::create(element::i64, Shape{}, {0}); + const auto data_const = op::Constant::create(element::Type_t::i32, data_shape, input_data); + const auto indices_const = op::Constant::create(element::Type_t::i32, indices_shape, {1}); + const auto updates_const = op::Constant::create(element::Type_t::i32, indices_shape, {2}); + const auto axis_const = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto scatter_elem_updt = make_shared( data_const, indices_const, updates_const, axis_const); @@ -3041,8 +3058,9 @@ void test_constant_folding_reshape_v1(Shape& shape_in, vector values_shape, bool zero_flag = false) { - auto constant_in = make_shared(element::f32, shape_in, values_in); - auto constant_shape = make_shared(element::i64, shape_shape, values_shape); + auto constant_in = make_shared(element::Type_t::f32, shape_in, values_in); + auto constant_shape = + make_shared(element::Type_t::i64, shape_shape, values_shape); auto dyn_reshape = make_shared(constant_in, constant_shape, zero_flag); dyn_reshape->set_friendly_name("test"); auto f = make_shared(dyn_reshape, ParameterVector{}); @@ -3094,8 +3112,8 @@ TEST(constant_folding, constant_dyn_reshape_v1_pattern_with_zero_dims) TEST(constant_folding, disable_constant_folding) { - auto input = make_shared(element::f32, Shape{1, 3}); - auto constant_shape = op::Constant::create(element::i64, Shape{1}, {3}); + auto input = make_shared(element::Type_t::f32, Shape{1, 3}); + auto constant_shape = op::Constant::create(element::Type_t::i64, Shape{1}, {3}); auto dyn_reshape = make_shared(input, constant_shape, true); auto& rt_info = dyn_reshape->get_rt_info(); rt_info["DISABLED_CONSTANT_FOLDING"]; diff --git a/ngraph/test/control_dependencies.cpp b/ngraph/test/control_dependencies.cpp index 370df36e5db102..7d6e66da874615 100644 --- a/ngraph/test/control_dependencies.cpp +++ b/ngraph/test/control_dependencies.cpp @@ -80,8 +80,8 @@ constexpr NodeTypeInfo ControlDependencyOp::type_info; TEST(control_dependencies, cdep_ops) { - auto A = make_shared(element::f32, Shape{}); - auto B = make_shared(element::f32, Shape{}); + auto A = make_shared(element::Type_t::f32, Shape{}); + auto B = make_shared(element::Type_t::f32, Shape{}); auto absn = make_shared(A); auto cdop = make_shared(OutputVector{A}, std::set>{absn}); @@ -92,10 +92,10 @@ TEST(control_dependencies, cdep_ops) 
TEST(control_dependencies, two_cdep_ops) { - auto A = make_shared(element::f32, Shape{}); - auto B = make_shared(element::f32, Shape{}); + auto A = make_shared(element::Type_t::f32, Shape{}); + auto B = make_shared(element::Type_t::f32, Shape{}); auto absn = make_shared(A); - auto C = make_shared(element::f32, Shape{}); + auto C = make_shared(element::Type_t::f32, Shape{}); auto absn_c = make_shared(C); auto cdop = make_shared(OutputVector{A}, std::set>{absn, absn_c}); @@ -106,9 +106,9 @@ TEST(control_dependencies, two_cdep_ops) TEST(control_dependencies, two_cdep_ops_op_on_top) { - auto A = make_shared(element::f32, Shape{}); + auto A = make_shared(element::Type_t::f32, Shape{}); auto absn = make_shared(A); - auto B = make_shared(element::f32, Shape{}); + auto B = make_shared(element::Type_t::f32, Shape{}); auto absn_b = make_shared(B); auto cdop = make_shared(OutputVector{A}, std::set>{absn, absn_b}); @@ -120,7 +120,7 @@ TEST(control_dependencies, two_cdep_ops_op_on_top) TEST(control_dependencies, clone_function_cdop) { - auto A = make_shared(element::f32, Shape{}); + auto A = make_shared(element::Type_t::f32, Shape{}); auto absn = make_shared(A); auto cdop = make_shared(OutputVector{A}, std::set>{absn}); @@ -139,9 +139,9 @@ TEST(control_dependencies, clone_function_cdop) TEST(control_dependencies, clone_function_cdop_abs) { - auto A = make_shared(element::f32, Shape{}); + auto A = make_shared(element::Type_t::f32, Shape{}); auto absn = make_shared(A); - auto B = make_shared(element::f32, Shape{}); + auto B = make_shared(element::Type_t::f32, Shape{}); auto absn_b = make_shared(B); auto cdop = make_shared(OutputVector{A}, std::set>{absn, absn_b}); @@ -175,8 +175,8 @@ static size_t count_control_dependencies(const shared_ptr& node, TEST(control_dependencies, replace_node) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto MUL_AB = A * B; auto MUL_BA = B * A; auto ADD = A + B; diff --git a/ngraph/test/convert_u1_to_string.cpp b/ngraph/test/convert_u1_to_string.cpp index fd12304831a611..a994a73cbd53d0 100644 --- a/ngraph/test/convert_u1_to_string.cpp +++ b/ngraph/test/convert_u1_to_string.cpp @@ -25,7 +25,7 @@ using namespace std; TEST(convert_u1_to_string, convert_u1_to_string) { vector values{171, 16}; - auto constant = make_shared(element::u1, Shape{12}, &values[0]); + auto constant = make_shared(element::Type_t::u1, Shape{12}, &values[0]); vector ref{"1", "0", "1", "0", "1", "0", "1", "1", "0", "0", "0", "1"}; for (size_t i = 0; i < 12; ++i) diff --git a/ngraph/test/copy.cpp b/ngraph/test/copy.cpp index dfc7bac2674887..f1c97ec4837389 100644 --- a/ngraph/test/copy.cpp +++ b/ngraph/test/copy.cpp @@ -33,8 +33,8 @@ template bool check_unary() { Shape shape{1}; - auto arg0 = make_shared(element::f32, shape); - OutputVector new_args{make_shared(element::f32, shape)}; + auto arg0 = make_shared(element::Type_t::f32, shape); + OutputVector new_args{make_shared(element::Type_t::f32, shape)}; auto node = make_shared(arg0); auto new_node = node->copy_with_new_inputs(new_args); @@ -46,10 +46,10 @@ template bool check_binary() { Shape shape{1}; - auto arg0 = make_shared(element::f32, shape); - auto arg1 = make_shared(element::f32, shape); - OutputVector new_args{make_shared(element::f32, shape), - make_shared(element::f32, shape)}; + auto arg0 = make_shared(element::Type_t::f32, shape); + auto arg1 = 
make_shared(element::Type_t::f32, shape); + OutputVector new_args{make_shared(element::Type_t::f32, shape), + make_shared(element::Type_t::f32, shape)}; auto node = make_shared(arg0, arg1); auto new_node = node->copy_with_new_inputs(new_args); @@ -87,15 +87,16 @@ TEST(copy, broadcast) Shape shape{1, 3}; Shape new_shape{4, 1, 3}; AxisSet axes{1, 2}; - auto arg0 = make_shared(element::f32, shape); - OutputVector new_args{make_shared(element::f32, shape), - op::Constant::create(element::u64, Shape{new_shape.size()}, new_shape), - op::Constant::create(element::i64, Shape{axes.size()}, axes.to_vector())}; + auto arg0 = make_shared(element::Type_t::f32, shape); + OutputVector new_args{ + make_shared(element::Type_t::f32, shape), + op::Constant::create(element::Type_t::u64, Shape{new_shape.size()}, new_shape), + op::Constant::create(element::Type_t::i64, Shape{axes.size()}, axes.to_vector())}; auto node = make_shared( arg0, - op::Constant::create(element::u64, Shape{new_shape.size()}, new_shape), - op::Constant::create(element::i64, Shape{axes.size()}, axes.to_vector())); + op::Constant::create(element::Type_t::u64, Shape{new_shape.size()}, new_shape), + op::Constant::create(element::Type_t::i64, Shape{axes.size()}, axes.to_vector())); auto new_node = node->copy_with_new_inputs(new_args); auto node_cast = as_type_ptr(new_node); ASSERT_NE(node_cast, nullptr); @@ -117,10 +118,10 @@ TEST(copy, ceiling) TEST(copy, concat) { Shape shape{1}; - auto arg0 = make_shared(element::f32, shape); - auto arg1 = make_shared(element::f32, shape); - OutputVector new_args{make_shared(element::f32, shape), - make_shared(element::f32, shape)}; + auto arg0 = make_shared(element::Type_t::f32, shape); + auto arg1 = make_shared(element::Type_t::f32, shape); + OutputVector new_args{make_shared(element::Type_t::f32, shape), + make_shared(element::Type_t::f32, shape)}; size_t axis = 0; auto node = make_shared(NodeVector{arg0, arg1}, axis); auto new_node = node->clone_with_new_inputs(new_args); @@ -136,7 +137,7 @@ TEST(copy, constant) { Shape shape{}; vector c{2.4f}; - auto& et = element::f32; + element::Type et = element::Type_t::f32; auto node = op::Constant::create(et, shape, c); auto new_node = node->clone_with_new_inputs(OutputVector{}); auto node_cast = as_type_ptr(new_node); @@ -151,9 +152,9 @@ TEST(copy, constant) TEST(copy, convert) { Shape shape; - auto& et = element::f64; - auto arg0 = make_shared(element::f32, shape); - OutputVector new_args{make_shared(element::f32, shape)}; + element::Type et = element::Type_t::f64; + auto arg0 = make_shared(element::Type_t::f32, shape); + OutputVector new_args{make_shared(element::Type_t::f32, shape)}; auto node = make_shared(arg0, et); auto new_node = node->clone_with_new_inputs(new_args); @@ -248,7 +249,7 @@ TEST(copy, not_equal) TEST(copy, parameter) { Shape shape{1}; - auto node = make_shared(element::f32, shape); + auto node = make_shared(element::Type_t::f32, shape); auto new_node = node->clone_with_new_inputs({}); auto node_cast = as_type_ptr(new_node); ASSERT_NE(node_cast, nullptr); @@ -267,12 +268,13 @@ TEST(copy, reduce_sum) { Shape shape{4, 3}; AxisSet axes{1}; - auto arg0 = make_shared(element::f32, shape); + auto arg0 = make_shared(element::Type_t::f32, shape); - auto axes_node = op::Constant::create(element::i64, {axes.size()}, axes.to_vector()); + auto axes_node = op::Constant::create(element::Type_t::i64, {axes.size()}, axes.to_vector()); auto node = make_shared(arg0, axes_node, true); - OutputVector new_args{make_shared(element::f32, shape), - 
op::Constant::create(element::i64, {axes.size()}, axes.to_vector())}; + OutputVector new_args{ + make_shared(element::Type_t::f32, shape), + op::Constant::create(element::Type_t::i64, {axes.size()}, axes.to_vector())}; auto new_node = node->clone_with_new_inputs(new_args); auto node_cast = as_type_ptr(new_node); ASSERT_NE(node_cast, nullptr); @@ -288,11 +290,12 @@ TEST(copy, reshape) Shape shape_in{2, 3, 4}; Shape shape_out{6, 4}; - auto arg0 = make_shared(element::f32, shape_in); - OutputVector new_args{make_shared(element::f32, shape_in), - op::Constant::create(element::u64, {shape_out.size()}, shape_out)}; + auto arg0 = make_shared(element::Type_t::f32, shape_in); + OutputVector new_args{ + make_shared(element::Type_t::f32, shape_in), + op::Constant::create(element::Type_t::u64, {shape_out.size()}, shape_out)}; - auto shape_pattern = op::Constant::create(element::u64, {shape_out.size()}, shape_out); + auto shape_pattern = op::Constant::create(element::Type_t::u64, {shape_out.size()}, shape_out); auto node = make_shared(arg0, shape_pattern, false); auto new_node = node->clone_with_new_inputs(new_args); auto node_cast = as_type_ptr(new_node); @@ -306,12 +309,12 @@ TEST(copy, reshape) TEST(copy, select) { Shape shape{1}; - auto arg0 = make_shared(element::boolean, shape); - auto arg1 = make_shared(element::f32, shape); - auto arg2 = make_shared(element::f32, shape); - OutputVector new_args{make_shared(element::boolean, shape), - make_shared(element::f32, shape), - make_shared(element::f32, shape)}; + auto arg0 = make_shared(element::Type_t::boolean, shape); + auto arg1 = make_shared(element::Type_t::f32, shape); + auto arg2 = make_shared(element::Type_t::f32, shape); + OutputVector new_args{make_shared(element::Type_t::boolean, shape), + make_shared(element::Type_t::f32, shape), + make_shared(element::Type_t::f32, shape)}; auto node = make_shared(arg0, arg1, arg2); auto new_node = node->clone_with_new_inputs(new_args); @@ -344,15 +347,15 @@ TEST(copy, strided_slice) Coordinate upper{2, 3, 4}; Strides strides{1, 1, 1}; - auto arg0 = make_shared(element::f32, shape_in); - OutputVector new_args{make_shared(element::f32, shape_in), - op::Constant::create(element::u64, {lower.size()}, lower), - op::Constant::create(element::u64, {upper.size()}, upper), - op::Constant::create(element::i64, {strides.size()}, strides)}; + auto arg0 = make_shared(element::Type_t::f32, shape_in); + OutputVector new_args{make_shared(element::Type_t::f32, shape_in), + op::Constant::create(element::Type_t::u64, {lower.size()}, lower), + op::Constant::create(element::Type_t::u64, {upper.size()}, upper), + op::Constant::create(element::Type_t::i64, {strides.size()}, strides)}; - auto begin_node = op::Constant::create(element::i64, {lower.size()}, lower); - auto end_node = op::Constant::create(element::i64, {upper.size()}, upper); - auto strides_node = op::Constant::create(element::i64, {strides.size()}, strides); + auto begin_node = op::Constant::create(element::Type_t::i64, {lower.size()}, lower); + auto end_node = op::Constant::create(element::Type_t::i64, {upper.size()}, upper); + auto strides_node = op::Constant::create(element::Type_t::i64, {strides.size()}, strides); auto node = make_shared(arg0, begin_node, end_node, @@ -398,23 +401,23 @@ TEST(copy, tanh) TEST(copy, loop) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::Type_t::f32, 
Shape{32, 1, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto body_condition = - std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); - - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{}, 10); - auto exec_condition = - std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + auto current_iteration = make_shared(element::Type_t::i64, Shape{}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared( + ngraph::element::Type_t::boolean, ngraph::Shape{}, true); + + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{}, 10); + auto exec_condition = std::make_shared( + ngraph::element::Type_t::boolean, ngraph::Shape{}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -437,9 +440,9 @@ TEST(copy, loop) auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); loop->validate_and_infer_types(); // That which we iterate over - auto X_new = make_shared(element::f32, Shape{3, 2, 5}); - auto Y_new = make_shared(element::f32, Shape{3, 2, 5}); - auto M_new = make_shared(element::f32, Shape{3, 2, 5}); + auto X_new = make_shared(element::Type_t::f32, Shape{3, 2, 5}); + auto Y_new = make_shared(element::Type_t::f32, Shape{3, 2, 5}); + auto M_new = make_shared(element::Type_t::f32, Shape{3, 2, 5}); OutputVector new_args = {trip_count, exec_condition, X_new, Y_new, M_new}; auto loop_copy = loop->clone_with_new_inputs(new_args); diff --git a/ngraph/test/dyn_elimination.cpp b/ngraph/test/dyn_elimination.cpp index a3474cabccbeab..dc18dec85b11e2 100644 --- a/ngraph/test/dyn_elimination.cpp +++ b/ngraph/test/dyn_elimination.cpp @@ -30,10 +30,10 @@ using namespace std; TEST(dyn_elimination, transpose) { Shape shape_in{2, 4, 6, 8}; - auto param = make_shared(element::boolean, shape_in); + auto param = make_shared(element::Type_t::boolean, shape_in); auto constant_perm = - make_shared(element::i64, Shape{4}, vector{2, 3, 1, 0}); + make_shared(element::Type_t::i64, Shape{4}, vector{2, 3, 1, 0}); auto transpose = make_shared(param, constant_perm); @@ -52,7 +52,7 @@ TEST(dyn_elimination, transpose) ASSERT_EQ(new_reshape->get_input_order(), (AxisVector{2, 3, 1, 0})); ASSERT_EQ(new_reshape->get_output_shape(0), (Shape{6, 8, 4, 2})); - ASSERT_EQ(new_reshape->get_output_element_type(0), element::boolean); + ASSERT_EQ(new_reshape->get_output_element_type(0), element::Type_t::boolean); } // For now, we can't handle the case where the input has dynamic shapes, @@ -63,10 +63,10 @@ TEST(dyn_elimination, transpose_dyn_shape) { PartialShape shape_in{2, 4, Dimension::dynamic(), 8}; - auto param = make_shared(element::boolean, shape_in); + auto param = make_shared(element::Type_t::boolean, shape_in); auto constant_perm = - make_shared(element::i64, Shape{4}, vector{2, 3, 1, 0}); + make_shared(element::Type_t::i64, Shape{4}, vector{2, 3, 1, 0}); auto transpose = make_shared(param, 
constant_perm); @@ -83,20 +83,23 @@ TEST(dyn_elimination, transpose_dyn_shape) as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_transpose); - ASSERT_EQ(new_transpose->get_output_element_type(0), element::boolean); + ASSERT_EQ(new_transpose->get_output_element_type(0), element::Type_t::boolean); ASSERT_TRUE(new_transpose->get_output_partial_shape(0).relaxes( PartialShape{Dimension::dynamic(), 8, 4, 2})); } TEST(dyn_elimination, range) { - auto constant_start = make_shared(element::i64, Shape{}, vector{0}); - auto constant_stop = make_shared(element::i64, Shape{}, vector{5}); - auto constant_step = make_shared(element::i64, Shape{}, vector{2}); + auto constant_start = + make_shared(element::Type_t::i64, Shape{}, vector{0}); + auto constant_stop = + make_shared(element::Type_t::i64, Shape{}, vector{5}); + auto constant_step = + make_shared(element::Type_t::i64, Shape{}, vector{2}); auto range = make_shared(constant_start, constant_stop, constant_step); - ASSERT_EQ(range->get_element_type(), element::i64); + ASSERT_EQ(range->get_element_type(), element::Type_t::i64); ASSERT_EQ(range->get_shape(), (Shape{3})); auto f = make_shared(range, ParameterVector{}); @@ -112,7 +115,7 @@ TEST(dyn_elimination, range) as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_NE(replacement, nullptr); - ASSERT_EQ(replacement->get_element_type(), element::i64); + ASSERT_EQ(replacement->get_element_type(), element::Type_t::i64); ASSERT_EQ(replacement->get_shape(), (Shape{3})); auto vals = replacement->get_vector(); @@ -122,13 +125,16 @@ TEST(dyn_elimination, range) TEST(dyn_elimination, range_f64) { - auto constant_start = make_shared(element::f64, Shape{}, vector{-0.5}); - auto constant_stop = make_shared(element::f64, Shape{}, vector{2}); - auto constant_step = make_shared(element::f64, Shape{}, vector{0.25}); + auto constant_start = + make_shared(element::Type_t::f64, Shape{}, vector{-0.5}); + auto constant_stop = + make_shared(element::Type_t::f64, Shape{}, vector{2}); + auto constant_step = + make_shared(element::Type_t::f64, Shape{}, vector{0.25}); auto range = make_shared(constant_start, constant_stop, constant_step); - ASSERT_EQ(range->get_element_type(), element::f64); + ASSERT_EQ(range->get_element_type(), element::Type_t::f64); ASSERT_EQ(range->get_shape(), (Shape{10})); auto f = make_shared(range, ParameterVector{}); @@ -144,7 +150,7 @@ TEST(dyn_elimination, range_f64) as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_NE(replacement, nullptr); - ASSERT_EQ(replacement->get_element_type(), element::f64); + ASSERT_EQ(replacement->get_element_type(), element::Type_t::f64); ASSERT_EQ(replacement->get_shape(), (Shape{10})); auto vals = replacement->get_vector(); diff --git a/ngraph/test/element_type.cpp b/ngraph/test/element_type.cpp index 625679f553ee31..767a2939887641 100644 --- a/ngraph/test/element_type.cpp +++ b/ngraph/test/element_type.cpp @@ -24,62 +24,62 @@ using namespace ngraph; TEST(element_type, from) { - EXPECT_EQ(element::from(), element::boolean); - EXPECT_EQ(element::from(), element::boolean); - EXPECT_EQ(element::from(), element::f32); - EXPECT_EQ(element::from(), element::f64); - EXPECT_EQ(element::from(), element::i8); - EXPECT_EQ(element::from(), element::i16); - EXPECT_EQ(element::from(), element::i32); - EXPECT_EQ(element::from(), element::i64); - EXPECT_EQ(element::from(), element::u8); - EXPECT_EQ(element::from(), element::u16); - EXPECT_EQ(element::from(), element::u32); - 
EXPECT_EQ(element::from(), element::u64); + EXPECT_EQ(element::from(), element::Type_t::boolean); + EXPECT_EQ(element::from(), element::Type_t::boolean); + EXPECT_EQ(element::from(), element::Type_t::f32); + EXPECT_EQ(element::from(), element::Type_t::f64); + EXPECT_EQ(element::from(), element::Type_t::i8); + EXPECT_EQ(element::from(), element::Type_t::i16); + EXPECT_EQ(element::from(), element::Type_t::i32); + EXPECT_EQ(element::from(), element::Type_t::i64); + EXPECT_EQ(element::from(), element::Type_t::u8); + EXPECT_EQ(element::from(), element::Type_t::u16); + EXPECT_EQ(element::from(), element::Type_t::u32); + EXPECT_EQ(element::from(), element::Type_t::u64); } TEST(element_type, mapable) { std::map test_map; - test_map.insert({element::f32, "float"}); + test_map.insert({element::Type_t::f32, "float"}); } TEST(element_type, merge_both_dynamic) { element::Type t; - ASSERT_TRUE(element::Type::merge(t, element::dynamic, element::dynamic)); + ASSERT_TRUE(element::Type::merge(t, element::Type_t::dynamic, element::Type_t::dynamic)); ASSERT_TRUE(t.is_dynamic()); } TEST(element_type, merge_left_dynamic) { element::Type t; - ASSERT_TRUE(element::Type::merge(t, element::dynamic, element::u64)); + ASSERT_TRUE(element::Type::merge(t, element::Type_t::dynamic, element::Type_t::u64)); ASSERT_TRUE(t.is_static()); - ASSERT_EQ(t, element::u64); + ASSERT_EQ(t, element::Type_t::u64); } TEST(element_type, merge_right_dynamic) { element::Type t; - ASSERT_TRUE(element::Type::merge(t, element::i16, element::dynamic)); + ASSERT_TRUE(element::Type::merge(t, element::Type_t::i16, element::Type_t::dynamic)); ASSERT_TRUE(t.is_static()); - ASSERT_EQ(t, element::i16); + ASSERT_EQ(t, element::Type_t::i16); } TEST(element_type, merge_both_static_equal) { element::Type t; - ASSERT_TRUE(element::Type::merge(t, element::f64, element::f64)); + ASSERT_TRUE(element::Type::merge(t, element::Type_t::f64, element::Type_t::f64)); ASSERT_TRUE(t.is_static()); - ASSERT_EQ(t, element::f64); + ASSERT_EQ(t, element::Type_t::f64); } TEST(element_type, merge_both_static_unequal) { - element::Type t = element::f32; - ASSERT_FALSE(element::Type::merge(t, element::i8, element::i16)); + element::Type t = element::Type_t::f32; + ASSERT_FALSE(element::Type::merge(t, element::Type_t::i8, element::Type_t::i16)); ASSERT_TRUE(t.is_static()); - ASSERT_EQ(t, element::f32); + ASSERT_EQ(t, element::Type_t::f32); } diff --git a/ngraph/test/eval.cpp b/ngraph/test/eval.cpp index b0d4b670b8dc66..f551e39880052f 100644 --- a/ngraph/test/eval.cpp +++ b/ngraph/test/eval.cpp @@ -88,7 +88,7 @@ using namespace ngraph; TEST(eval, bad_get_data_ptr) { - HostTensor c(element::f32, Shape{}); + HostTensor c(element::Type_t::f32, Shape{}); *c.get_data_ptr() = 1.0; EXPECT_EQ(*c.get_data_ptr(), 1.0); try @@ -113,7 +113,7 @@ TEST(eval, bad_get_data_ptr) TEST(eval, max_eval_parameter) { - auto p = make_shared(element::i64, Shape{}); + auto p = make_shared(element::Type_t::i64, Shape{}); auto result = maximum_value(p); EXPECT_FALSE(result.first); @@ -122,7 +122,7 @@ TEST(eval, max_eval_parameter) TEST(eval, max_eval_constant) { - auto c = op::Constant::create(element::i64, Shape{}, {27}); + auto c = op::Constant::create(element::Type_t::i64, Shape{}, {27}); auto result = maximum_value(c); ASSERT_TRUE(result.first); EXPECT_EQ(result.second, 27); @@ -130,8 +130,8 @@ TEST(eval, max_eval_constant) TEST(eval, max_eval_minimum_constant) { - auto c = op::Constant::create(element::i64, Shape{}, {27}); - auto p = make_shared(element::i64, Shape{}); + auto c = 
op::Constant::create(element::Type_t::i64, Shape{}, {27}); + auto p = make_shared(element::Type_t::i64, Shape{}); auto m = make_shared(c, p); auto result = maximum_value(m); ASSERT_TRUE(result.first); @@ -142,31 +142,31 @@ TEST(eval, max_eval_reduce_min) { auto concat = make_shared( make_shared( - OutputVector{make_shared(element::i64, Shape{4}), - make_shared(element::i64, Shape{4}, 37)}, + OutputVector{make_shared(element::Type_t::i64, Shape{4}), + make_shared(element::Type_t::i64, Shape{4}, 37)}, 0), - element::i32); + element::Type_t::i32); auto reduce = make_shared( - make_shared(concat, - make_shared(element::i32, Shape{1}, 0)), - element::i64); + make_shared( + concat, make_shared(element::Type_t::i32, Shape{1}, 0)), + element::Type_t::i64); auto squeezes = make_shared( - make_shared(reduce, - make_shared(element::i32, Shape{1}, 0)), - make_shared(element::i64, Shape{1}, 0)); + make_shared( + reduce, make_shared(element::Type_t::i32, Shape{1}, 0)), + make_shared(element::Type_t::i64, Shape{1}, 0)); EXPECT_EQ(maximum_value(squeezes).second, 37); } TEST(eval, evaluate_shape_of) { - auto p = make_shared(element::f32, PartialShape{-1, -1}); + auto p = make_shared(element::Type_t::f32, PartialShape{-1, -1}); auto so = make_shared(p); auto fun = make_shared(OutputVector{so}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor( Shape{2, 3}, {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f})})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2})); auto result_shape = read_vector(result); vector arg_shape{2, 3}; @@ -175,10 +175,10 @@ TEST(eval, evaluate_shape_of) TEST(eval, evaluate_dynamic_range_sum) { - auto p_start = make_shared(element::f32, PartialShape{}); - auto p_stop = make_shared(element::f32, PartialShape{}); - auto p_step = make_shared(element::f32, PartialShape{}); - auto p1 = make_shared(element::f32, PartialShape{}); + auto p_start = make_shared(element::Type_t::f32, PartialShape{}); + auto p_stop = make_shared(element::Type_t::f32, PartialShape{}); + auto p_step = make_shared(element::Type_t::f32, PartialShape{}); + auto p1 = make_shared(element::Type_t::f32, PartialShape{}); auto range = make_shared(p_start, p_stop, p_step); auto add = make_shared(range, p1); auto fun = @@ -189,7 +189,7 @@ TEST(eval, evaluate_dynamic_range_sum) make_host_tensor({}, {10.0f}), make_host_tensor({}, {3.0f}), make_host_tensor({}, {7.0f})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3})); auto cval = read_vector(result_tensor); vector seq{8.0f, 11.0f, 14.0f}; @@ -199,27 +199,27 @@ TEST(eval, evaluate_dynamic_range_sum) #ifdef NGRAPH_INTERPRETER_ENABLE TEST(eval, interpret_dynamic_range_sum) { - auto p_start = make_shared(element::f32, PartialShape{}); - auto p_stop = make_shared(element::f32, PartialShape{}); - auto p_step = make_shared(element::f32, PartialShape{}); - auto p1 = make_shared(element::f32, PartialShape{}); + auto p_start = make_shared(element::Type_t::f32, PartialShape{}); + auto p_stop = make_shared(element::Type_t::f32, PartialShape{}); + auto p_step = make_shared(element::Type_t::f32, PartialShape{}); + auto p1 = make_shared(element::Type_t::f32, PartialShape{}); auto range = make_shared(p_start, p_stop, p_step); auto add = make_shared(range, p1); auto fun = 
make_shared(OutputVector{add}, ParameterVector{p_start, p_stop, p_step, p1}); auto backend = runtime::Backend::create("INTERPRETER"); - auto p_start_val = backend->create_tensor(element::f32, Shape{}); + auto p_start_val = backend->create_tensor(element::Type_t::f32, Shape{}); copy_data(p_start_val, vector{1.0f}); - auto p_stop_val = backend->create_tensor(element::f32, Shape{}); + auto p_stop_val = backend->create_tensor(element::Type_t::f32, Shape{}); copy_data(p_stop_val, vector{10.0f}); - auto p_step_val = backend->create_tensor(element::f32, Shape{}); + auto p_step_val = backend->create_tensor(element::Type_t::f32, Shape{}); copy_data(p_step_val, vector{3.0f}); - auto p1_val = backend->create_tensor(element::f32, Shape{}); + auto p1_val = backend->create_tensor(element::Type_t::f32, Shape{}); copy_data(p1_val, vector{7.0f}); auto result = backend->create_tensor(); auto cfun = backend->compile(fun); cfun->call({result}, {p_start_val, p_stop_val, p_step_val, p1_val}); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{3})); auto result_val = read_vector(result); vector seq{8.0f, 11.0f, 14.0f}; @@ -230,8 +230,8 @@ TEST(eval, interpret_dynamic_range_sum) TEST(eval, evaluate_broadcast_v3_bidirectional) { Shape shape_a{4, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i32, Shape{3}, {2, 1, 4}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = op::Constant::create(element::Type_t::i32, Shape{3}, {2, 1, 4}); auto bcast_v3 = make_shared(A, target_shape, op::BroadcastType::BIDIRECTIONAL); auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); @@ -239,7 +239,7 @@ TEST(eval, evaluate_broadcast_v3_bidirectional) auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{4, 1}, {1.0f, 2.0f, 3.0f, 4.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 4, 4})); auto result_val = read_vector(result); vector expec{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, @@ -250,15 +250,15 @@ TEST(eval, evaluate_broadcast_v3_bidirectional) TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input) { Shape shape_a{1, 1, 1, 1, 1, 1, 1, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i64, Shape{4}, {1, 3, 1, 1}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{4}, {1, 3, 1, 1}); auto bcast_v3 = make_shared(A, target_shape, op::BroadcastType::BIDIRECTIONAL); auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(shape_a, {1.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 1, 1, 1, 1, 3, 1, 1})); auto result_val = read_vector(result); vector expec{1.0f, 1.0f, 1.0f}; @@ -268,8 +268,8 @@ TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input) TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input_2) { Shape shape_a{1, 3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i32, 
Shape{2}, {3, 1}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = op::Constant::create(element::Type_t::i32, Shape{2}, {3, 1}); auto bcast_v3 = make_shared(A, target_shape, op::BroadcastType::BIDIRECTIONAL); auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); @@ -277,7 +277,7 @@ TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input_2) auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{1, 3, 1}, {1.0f, 2.0f, 3.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 3, 1})); auto result_val = read_vector(result); vector expec{1.0f, 2.0f, 3.0f}; @@ -287,8 +287,8 @@ TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input_2) TEST(eval, evaluate_broadcast_v3_bidirectional_dyn) { Shape shape_a{4, 1}; - auto A = make_shared(element::i32, shape_a); - auto target_shape = make_shared(element::i32, Shape{3}); + auto A = make_shared(element::Type_t::i32, shape_a); + auto target_shape = make_shared(element::Type_t::i32, Shape{3}); auto bcast_v3 = make_shared(A, target_shape, op::BroadcastType::BIDIRECTIONAL); auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape}); @@ -297,7 +297,7 @@ TEST(eval, evaluate_broadcast_v3_bidirectional_dyn) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{4, 1}, {1, 2, 3, 4}), make_host_tensor(Shape{3}, {2, 1, 4})})); - EXPECT_EQ(result->get_element_type(), element::i32); + EXPECT_EQ(result->get_element_type(), element::Type_t::i32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 4, 4})); auto result_val = read_vector(result); vector expec{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, @@ -308,15 +308,15 @@ TEST(eval, evaluate_broadcast_v3_bidirectional_dyn) TEST(eval, evaluate_broadcast_v3_numpy) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6}); auto bcast_v3 = make_shared(A, target_shape); auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ @@ -329,8 +329,8 @@ TEST(eval, evaluate_broadcast_v3_numpy) TEST(eval, evaluate_broadcast_v3_numpy_dyn) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = make_shared(element::i32, Shape{3}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = make_shared(element::Type_t::i32, Shape{3}); auto bcast_v3 = make_shared(A, target_shape); auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape}); @@ -339,7 +339,7 @@ TEST(eval, evaluate_broadcast_v3_numpy_dyn) fun->evaluate({result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), make_host_tensor(Shape{3}, {2, 3, 6})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); 
auto result_val = read_vector(result); vector expec{ @@ -353,21 +353,21 @@ TEST(eval, evaluate_broadcast_v3_numpy_vs_bidi) { Shape in_shape{1, 4, 1}; - auto A = make_shared(element::f32, in_shape); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {1, 4, 4}); + auto A = make_shared(element::Type_t::f32, in_shape); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {1, 4, 4}); auto bcast_v3_num = make_shared(A, target_shape, op::BroadcastType::NUMPY); auto fun_num = make_shared(OutputVector{bcast_v3_num}, ParameterVector{A}); auto result = make_shared(); ASSERT_TRUE(fun_num->evaluate( {result}, {make_host_tensor(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 4, 4})); auto result_val = read_vector(result); vector expec{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}; ASSERT_EQ(expec, result_val); - auto target_shape2 = op::Constant::create(element::i64, Shape{2}, {1, 4}); + auto target_shape2 = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 4}); auto bcast_v3 = make_shared(A, target_shape2, op::BroadcastType::BIDIRECTIONAL); auto fun_bidi = make_shared(OutputVector{bcast_v3_num}, ParameterVector{A}); @@ -375,7 +375,7 @@ TEST(eval, evaluate_broadcast_v3_numpy_vs_bidi) auto result2 = make_shared(); ASSERT_TRUE(fun_bidi->evaluate( {result2}, {make_host_tensor(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})})); - EXPECT_EQ(result2->get_element_type(), element::f32); + EXPECT_EQ(result2->get_element_type(), element::Type_t::f32); EXPECT_EQ(result2->get_partial_shape(), (PartialShape{1, 4, 4})); auto result_val2 = read_vector(result2); vector expec2{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}; @@ -386,8 +386,8 @@ TEST(eval, evaluate_broadcast_v3_bidi_3d) { Shape in_shape{1, 4, 1}; - auto A = make_shared(element::f32, in_shape); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {1, 1, 3}); + auto A = make_shared(element::Type_t::f32, in_shape); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {1, 1, 3}); auto bcast_v3_num = make_shared(A, target_shape, op::BroadcastType::BIDIRECTIONAL); auto fun_num = make_shared(OutputVector{bcast_v3_num}, ParameterVector{A}); @@ -395,7 +395,7 @@ TEST(eval, evaluate_broadcast_v3_bidi_3d) auto result = make_shared(); ASSERT_TRUE(fun_num->evaluate( {result}, {make_host_tensor(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 4, 3})); auto result_val = read_vector(result); vector expec{1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f}; @@ -407,8 +407,8 @@ TEST(eval, evaluate_broadcast_v3_bidi_4d) Shape in_shape{4, 1, 1}; Shape expec_shape{1, 4, 2, 2}; - auto A = make_shared(element::f32, in_shape); - auto target_shape = op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2}); + auto A = make_shared(element::Type_t::f32, in_shape); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{4}, {1, 1, 2, 2}); auto bcast_v3 = make_shared(A, target_shape, op::BroadcastType::BIDIRECTIONAL); auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); @@ -416,7 +416,7 @@ TEST(eval, evaluate_broadcast_v3_bidi_4d) auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(in_shape, {1.0f, 2.0f, 
3.0f, 4.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 4, 2, 2})); auto result_val = read_vector(result); vector expec{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}; @@ -426,8 +426,8 @@ TEST(eval, evaluate_broadcast_v3_bidi_4d) TEST(eval, evaluate_broadcast_v3_pdpd) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6}); auto bcast_v3 = make_shared( A, target_shape, op::BroadcastModeSpec(op::BroadcastType::PDPD, 1)); auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); @@ -435,7 +435,7 @@ TEST(eval, evaluate_broadcast_v3_pdpd) auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ @@ -448,8 +448,8 @@ TEST(eval, evaluate_broadcast_v3_pdpd) TEST(eval, evaluate_broadcast_v3_pdpd_dyn) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = make_shared(element::i32, Shape{3}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = make_shared(element::Type_t::i32, Shape{3}); auto bcast_v3 = make_shared( A, target_shape, op::BroadcastModeSpec(op::BroadcastType::PDPD, 1)); auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape}); @@ -459,7 +459,7 @@ TEST(eval, evaluate_broadcast_v3_pdpd_dyn) fun->evaluate({result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), make_host_tensor(Shape{3}, {2, 3, 6})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ @@ -472,15 +472,15 @@ TEST(eval, evaluate_broadcast_v3_pdpd_dyn) TEST(eval, evaluate_broadcast_v1_numpy) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6}); auto bcast_v3 = make_shared(A, target_shape); auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ @@ -493,8 +493,8 @@ TEST(eval, evaluate_broadcast_v1_numpy) TEST(eval, evaluate_broadcast_v1_numpy_dyn) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = make_shared(element::i64, Shape{3}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = make_shared(element::Type_t::i64, Shape{3}); auto bcast_v3 = make_shared(A, target_shape); auto fun = make_shared(OutputVector{bcast_v3}, 
ParameterVector{A, target_shape}); @@ -503,7 +503,7 @@ TEST(eval, evaluate_broadcast_v1_numpy_dyn) fun->evaluate({result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), make_host_tensor(Shape{3}, {2, 3, 6})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ @@ -516,8 +516,8 @@ TEST(eval, evaluate_broadcast_v1_numpy_dyn) TEST(eval, evaluate_broadcast_v1_pdpd) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6}); auto bcast_v3 = make_shared( A, target_shape, op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1)); auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); @@ -525,7 +525,7 @@ TEST(eval, evaluate_broadcast_v1_pdpd) auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ @@ -538,8 +538,8 @@ TEST(eval, evaluate_broadcast_v1_pdpd) TEST(eval, evaluate_broadcast_v1_pdpd_dyn) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = make_shared(element::i64, Shape{3}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = make_shared(element::Type_t::i64, Shape{3}); auto bcast_v3 = make_shared( A, target_shape, op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1)); auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape}); @@ -549,7 +549,7 @@ TEST(eval, evaluate_broadcast_v1_pdpd_dyn) fun->evaluate({result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), make_host_tensor(Shape{3}, {2, 3, 6})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ @@ -562,9 +562,9 @@ TEST(eval, evaluate_broadcast_v1_pdpd_dyn) TEST(eval, evaluate_broadcast_v1_explicit) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 1}); - auto axes_mapping = op::Constant::create(element::i32, Shape{2}, {1, 2}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 1}); + auto axes_mapping = op::Constant::create(element::Type_t::i32, Shape{2}, {1, 2}); auto bcast_v3 = make_shared( A, target_shape, axes_mapping, op::AutoBroadcastSpec(op::AutoBroadcastType::EXPLICIT)); auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); @@ -572,7 +572,7 @@ TEST(eval, evaluate_broadcast_v1_explicit) auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 1})); auto result_val = read_vector(result); vector expec{1, 2, 
3, 1, 2, 3}; @@ -582,9 +582,9 @@ TEST(eval, evaluate_broadcast_v1_explicit) TEST(eval, evaluate_broadcast_v1_explicit_dyn) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = make_shared(element::i64, Shape{3}); - auto axes_mapping = make_shared(element::i32, Shape{2}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = make_shared(element::Type_t::i64, Shape{3}); + auto axes_mapping = make_shared(element::Type_t::i32, Shape{2}); auto bcast_v1 = make_shared( A, target_shape, axes_mapping, op::AutoBroadcastSpec(op::AutoBroadcastType::EXPLICIT)); @@ -597,7 +597,7 @@ TEST(eval, evaluate_broadcast_v1_explicit_dyn) {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), make_host_tensor(Shape{3}, {2, 3, 1}), make_host_tensor(Shape{2}, {1, 2})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 1})); auto result_val = read_vector(result); vector expec{1, 2, 3, 1, 2, 3}; @@ -607,9 +607,9 @@ TEST(eval, evaluate_broadcast_v1_explicit_dyn) TEST(eval, evaluate_broadcast_v3_explicit_dyn) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = make_shared(element::i64, Shape{3}); - auto axes_mapping = make_shared(element::i32, Shape{2}); + auto A = make_shared(element::Type_t::f32, shape_a); + auto target_shape = make_shared(element::Type_t::i64, Shape{3}); + auto axes_mapping = make_shared(element::Type_t::i32, Shape{2}); auto bcast_v3 = make_shared( A, target_shape, axes_mapping, op::BroadcastModeSpec(op::BroadcastType::EXPLICIT)); @@ -622,7 +622,7 @@ TEST(eval, evaluate_broadcast_v3_explicit_dyn) {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), make_host_tensor(Shape{3}, {2, 3, 1}), make_host_tensor(Shape{2}, {1, 2})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 1})); auto result_val = read_vector(result); vector expec{1, 2, 3, 1, 2, 3}; @@ -631,8 +631,8 @@ TEST(eval, evaluate_broadcast_v3_explicit_dyn) TEST(eval, test_op_multi_out) { - auto p = make_shared(element::f32, PartialShape{2, 3}); - auto p2 = make_shared(element::f64, PartialShape{2, 2}); + auto p = make_shared(element::Type_t::f32, PartialShape{2, 3}); + auto p2 = make_shared(element::Type_t::f64, PartialShape{2, 2}); auto so = make_shared(p, p2); auto fun = make_shared(OutputVector{so->output(0), so->output(1)}, ParameterVector{p, p2}); @@ -641,12 +641,12 @@ TEST(eval, test_op_multi_out) HostTensorVector ins{make_host_tensor(Shape{2, 3}), make_host_tensor(Shape{2, 2})}; ASSERT_TRUE(fun->evaluate({result, result2}, ins)); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3})); auto result_val = read_vector(result); auto arg_val = read_vector(ins[0]); ASSERT_EQ(result_val, arg_val); - EXPECT_EQ(result2->get_element_type(), element::f64); + EXPECT_EQ(result2->get_element_type(), element::Type_t::f64); EXPECT_EQ(result2->get_partial_shape(), (PartialShape{2, 2})); auto result_val2 = read_vector(result2); auto arg_val2 = read_vector(ins[1]); @@ -655,8 +655,8 @@ TEST(eval, test_op_multi_out) TEST(eval, evaluate_reshape_v1) { - auto data = make_shared(element::f32, Shape{2, 5}); - auto pattern = make_shared(element::i64, Shape{2}); + auto data = 
make_shared(element::Type_t::f32, Shape{2, 5}); + auto pattern = make_shared(element::Type_t::i64, Shape{2}); auto dyn_reshape = make_shared(data, pattern, false); auto func = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); auto result_tensor = make_shared(); @@ -664,7 +664,7 @@ TEST(eval, evaluate_reshape_v1) {result_tensor}, {make_host_tensor({2, 5}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}), make_host_tensor({2}, {5, 2})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{5, 2})); auto computed_val = read_vector(result_tensor); vector expected_val{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; @@ -673,8 +673,8 @@ TEST(eval, evaluate_reshape_v1) TEST(eval, evaluate_reshape_v1_negative_index) { - auto data = make_shared(element::f32, Shape{2, 5}); - auto pattern = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{2, 5}); + auto pattern = make_shared(element::Type_t::i64, Shape{2}); auto dyn_reshape = make_shared(data, pattern, false); auto func = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); auto result_tensor = make_shared(); @@ -682,7 +682,7 @@ TEST(eval, evaluate_reshape_v1_negative_index) {result_tensor}, {make_host_tensor({2, 5}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}), make_host_tensor({2}, {2, -1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{2, 5})); auto computed_val = read_vector(result_tensor); vector expected_val{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; @@ -691,8 +691,8 @@ TEST(eval, evaluate_reshape_v1_negative_index) TEST(eval, evaluate_reshape_v1_negative_index_zero_dim_zero_flag) { - auto data = make_shared(element::f32, Shape{2, 2, 2, 2}); - auto pattern = make_shared(element::i64, Shape{6}); + auto data = make_shared(element::Type_t::f32, Shape{2, 2, 2, 2}); + auto pattern = make_shared(element::Type_t::i64, Shape{6}); auto dyn_reshape = make_shared(data, pattern, true); auto func = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); auto result_tensor = make_shared(); @@ -701,7 +701,7 @@ TEST(eval, evaluate_reshape_v1_negative_index_zero_dim_zero_flag) {make_host_tensor( {2, 2, 2, 2}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}), make_host_tensor({6}, {2, 0, 1, -1, 1, 2})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{2, 2, 1, 2, 1, 2})); auto computed_val = read_vector(result_tensor); vector expected_val{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; @@ -710,8 +710,8 @@ TEST(eval, evaluate_reshape_v1_negative_index_zero_dim_zero_flag) TEST(eval, evaluate_reshape_v1_pattern_int16) { - auto data = make_shared(element::f32, Shape{2, 2, 2, 2}); - auto pattern = make_shared(element::i16, Shape{6}); + auto data = make_shared(element::Type_t::f32, Shape{2, 2, 2, 2}); + auto pattern = make_shared(element::Type_t::i16, Shape{6}); auto dyn_reshape = make_shared(data, pattern, true); auto func = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); auto result_tensor = make_shared(); @@ -720,7 +720,7 @@ TEST(eval, evaluate_reshape_v1_pattern_int16) {make_host_tensor( {2, 2, 2, 2}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}), 
make_host_tensor({6}, {2, 0, 1, -1, 1, 2})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{2, 2, 1, 2, 1, 2})); auto computed_val = read_vector(result_tensor); vector expected_val{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; @@ -729,8 +729,8 @@ TEST(eval, evaluate_reshape_v1_pattern_int16) TEST(eval, evaluate_convert) { - auto p = make_shared(element::f32, PartialShape{-1, -1}); - auto convert = make_shared(p, element::i64); + auto p = make_shared(element::Type_t::f32, PartialShape{-1, -1}); + auto convert = make_shared(p, element::Type_t::i64); auto fun = make_shared(OutputVector{convert}, ParameterVector{p}); std::vector> inputs{{-1, 1}}; @@ -740,7 +740,7 @@ TEST(eval, evaluate_convert) auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{1, 2}, inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), (Shape{1, 2})); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result[i]); @@ -749,14 +749,14 @@ TEST(eval, evaluate_convert) TEST(eval, evaluate_abs) { - auto p = make_shared(element::f32, Shape{2, 3}); + auto p = make_shared(element::Type_t::f32, Shape{2, 3}); auto abs = make_shared(p); auto fun = make_shared(OutputVector{abs}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor( Shape{2, 3}, {0.0f, -1.0f, -2.0f, -3.0f, 4.0f, 5.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f}; ASSERT_EQ(result_val, expec); @@ -764,14 +764,14 @@ TEST(eval, evaluate_abs) TEST(eval, evaluate_erf) { - auto p = make_shared(element::f32, Shape{2, 3}); + auto p = make_shared(element::Type_t::f32, Shape{2, 3}); auto erf = make_shared(p); auto fun = make_shared(OutputVector{erf}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor( Shape{2, 3}, {0.0f, -1.0f, -2.0f, -3.0f, 4.0f, 5.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{std::erf(0.0f), std::erf(-1.0f), @@ -784,14 +784,14 @@ TEST(eval, evaluate_erf) TEST(eval, evaluate_exp) { - auto p = make_shared(element::f32, Shape{2, 3}); + auto p = make_shared(element::Type_t::f32, Shape{2, 3}); auto exp = make_shared(p); auto fun = make_shared(OutputVector{exp}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor( Shape{2, 3}, {0.0f, -1.0f, -2.0f, -3.0f, 4.0f, 5.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{std::exp(0.0f), std::exp(-1.0f), @@ -804,14 +804,14 @@ TEST(eval, evaluate_exp) TEST(eval, evaluate_floor) { - auto p = make_shared(element::f32, Shape{2, 2}); + auto p = make_shared(element::Type_t::f32, Shape{2, 2}); auto floor = make_shared(p); auto fun = make_shared(OutputVector{floor}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{2, 2}, {-2.5f, -2.0f, 0.3f, 4.8f})})); - 
EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{-3.0f, -2.0f, 0.0f, 4.0f}; ASSERT_EQ(result_val, expec); @@ -819,14 +819,14 @@ TEST(eval, evaluate_floor) TEST(eval, evaluate_floor_int32) { - auto p = make_shared(element::i32, Shape{2, 2}); + auto p = make_shared(element::Type_t::i32, Shape{2, 2}); auto floor = make_shared(p); auto fun = make_shared(OutputVector{floor}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor( Shape{2, 2}, {-2, -136314888, 0x40000010, 0x40000001})})); - EXPECT_EQ(result->get_element_type(), element::i32); + EXPECT_EQ(result->get_element_type(), element::Type_t::i32); auto result_val = read_vector(result); vector expec{-2, -136314888, 0x40000010, 0x40000001}; ASSERT_EQ(result_val, expec); @@ -834,7 +834,7 @@ TEST(eval, evaluate_floor_int32) TEST(eval, evaluate_log) { - auto p = make_shared(element::f32, Shape{2, 2, 2}); + auto p = make_shared(element::Type_t::f32, Shape{2, 2, 2}); auto log = make_shared(p); auto fun = make_shared(OutputVector{log}, ParameterVector{p}); auto result = make_shared(); @@ -842,7 +842,7 @@ TEST(eval, evaluate_log) fun->evaluate({result}, {make_host_tensor( Shape{2, 2, 2}, {0.125f, 0.25f, 0.5f, 1.f, 2.f, 4.f, 8.f, 16.f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{std::log(0.125f), std::log(0.25f), @@ -857,7 +857,7 @@ TEST(eval, evaluate_log) TEST(eval, evaluate_negative_f32) { - auto p = make_shared(element::f32, Shape{2, 5}); + auto p = make_shared(element::Type_t::f32, Shape{2, 5}); auto negate = make_shared(p); auto fun = make_shared(OutputVector{negate}, ParameterVector{p}); auto result = make_shared(); @@ -866,7 +866,7 @@ TEST(eval, evaluate_negative_f32) {make_host_tensor( Shape{2, 5}, {1.35f, 8.76f, -8.0f, 17.234f, -2.121f, 1.0f, 8.7f, -8.92f, 17.0f, -1.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{-1.35f, -8.76f, 8.0f, -17.234f, 2.121f, -1.0f, -8.7f, 8.92f, -17.0f, 1.0f}; ASSERT_EQ(result_val, expec); @@ -874,14 +874,14 @@ TEST(eval, evaluate_negative_f32) TEST(eval, evaluate_negative_i32) { - auto p = make_shared(element::i32, Shape{2, 5}); + auto p = make_shared(element::Type_t::i32, Shape{2, 5}); auto negate = make_shared(p); auto fun = make_shared(OutputVector{negate}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor( Shape{2, 5}, {1, 8, -8, 17, -2, 1, 8, -8, 17, 0})})); - EXPECT_EQ(result->get_element_type(), element::i32); + EXPECT_EQ(result->get_element_type(), element::Type_t::i32); auto result_val = read_vector(result); vector expec{-1, -8, 8, -17, 2, -1, -8, 8, -17, 0}; ASSERT_EQ(result_val, expec); @@ -889,14 +889,14 @@ TEST(eval, evaluate_negative_i32) TEST(eval, evaluate_relu_2Ffprop_f32) { - auto p = make_shared(element::f32, Shape{2, 5}); + auto p = make_shared(element::Type_t::f32, Shape{2, 5}); auto relu = make_shared(p); auto fun = make_shared(OutputVector{relu}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor( Shape{2, 5}, {1, 8, -8, 17, -0.5, 0.1, 8.5, -8, 17, -0.5})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), 
element::Type_t::f32); auto result_val = read_vector(result); vector expec{1, 8, 0, 17, 0, 0.1, 8.5, 0, 17, 0}; ASSERT_EQ(result_val, expec); @@ -904,14 +904,14 @@ TEST(eval, evaluate_relu_2Ffprop_f32) TEST(eval, evaluate_relu_2Ffprop_i32) { - auto p = make_shared(element::i32, Shape{2, 5}); + auto p = make_shared(element::Type_t::i32, Shape{2, 5}); auto relu = make_shared(p); auto fun = make_shared(OutputVector{relu}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor( Shape{2, 5}, {1, 8, -8, 17, -2, 1, 8, -8, 17, -1})})); - EXPECT_EQ(result->get_element_type(), element::i32); + EXPECT_EQ(result->get_element_type(), element::Type_t::i32); auto result_val = read_vector(result); vector expec{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}; ASSERT_EQ(result_val, expec); @@ -919,14 +919,14 @@ TEST(eval, evaluate_relu_2Ffprop_i32) TEST(eval, evaluate_round) { - auto p = make_shared(element::f32, Shape{5}); + auto p = make_shared(element::Type_t::f32, Shape{5}); auto round = make_shared(p, op::v5::Round::RoundMode::HALF_TO_EVEN); auto fun = make_shared(OutputVector{round}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{5}, {0.9f, 2.5f, 2.3f, 1.5f, -4.5f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{1.0f, 2.0f, 2.0f, 2.0f, -4.0f}; ASSERT_EQ(result_val, expec); @@ -934,7 +934,7 @@ TEST(eval, evaluate_round) TEST(eval, evaluate_round_2D) { - auto p = make_shared(element::f32, Shape{3, 5}); + auto p = make_shared(element::Type_t::f32, Shape{3, 5}); auto round = make_shared(p, op::v5::Round::RoundMode::HALF_TO_EVEN); auto fun = make_shared(OutputVector{round}, ParameterVector{p}); auto result = make_shared(); @@ -955,7 +955,7 @@ TEST(eval, evaluate_round_2D) -2.2f, -2.5f, -2.8f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{ 0.f, 0.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f, 3.f, -1.f, -2.f, -2.f, -2.f, -2.f, -3.f}; @@ -964,7 +964,7 @@ TEST(eval, evaluate_round_2D) TEST(eval, evaluate_sigmoid) { - auto p = make_shared(element::f32, Shape{1, 1, 2, 2}); + auto p = make_shared(element::Type_t::f32, Shape{1, 1, 2, 2}); auto sigmoid = make_shared(p); auto fun = make_shared(OutputVector{sigmoid}, ParameterVector{p}); auto result = make_shared(); @@ -975,7 +975,7 @@ TEST(eval, evaluate_sigmoid) float sigma2 = 1.0f / (1.0f + std::exp(-x2)); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{1, 1, 2, 2}, {x1, x2, x1, x2})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{sigma1, sigma2, sigma1, sigma2}; EXPECT_EQ(result_val.size(), expec.size()); @@ -983,7 +983,7 @@ TEST(eval, evaluate_sigmoid) TEST(eval, evaluate_sign) { - auto p = make_shared(element::f32, Shape{2, 3}); + auto p = make_shared(element::Type_t::f32, Shape{2, 3}); auto sign = make_shared(p); auto fun = make_shared(OutputVector{sign}, ParameterVector{p}); auto result = make_shared(); @@ -991,7 +991,7 @@ TEST(eval, evaluate_sign) ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{2, 3}, {1, -2, 0, -4.8f, 4.8f, -0.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = 
read_vector(result); vector expec{1, -1, 0, -1, 1, 0}; ASSERT_EQ(result_val, expec); @@ -999,7 +999,7 @@ TEST(eval, evaluate_sign) TEST(eval, evaluate_sin) { - auto p = make_shared(element::f32, Shape{11}); + auto p = make_shared(element::Type_t::f32, Shape{11}); auto sin = make_shared(p); auto fun = make_shared(OutputVector{sin}, ParameterVector{p}); auto result = make_shared(); @@ -1008,7 +1008,7 @@ TEST(eval, evaluate_sin) {result}, {make_host_tensor( Shape{11}, {0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{0.00000000f, 0.24740396f, @@ -1026,14 +1026,14 @@ TEST(eval, evaluate_sin) TEST(eval, evaluate_sinh) { - auto p = make_shared(element::f32, Shape{6}); + auto p = make_shared(element::Type_t::f32, Shape{6}); auto sinh = make_shared(p); auto fun = make_shared(OutputVector{sinh}, ParameterVector{p}); auto result = make_shared(); vector input{1.0f, 0.0f, -0.0f, -1.0f, 5.0f, -5.0f}; ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{6}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return sinhf(x); }); @@ -1042,14 +1042,14 @@ TEST(eval, evaluate_sinh) TEST(eval, evaluate_sqrt) { - auto p = make_shared(element::f32, Shape{6}); + auto p = make_shared(element::Type_t::f32, Shape{6}); auto sqrt = make_shared(p); auto fun = make_shared(OutputVector{sqrt}, ParameterVector{p}); auto result = make_shared(); vector input{16, 4, 81, 100, 10000, 0}; ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{6}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{4, 2, 9, 10, 100, 0}; ASSERT_FLOAT_VECTORS_EQ(expec, result_val); @@ -1057,7 +1057,7 @@ TEST(eval, evaluate_sqrt) TEST(eval, evaluate_acos) { - auto p = make_shared(element::f32, Shape{11}); + auto p = make_shared(element::Type_t::f32, Shape{11}); auto acos = make_shared(p); auto fun = make_shared(OutputVector{acos}, ParameterVector{p}); auto result = make_shared(); @@ -1065,7 +1065,7 @@ TEST(eval, evaluate_acos) vector input{-1.f, -0.75f, -0.5f, -0.25f, -0.125f, 0.f, 0.125f, 0.25f, 0.5f, 0.75f, 1.f}; ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return std::acos(x); }); @@ -1074,7 +1074,7 @@ TEST(eval, evaluate_acos) TEST(eval, evaluate_asin) { - auto p = make_shared(element::f32, Shape{11}); + auto p = make_shared(element::Type_t::f32, Shape{11}); auto asin = make_shared(p); auto fun = make_shared(OutputVector{asin}, ParameterVector{p}); auto result = make_shared(); @@ -1082,7 +1082,7 @@ TEST(eval, evaluate_asin) vector input{-1.f, -0.75f, -0.5f, -0.25f, -0.125f, 0.f, 0.125f, 0.25f, 0.5f, 0.75f, 1.f}; ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( 
input.begin(), input.end(), input.begin(), [](float x) -> float { return std::asin(x); }); @@ -1092,7 +1092,7 @@ TEST(eval, evaluate_asin) TEST(eval, evaluate_atan) { - auto p = make_shared(element::f32, Shape{11}); + auto p = make_shared(element::Type_t::f32, Shape{11}); auto atan = make_shared(p); auto fun = make_shared(OutputVector{atan}, ParameterVector{p}); auto result = make_shared(); @@ -1100,7 +1100,7 @@ TEST(eval, evaluate_atan) vector input{-4.f, -2.f, -1.f, -0.5f, -0.25f, 0.f, 0.25f, 0.5f, 1.f, 2.f, 4.f}; ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return std::atan(x); }); @@ -1110,7 +1110,7 @@ TEST(eval, evaluate_atan) TEST(eval, evaluate_ceiling) { - auto p = make_shared(element::f32, Shape{2, 2}); + auto p = make_shared(element::Type_t::f32, Shape{2, 2}); auto ceil = make_shared(p); auto fun = make_shared(OutputVector{ceil}, ParameterVector{p}); auto result = make_shared(); @@ -1118,7 +1118,7 @@ TEST(eval, evaluate_ceiling) vector input{-2.5f, -2.0f, 0.3f, 4.8f}; ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{2, 2}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{-2.0f, -2.0f, 1.0f, 5.0f}; ASSERT_EQ(result_val, expec); @@ -1126,7 +1126,7 @@ TEST(eval, evaluate_ceiling) TEST(eval, evaluate_cos) { - auto p = make_shared(element::f32, Shape{11}); + auto p = make_shared(element::Type_t::f32, Shape{11}); auto cos = make_shared(p); auto fun = make_shared(OutputVector{cos}, ParameterVector{p}); auto result = make_shared(); @@ -1134,7 +1134,7 @@ TEST(eval, evaluate_cos) vector input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f}; ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return std::cos(x); }); @@ -1144,14 +1144,14 @@ TEST(eval, evaluate_cos) TEST(eval, evaluate_cosh) { - auto p = make_shared(element::f32, Shape{6}); + auto p = make_shared(element::Type_t::f32, Shape{6}); auto cosh = make_shared(p); auto fun = make_shared(OutputVector{cosh}, ParameterVector{p}); auto result = make_shared(); vector input{1.0f, 0.0f, -0.0f, -1.0f, 5.0f, -5.0f}; ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{6}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return std::cosh(x); }); @@ -1161,7 +1161,7 @@ TEST(eval, evaluate_cosh) TEST(eval, evaluate_tan) { - auto p = make_shared(element::f32, Shape{11}); + auto p = make_shared(element::Type_t::f32, Shape{11}); auto tan = make_shared(p); auto fun = make_shared(OutputVector{tan}, ParameterVector{p}); auto result = make_shared(); @@ -1169,7 +1169,7 @@ TEST(eval, evaluate_tan) vector input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f}; ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - 
EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return std::tan(x); }); @@ -1179,14 +1179,14 @@ TEST(eval, evaluate_tan) TEST(eval, evaluate_tanh) { - auto p = make_shared(element::f32, Shape{6}); + auto p = make_shared(element::Type_t::f32, Shape{6}); auto tanh = make_shared(p); auto fun = make_shared(OutputVector{tanh}, ParameterVector{p}); auto result = make_shared(); vector input{1.0f, 0.0f, -0.0f, -1.0f, 0.5f, -0.5f}; ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{6}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return std::tanh(x); }); @@ -1196,14 +1196,14 @@ TEST(eval, evaluate_tanh) TEST(eval, evaluate_logical_not) { - auto p = make_shared(element::boolean, Shape{2, 2}); + auto p = make_shared(element::Type_t::boolean, Shape{2, 2}); auto logical_not = make_shared(p); auto fun = make_shared(OutputVector{logical_not}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{2, 2}, {1, 0, 1, 0})})); - EXPECT_EQ(result->get_element_type(), element::boolean); + EXPECT_EQ(result->get_element_type(), element::Type_t::boolean); auto result_val = read_vector(result); vector expec{0, 1, 0, 1}; ASSERT_EQ(result_val, expec); @@ -1211,9 +1211,9 @@ TEST(eval, evaluate_logical_not) TEST(eval, evaluate_dynamic_gather) { - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::i32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::i32, PartialShape::dynamic()); auto gather = make_shared(arg1, arg2, arg3); auto fun = make_shared(OutputVector{gather}, ParameterVector{arg1, arg2, arg3}); auto result_tensor = make_shared(); @@ -1221,7 +1221,7 @@ TEST(eval, evaluate_dynamic_gather) {make_host_tensor({3}, {1.0f, 2.0f, 3.0f}), make_host_tensor({2}, {1, 0}), make_host_tensor({1}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{2})); auto cval = read_vector(result_tensor); vector out{2.0f, 1.0f}; @@ -1230,9 +1230,9 @@ TEST(eval, evaluate_dynamic_gather) TEST(eval, evaluate_dynamic_axis_gather) { - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto gather = make_shared(arg1, arg2, arg3); auto fun = make_shared(OutputVector{gather}, ParameterVector{arg1, arg2, arg3}); auto result_tensor = make_shared(); @@ -1241,7 +1241,7 @@ TEST(eval, evaluate_dynamic_axis_gather) {3, 3}, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f, 3.0f, 3.1f, 3.2f}), make_host_tensor({1, 2}, {0, 2}), 
make_host_tensor({}, {1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 1, 2})); auto cval = read_vector(result_tensor); vector out{1.0f, 1.2f, 2.0f, 2.2f, 3.0f, 3.2f}; @@ -1250,15 +1250,15 @@ TEST(eval, evaluate_dynamic_axis_gather) TEST(eval, evaluate_dynamic_concat) { - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::f32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto concat = make_shared(NodeVector{arg1, arg2}, 1); auto fun = make_shared(OutputVector{concat}, ParameterVector{arg1, arg2}); auto result_tensor = make_shared(); ASSERT_TRUE(fun->evaluate({result_tensor}, {make_host_tensor({1, 1}, {1.0f}), make_host_tensor({1, 2}, {8.0f, 10.0f})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{1, 3})); auto cval = read_vector(result_tensor); vector out{1.0f, 8.0f, 10.0f}; @@ -1289,17 +1289,25 @@ void test_eval(shared_ptr fun, TEST(eval, eval_transpose) { - auto x = make_shared(element::f32, PartialShape::dynamic()); + auto x = make_shared(element::Type_t::f32, PartialShape::dynamic()); vector> axes; - axes.push_back(make_shared(element::i8, PartialShape{Dimension::dynamic()})); - axes.push_back(make_shared(element::i16, PartialShape{Dimension::dynamic()})); - axes.push_back(make_shared(element::i32, PartialShape{Dimension::dynamic()})); - axes.push_back(make_shared(element::i64, PartialShape{Dimension::dynamic()})); - - axes.push_back(make_shared(element::u8, PartialShape{Dimension::dynamic()})); - axes.push_back(make_shared(element::u16, PartialShape{Dimension::dynamic()})); - axes.push_back(make_shared(element::u32, PartialShape{Dimension::dynamic()})); - axes.push_back(make_shared(element::u64, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::i8, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::i16, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic()})); + + axes.push_back( + make_shared(element::Type_t::u8, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::u16, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::u32, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::u64, PartialShape{Dimension::dynamic()})); std::vector x_shapes{Shape{2, 3}, Shape{2, 3}, Shape{2, 2, 3}}; @@ -1348,7 +1356,7 @@ TEST(eval, eval_transpose) TEST(eval, max_pool_v1_dynamic) { Shape window_shape{3}; - auto A = make_shared(element::f32, PartialShape::dynamic()); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto f = make_shared( make_shared( A, Strides(), Shape(), Shape(), window_shape, op::RoundingType::FLOOR), @@ -1359,7 +1367,7 @@ TEST(eval, max_pool_v1_dynamic) {make_host_tensor( {1, 1, 14}, {0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), 
element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{1, 1, 12})); auto cval = read_vector(result_tensor); vector out{1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 0}; @@ -1369,10 +1377,10 @@ TEST(eval, evaluate_static_scatter_elements_update_basic) { const Shape data_shape{3, 3}; const Shape indices_shape{2, 3}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::f32, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::Type_t::f32, data_shape); + auto arg2 = make_shared(element::Type_t::i32, indices_shape); + auto arg3 = make_shared(element::Type_t::f32, indices_shape); + auto arg4 = make_shared(element::Type_t::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_elements_update}, @@ -1386,7 +1394,7 @@ TEST(eval, evaluate_static_scatter_elements_update_basic) make_host_tensor(indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{2.f, 1.1f, 0.0f, 1.f, 0.0f, 2.2f, 0.f, 2.1f, 1.2f}; @@ -1398,10 +1406,10 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_basic) const Shape data_shape{3, 3}; const Shape indices_shape{2, 3}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); @@ -1417,7 +1425,7 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_basic) {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); auto cval = read_vector(result_tensor); vector out{2.f, 1.1f, 0.0f, 1.f, 0.0f, 2.2f, 0.f, 2.1f, 1.2f}; @@ -1430,10 +1438,10 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_negative_axis) const Shape indices_shape{2, 3}; const Shape axis_shape{}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); @@ -1449,7 +1457,7 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_negative_axis) {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), 
make_host_tensor(axis_shape, {-1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); auto cval = read_vector(result_tensor); vector out{1.1f, 1.0f, 1.2f, 2.0f, 2.2f, 2.1f, 0.0f, 0.0f, 0.0f}; @@ -1461,10 +1469,10 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_1d_axis) const Shape data_shape{3, 3}; const Shape indices_shape{2, 3}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); @@ -1480,7 +1488,7 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_1d_axis) {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor({1}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); auto cval = read_vector(result_tensor); vector out{2.f, 1.1f, 0.0f, 1.f, 0.0f, 2.2f, 0.f, 2.1f, 1.2f}; @@ -1493,10 +1501,10 @@ TEST(eval, DISABLED_evaluate_dynamic_scatter_elements_update_3d_i16) const Shape data_shape{3, 3, 3}; const Shape indices_shape{2, 2, 3}; - auto arg1 = make_shared(element::i16, PartialShape::dynamic()); - auto arg2 = make_shared(element::i16, PartialShape::dynamic()); - auto arg3 = make_shared(element::i16, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i16, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i16, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::i16, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); @@ -1513,7 +1521,7 @@ TEST(eval, DISABLED_evaluate_dynamic_scatter_elements_update_3d_i16) indices_shape, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}), make_host_tensor({}, {1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::i16); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::i16); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3, 3})); auto cval = read_vector(result_tensor); vector out{4, 2, 0, 1, 0, 6, 0, 5, 3, 10, 0, 12, 0, 11, @@ -1526,10 +1534,10 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_one_elem_i32) const Shape data_shape{3, 3, 3}; const Shape indices_shape{1, 1, 1}; - auto arg1 = make_shared(element::i32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::i32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, 
PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); @@ -1544,7 +1552,7 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_one_elem_i32) make_host_tensor(indices_shape, {2}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::i32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::i32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3, 3})); auto cval = read_vector(result_tensor); vector out{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, @@ -1557,9 +1565,9 @@ TEST(eval, topk_v1) Shape shape{2, 3, 2}; Shape rshape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - const auto k = op::Constant::create(element::i32, Shape{}, {2}); - auto B = make_shared(A, k, 1, "max", "index", element::i32); + auto A = make_shared(element::Type_t::f32, shape); + const auto k = op::Constant::create(element::Type_t::i32, Shape{}, {2}); + auto B = make_shared(A, k, 1, "max", "index", element::Type_t::i32); auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A}); @@ -1568,9 +1576,9 @@ TEST(eval, topk_v1) ASSERT_TRUE(fun->evaluate({result0, result1}, {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); auto result0_val = read_vector(result0); @@ -1587,9 +1595,9 @@ TEST(eval, topk_v1_dyn) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::u32, Shape{}); - auto B = make_shared(A, k, 1, "max", "index", element::i32); + auto A = make_shared(element::Type_t::f32, shape); + auto k = make_shared(element::Type_t::u32, Shape{}); + auto B = make_shared(A, k, 1, "max", "index", element::Type_t::i32); auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); @@ -1600,9 +1608,9 @@ TEST(eval, topk_v1_dyn) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1617,9 +1625,9 @@ TEST(eval, topk_v3_dyn) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::u32, Shape{}); - auto B = make_shared(A, k, 1, "max", "index", element::i32); + auto A = make_shared(element::Type_t::f32, shape); + auto k = make_shared(element::Type_t::u32, Shape{}); + auto B = make_shared(A, k, 1, "max", "index", element::Type_t::i32); auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); @@ -1630,9 +1638,9 @@ TEST(eval, topk_v3_dyn) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); 
EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1647,9 +1655,9 @@ TEST(eval, topk_v3_dyn_values) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::u32, Shape{}); - auto B = make_shared(A, k, 1, "max", "value", element::i32); + auto A = make_shared(element::Type_t::f32, shape); + auto k = make_shared(element::Type_t::u32, Shape{}); + auto B = make_shared(A, k, 1, "max", "value", element::Type_t::i32); auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); @@ -1660,9 +1668,9 @@ TEST(eval, topk_v3_dyn_values) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1677,9 +1685,9 @@ TEST(eval, topk_v3_dyn_values_k0) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::u32, Shape{}); - auto B = make_shared(A, k, 1, "max", "value", element::i32); + auto A = make_shared(element::Type_t::f32, shape); + auto k = make_shared(element::Type_t::u32, Shape{}); + auto B = make_shared(A, k, 1, "max", "value", element::Type_t::i32); auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); @@ -1690,9 +1698,9 @@ TEST(eval, topk_v3_dyn_values_k0) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 3, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1707,10 +1715,10 @@ TEST(eval, topk_v1_dyn_k0) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::i64, Shape{}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = make_shared(element::Type_t::i64, Shape{}); - element::Type result_et{element::i32}; + element::Type result_et{element::Type_t::i32}; auto B = make_shared( A, k, 1, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, result_et); @@ -1723,9 +1731,9 @@ TEST(eval, topk_v1_dyn_k0) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), 
(PartialShape{2, 3, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1739,9 +1747,9 @@ TEST(eval, topk_v1_dyn_k0) TEST(eval, topk_v3_param_dyn_values_k0) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto k = make_shared(element::u32, Shape{}); - auto B = make_shared(A, k, 1, "max", "value", element::i32); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto k = make_shared(element::Type_t::u32, Shape{}); + auto B = make_shared(A, k, 1, "max", "value", element::Type_t::i32); auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); @@ -1752,9 +1760,9 @@ TEST(eval, topk_v3_param_dyn_values_k0) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 3, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1767,9 +1775,9 @@ TEST(eval, topk_v3_param_dyn_values_k0) TEST(eval, topk_v3_param_dyn_values_k2) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto k = make_shared(element::u32, Shape{}); - auto B = make_shared(A, k, 1, "max", "value", element::i32); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto k = make_shared(element::Type_t::u32, Shape{}); + auto B = make_shared(A, k, 1, "max", "value", element::Type_t::i32); auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); @@ -1780,9 +1788,9 @@ TEST(eval, topk_v3_param_dyn_values_k2) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1795,11 +1803,11 @@ TEST(eval, topk_v3_param_dyn_values_k2) TEST(eval, topk_v1_param_dyn_k2) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto k = make_shared(element::i64, Shape{}); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto k = make_shared(element::Type_t::i64, Shape{}); auto axis = 1; - element::Type result_et{element::i32}; + element::Type result_et{element::Type_t::i32}; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, result_et); @@ -1812,9 +1820,9 @@ TEST(eval, topk_v1_param_dyn_k2) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 
2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1828,10 +1836,10 @@ TEST(eval, topk_v1_param_dyn_k2) TEST(eval, topk_v1_param_dyn_k0) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto k = make_shared(element::i64, Shape{}); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto k = make_shared(element::Type_t::i64, Shape{}); - element::Type result_et{element::i32}; + element::Type result_et{element::Type_t::i32}; auto B = make_shared( A, k, 1, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, result_et); @@ -1845,9 +1853,9 @@ TEST(eval, topk_v1_param_dyn_k0) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 3, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1861,8 +1869,8 @@ TEST(eval, topk_v1_param_dyn_k0) TEST(eval, reduce_logical_and__neg_axis) { - const auto data = make_shared(element::boolean, Shape{2, 2, 2}); - const auto axes = make_shared(element::i64, Shape{}); + const auto data = make_shared(element::Type_t::boolean, Shape{2, 2, 2}); + const auto axes = make_shared(element::Type_t::i64, Shape{}); const auto op = make_shared(data, axes); @@ -1887,10 +1895,10 @@ TEST(eval, evaluate_static_scatter_update_basic_axes_indices_i32) const Shape indices_shape{1, 2}; const Shape updates_shape{1, 2, 3}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::f32, updates_shape); - auto arg4 = make_shared(element::i32, Shape{}); + auto arg1 = make_shared(element::Type_t::f32, data_shape); + auto arg2 = make_shared(element::Type_t::i32, indices_shape); + auto arg3 = make_shared(element::Type_t::f32, updates_shape); + auto arg4 = make_shared(element::Type_t::i32, Shape{}); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); @@ -1902,7 +1910,7 @@ TEST(eval, evaluate_static_scatter_update_basic_axes_indices_i32) make_host_tensor( updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 0.f, 0.f, 1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}; @@ -1915,10 +1923,10 @@ TEST(eval, evaluate_static_scatter_update_basic_axes_indices_i64) const Shape indices_shape{1, 2}; const Shape updates_shape{1, 2, 3}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i64, indices_shape); - auto arg3 = make_shared(element::f32, updates_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::Type_t::f32, data_shape); + auto arg2 = make_shared(element::Type_t::i64, indices_shape); + auto arg3 = make_shared(element::Type_t::f32, updates_shape); + auto arg4 = make_shared(element::Type_t::i64, Shape{}); auto scatter_update = make_shared(arg1, 
arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); @@ -1930,7 +1938,7 @@ TEST(eval, evaluate_static_scatter_update_basic_axes_indices_i64) make_host_tensor( updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 0.f, 0.f, 1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}; @@ -1943,10 +1951,10 @@ TEST(eval, evaluate_dynamic_scatter_update_basic) const Shape indices_shape{1, 2}; const Shape updates_shape{1, 2, 3}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_update}, @@ -1960,7 +1968,7 @@ TEST(eval, evaluate_dynamic_scatter_update_basic) updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 0.f, 0.f, 1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}; @@ -1974,10 +1982,10 @@ TEST(eval, evaluate_dynamic_scatter_update_negative_axis) const Shape updates_shape{3, 1, 2}; const Shape axis_shape{}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_update}, @@ -1991,7 +1999,7 @@ TEST(eval, evaluate_dynamic_scatter_update_negative_axis) updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor(axis_shape, {-1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 1.0f, 1.1f, 0.0f, 1.2f, 2.0f, 0.0f, 2.1f, 2.2f}; @@ -2004,10 +2012,10 @@ TEST(eval, evaluate_dynamic_scatter_update_1d_axis) const Shape indices_shape{1, 2}; const Shape updates_shape{3, 1, 2}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, 
PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_update}, @@ -2021,7 +2029,7 @@ TEST(eval, evaluate_dynamic_scatter_update_1d_axis) updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor({1}, {1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 1.0f, 1.1f, 0.0f, 1.2f, 2.0f, 0.0f, 2.1f, 2.2f}; @@ -2034,10 +2042,10 @@ TEST(eval, evaluate_dynamic_scatter_update_one_elem_i32) const Shape indices_shape{1, 1}; const Shape updates_shape{1, 1, 3, 2}; - auto arg1 = make_shared(element::i32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::i32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_update}, @@ -2051,7 +2059,7 @@ TEST(eval, evaluate_dynamic_scatter_update_one_elem_i32) make_host_tensor(updates_shape, {1, 2, 3, 4, 5, 6}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::i32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::i32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3, 2})); auto cval = read_vector(result_tensor); vector out{0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0}; diff --git a/ngraph/test/graph_rewrite.cpp b/ngraph/test/graph_rewrite.cpp index 5cb3f5da222ee6..5bfd26086b31ae 100644 --- a/ngraph/test/graph_rewrite.cpp +++ b/ngraph/test/graph_rewrite.cpp @@ -20,7 +20,7 @@ class TestPass : public ngraph::pass::MatcherPass : MatcherPass() { auto divide = std::make_shared( - element::f32, Shape{}, pattern::has_class()); + element::Type_t::f32, Shape{}, pattern::has_class()); ngraph::graph_rewrite_callback callback = [this](pattern::Matcher& m) { if (m_transformation_callback(m.get_match_root())) { @@ -52,10 +52,10 @@ NGRAPH_RTTI_DEFINITION(Anchor, "Anchor", 0); std::shared_ptr get_function() { - auto data = - std::make_shared(ngraph::element::f32, ngraph::Shape{3, 1, 2}); + auto data = std::make_shared(ngraph::element::Type_t::f32, + ngraph::Shape{3, 1, 2}); auto divide_constant = - ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.5}); + ngraph::opset3::Constant::create(ngraph::element::Type_t::f32, ngraph::Shape{1}, {1.5}); auto divide = std::make_shared(data, divide_constant); return std::make_shared(ngraph::NodeVector{divide}, ngraph::ParameterVector{data}); @@ -148,10 +148,10 @@ NGRAPH_RTTI_DEFINITION(PrivateDivide, "PrivateDivide", 0, ngraph::opset3::Divide std::shared_ptr get_derived_function() { - auto data = - std::make_shared(ngraph::element::f32, ngraph::Shape{3, 1, 
2}); + auto data = std::make_shared(ngraph::element::Type_t::f32, + ngraph::Shape{3, 1, 2}); auto divide_constant = - ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.5}); + ngraph::opset3::Constant::create(ngraph::element::Type_t::f32, ngraph::Shape{1}, {1.5}); auto divide = std::make_shared(data, divide_constant); return std::make_shared(ngraph::NodeVector{divide}, ngraph::ParameterVector{data}); @@ -177,7 +177,7 @@ class TypeBasedTestPass : public ngraph::pass::MatcherPass auto divide = std::make_shared( std::make_shared(), std::make_shared()); - // element::f32, Shape{}, pattern::has_class()); + // element::Type_t::f32, Shape{}, pattern::has_class()); ngraph::graph_rewrite_callback callback = [this](pattern::Matcher& m) { if (m_transformation_callback(m.get_match_root())) { @@ -384,4 +384,4 @@ TEST(PassConfigTest, Test1) manager.run_passes(f); ASSERT_EQ(count_ops_of_type(f), 1); } -} \ No newline at end of file +} diff --git a/ngraph/test/input_output_assign.cpp b/ngraph/test/input_output_assign.cpp index 4dac79ae7a05a4..61c125bf5f85b6 100644 --- a/ngraph/test/input_output_assign.cpp +++ b/ngraph/test/input_output_assign.cpp @@ -28,7 +28,7 @@ using namespace ngraph; TEST(input_output, param_tensor) { // Params have no arguments, so we can check that the value becomes a tensor output - auto& et = element::f32; + element::Type et = element::Type_t::f32; Shape shape{2, 4}; auto param = make_shared(et, shape); @@ -39,8 +39,8 @@ TEST(input_output, param_tensor) TEST(input_output, simple_output) { - auto param_0 = make_shared(element::f32, Shape{2, 4}); - auto param_1 = make_shared(element::f32, Shape{2, 4}); + auto param_0 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); auto add = make_shared(param_0, param_1); // Sort the ops diff --git a/ngraph/test/matcher_pass.cpp b/ngraph/test/matcher_pass.cpp index be2dadfed1d5f2..1bba9331bb0b1e 100644 --- a/ngraph/test/matcher_pass.cpp +++ b/ngraph/test/matcher_pass.cpp @@ -74,7 +74,7 @@ TEST(pattern, matcher_pass) { { TestMatcherPass test_matcher; - auto a = make_shared(element::f32, Shape{1}); + auto a = make_shared(element::Type_t::f32, Shape{1}); auto b = make_shared(a); auto c = make_shared(b); auto f = std::make_shared(ngraph::NodeVector{c}, ParameterVector{a}); @@ -92,7 +92,7 @@ TEST(pattern, matcher_pass) { TestMatcherPass test_matcher; - auto a = make_shared(element::f32, Shape{1}); + auto a = make_shared(element::Type_t::f32, Shape{1}); auto b = make_shared(a); auto c = make_shared(b); auto f = std::make_shared(ngraph::NodeVector{b, c}, ParameterVector{a}); @@ -103,7 +103,7 @@ TEST(pattern, matcher_pass) { std::shared_ptr f; { - auto a = make_shared(element::f32, Shape{1}); + auto a = make_shared(element::Type_t::f32, Shape{1}); auto b = make_shared(a); auto c = make_shared(b); auto d = make_shared(c); @@ -117,4 +117,4 @@ TEST(pattern, matcher_pass) // Parameter->Relu->Result ASSERT_TRUE(f->get_ops().size() == 3); } -} \ No newline at end of file +} diff --git a/ngraph/test/models/onnx/swish_with_beta.prototxt b/ngraph/test/models/onnx/swish_with_beta.prototxt new file mode 100644 index 00000000000000..170fd43ca6f8d1 --- /dev/null +++ b/ngraph/test/models/onnx/swish_with_beta.prototxt @@ -0,0 +1,57 @@ +ir_version: 7 +producer_name: "onnx-importer-test" +graph { + node { + output: "beta" + op_type: "Constant" + attribute { + name: "value" + t { + data_type: 1 + dims: 1 + float_data: 0.75 + name: "const_tensor" + } + type: TENSOR + } + } + node { + 
input: "X" + input: "beta" + output: "Y" + op_type: "Swish" + domain: "org.openvinotoolkit" + } + name: "test-model" + input { + name: "X" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 3 + } + } + } + } + } + output { + name: "Y" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 3 + } + } + } + } + } +} +opset_import { + domain: "" + version: 10 +} + diff --git a/ngraph/test/models/onnx/swish_without_beta.prototxt b/ngraph/test/models/onnx/swish_without_beta.prototxt new file mode 100644 index 00000000000000..23cb188dd7af25 --- /dev/null +++ b/ngraph/test/models/onnx/swish_without_beta.prototxt @@ -0,0 +1,42 @@ +ir_version: 7 +producer_name: "onnx-importer-test" +graph { + node { + input: "X" + output: "Y" + op_type: "Swish" + domain: "org.openvinotoolkit" + } + name: "test-model" + input { + name: "X" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 3 + } + } + } + } + } + output { + name: "Y" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 3 + } + } + } + } + } +} +opset_import { + domain: "" + version: 10 +} + diff --git a/ngraph/test/node_input_output.cpp b/ngraph/test/node_input_output.cpp index fdcc98d3ff5b50..4104e68166770d 100644 --- a/ngraph/test/node_input_output.cpp +++ b/ngraph/test/node_input_output.cpp @@ -30,8 +30,8 @@ using namespace std; TEST(node_input_output, input_create) { - auto x = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto y = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto x = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto y = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto add = make_shared(x, y); auto add_in_0 = add->input(0); @@ -39,14 +39,14 @@ TEST(node_input_output, input_create) EXPECT_EQ(add_in_0.get_node(), add.get()); EXPECT_EQ(add_in_0.get_index(), 0); - EXPECT_EQ(add_in_0.get_element_type(), element::f32); + EXPECT_EQ(add_in_0.get_element_type(), element::Type_t::f32); EXPECT_EQ(add_in_0.get_shape(), (Shape{1, 2, 3, 4})); EXPECT_TRUE(add_in_0.get_partial_shape().same_scheme(PartialShape{1, 2, 3, 4})); EXPECT_EQ(add_in_0.get_source_output(), Output(x, 0)); EXPECT_EQ(add_in_1.get_node(), add.get()); EXPECT_EQ(add_in_1.get_index(), 1); - EXPECT_EQ(add_in_1.get_element_type(), element::f32); + EXPECT_EQ(add_in_1.get_element_type(), element::Type_t::f32); EXPECT_EQ(add_in_1.get_shape(), (Shape{1, 2, 3, 4})); EXPECT_TRUE(add_in_1.get_partial_shape().same_scheme(PartialShape{1, 2, 3, 4})); EXPECT_EQ(add_in_1.get_source_output(), Output(y, 0)); @@ -56,8 +56,8 @@ TEST(node_input_output, input_create) TEST(node_input_output, input_create_const) { - auto x = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto y = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto x = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto y = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto add = make_shared(x, y); auto add_in_0 = add->input(0); @@ -65,14 +65,14 @@ TEST(node_input_output, input_create_const) EXPECT_EQ(add_in_0.get_node(), add.get()); EXPECT_EQ(add_in_0.get_index(), 0); - EXPECT_EQ(add_in_0.get_element_type(), element::f32); + EXPECT_EQ(add_in_0.get_element_type(), element::Type_t::f32); EXPECT_EQ(add_in_0.get_shape(), (Shape{1, 2, 3, 4})); EXPECT_TRUE(add_in_0.get_partial_shape().same_scheme(PartialShape{1, 2, 3, 4})); EXPECT_EQ(add_in_0.get_source_output(), Output(x, 0)); EXPECT_EQ(add_in_1.get_node(), add.get()); EXPECT_EQ(add_in_1.get_index(), 1); - EXPECT_EQ(add_in_1.get_element_type(), element::f32); + 
EXPECT_EQ(add_in_1.get_element_type(), element::Type_t::f32); EXPECT_EQ(add_in_1.get_shape(), (Shape{1, 2, 3, 4})); EXPECT_TRUE(add_in_1.get_partial_shape().same_scheme(PartialShape{1, 2, 3, 4})); EXPECT_EQ(add_in_1.get_source_output(), Output(y, 0)); @@ -82,15 +82,15 @@ TEST(node_input_output, input_create_const) TEST(node_input_output, output_create) { - auto x = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto y = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto x = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto y = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto add = make_shared(x, y); auto add_out_0 = add->output(0); EXPECT_EQ(add_out_0.get_node(), add.get()); EXPECT_EQ(add_out_0.get_index(), 0); - EXPECT_EQ(add_out_0.get_element_type(), element::f32); + EXPECT_EQ(add_out_0.get_element_type(), element::Type_t::f32); EXPECT_EQ(add_out_0.get_shape(), (Shape{1, 2, 3, 4})); EXPECT_TRUE(add_out_0.get_partial_shape().same_scheme(PartialShape{1, 2, 3, 4})); @@ -99,15 +99,15 @@ TEST(node_input_output, output_create) TEST(node_input_output, output_create_const) { - auto x = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto y = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto x = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto y = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto add = make_shared(x, y); auto add_out_0 = add->output(0); EXPECT_EQ(add_out_0.get_node(), add.get()); EXPECT_EQ(add_out_0.get_index(), 0); - EXPECT_EQ(add_out_0.get_element_type(), element::f32); + EXPECT_EQ(add_out_0.get_element_type(), element::Type_t::f32); EXPECT_EQ(add_out_0.get_shape(), (Shape{1, 2, 3, 4})); EXPECT_TRUE(add_out_0.get_partial_shape().same_scheme(PartialShape{1, 2, 3, 4})); diff --git a/ngraph/test/onnx/onnx_import.in.cpp b/ngraph/test/onnx/onnx_import.in.cpp index 84596b4cfb585d..2d412e58acc180 100644 --- a/ngraph/test/onnx/onnx_import.in.cpp +++ b/ngraph/test/onnx/onnx_import.in.cpp @@ -426,7 +426,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_missing_input) "TestMissingIn", 1, "com.intel.ai", [](const onnx_import::Node& node) -> OutputVector { OutputVector ng_inputs{node.get_ng_inputs()}; std::shared_ptr result = std::make_shared( - element::f32, ngraph::Shape{2, 2}, std::vector{1, 1, 1, 1}); + element::Type_t::f32, ngraph::Shape{2, 2}, std::vector{1, 1, 1, 1}); for (const auto& ng_input : ng_inputs) { @@ -2155,6 +2155,34 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_softplus_infinity) test_case.run(); } +NGRAPH_TEST(${BACKEND_NAME}, onnx_model_swish_with_beta) +{ + auto function = onnx_import::import_onnx_model( + file_util::path_join(SERIALIZED_ZOO, "onnx/swish_with_beta.prototxt")); + + const Shape expected_output_shape{3}; + auto test_case = test::TestCase(function); + std::vector input_data{-0.5f, 0, 0.5f}; + test_case.add_input(input_data); + test_case.add_expected_output(expected_output_shape, {-0.2036667, 0.0, 0.2963333}); + + test_case.run_with_tolerance_as_fp(2.0e-5f); +} + +NGRAPH_TEST(${BACKEND_NAME}, onnx_model_swish_without_beta) +{ + auto function = onnx_import::import_onnx_model( + file_util::path_join(SERIALIZED_ZOO, "onnx/swish_without_beta.prototxt")); + + const Shape expected_output_shape{3}; + auto test_case = test::TestCase(function); + std::vector input_data{-0.5f, 0, 0.5f}; + test_case.add_input(input_data); + test_case.add_expected_output(expected_output_shape, {-0.18877034, 0.0, 0.31122968}); + + test_case.run_with_tolerance_as_fp(2.0e-5f); +} + NGRAPH_TEST(${BACKEND_NAME}, onnx_model_sum_opset8) { auto function 
= onnx_import::import_onnx_model( diff --git a/ngraph/test/onnx/onnx_import_controlflow.in.cpp b/ngraph/test/onnx/onnx_import_controlflow.in.cpp index 827c5b4d716fe0..be3168b40df96c 100644 --- a/ngraph/test/onnx/onnx_import_controlflow.in.cpp +++ b/ngraph/test/onnx/onnx_import_controlflow.in.cpp @@ -49,16 +49,16 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_2d_add) // Shape inference tests const auto& parameters = function->get_parameters(); EXPECT_EQ(parameters.size(), 1); - EXPECT_EQ(parameters.at(0)->get_element_type(), ngraph::element::f32); + EXPECT_EQ(parameters.at(0)->get_element_type(), ngraph::element::Type_t::f32); EXPECT_TRUE(parameters.at(0)->get_partial_shape().is_static()); EXPECT_EQ(parameters.at(0)->get_partial_shape().to_shape(), (Shape{1, 2})); const auto& results = function->get_results(); EXPECT_EQ(results.size(), 2); - EXPECT_EQ(function->get_output_element_type(0), ngraph::element::f32); + EXPECT_EQ(function->get_output_element_type(0), ngraph::element::Type_t::f32); EXPECT_TRUE(function->get_output_partial_shape(0).is_static()); EXPECT_EQ(function->get_output_shape(0), (Shape{1, 2})); - EXPECT_EQ(function->get_output_element_type(1), ngraph::element::f32); + EXPECT_EQ(function->get_output_element_type(1), ngraph::element::Type_t::f32); EXPECT_TRUE(function->get_output_partial_shape(1).is_static()); EXPECT_EQ(function->get_output_shape(1), (Shape{3, 2})); @@ -375,10 +375,10 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_2d_trip_count_and_cond_skippe const auto& results = function->get_results(); EXPECT_EQ(results.size(), 2); - EXPECT_EQ(function->get_output_element_type(0), ngraph::element::f32); + EXPECT_EQ(function->get_output_element_type(0), ngraph::element::Type_t::f32); EXPECT_TRUE(function->get_output_partial_shape(0).is_static()); EXPECT_EQ(function->get_output_shape(0), (Shape{1, 2})); - EXPECT_EQ(function->get_output_element_type(1), ngraph::element::f32); + EXPECT_EQ(function->get_output_element_type(1), ngraph::element::Type_t::f32); // scan_outputs shape is not know if trip_count and termination condition is not determined EXPECT_TRUE(function->get_output_partial_shape(1).rank().is_dynamic()); } diff --git a/ngraph/test/op.cpp b/ngraph/test/op.cpp index 96e5e8d53da9eb..380b177125d395 100644 --- a/ngraph/test/op.cpp +++ b/ngraph/test/op.cpp @@ -33,14 +33,14 @@ using namespace ngraph; TEST(op, is_op) { - auto arg0 = make_shared(element::f32, Shape{1}); + auto arg0 = make_shared(element::Type_t::f32, Shape{1}); ASSERT_NE(nullptr, arg0); EXPECT_TRUE(op::is_parameter(arg0)); } TEST(op, is_parameter) { - auto arg0 = make_shared(element::f32, Shape{1}); + auto arg0 = make_shared(element::Type_t::f32, Shape{1}); ASSERT_NE(nullptr, arg0); auto t0 = make_shared(arg0, arg0); ASSERT_NE(nullptr, t0); @@ -49,7 +49,7 @@ TEST(op, is_parameter) TEST(op, provenance_tag) { - auto node = make_shared(element::f32, Shape{1}); + auto node = make_shared(element::Type_t::f32, Shape{1}); auto tag1 = "parameter node"; auto tag2 = "f32 node"; node->add_provenance_tag(tag1); @@ -104,7 +104,7 @@ TEST(op, variant) EXPECT_EQ(ship.x, 3); EXPECT_EQ(ship.y, 4); - auto node = make_shared(element::f32, Shape{1}); + auto node = make_shared(element::Type_t::f32, Shape{1}); node->get_rt_info()["A"] = var_ship; auto node_var_ship = node->get_rt_info().at("A"); ASSERT_TRUE((is_type>(node_var_ship))); diff --git a/ngraph/test/op_eval/floor_mod.cpp b/ngraph/test/op_eval/floor_mod.cpp index 2b2ad9a57df05e..8d1c3c765f95fd 100644 --- a/ngraph/test/op_eval/floor_mod.cpp +++ 
b/ngraph/test/op_eval/floor_mod.cpp @@ -30,8 +30,8 @@ using namespace ngraph; TEST(op_eval, floor_mod) { - auto a = make_shared(element::f32, Shape{4}); - auto b = make_shared(element::f32, Shape{4}); + auto a = make_shared(element::Type_t::f32, Shape{4}); + auto b = make_shared(element::Type_t::f32, Shape{4}); auto floor_mod = make_shared(a, b); auto fun = make_shared(OutputVector{floor_mod}, ParameterVector{a, b}); @@ -43,7 +43,7 @@ TEST(op_eval, floor_mod) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{4}, a_value), make_host_tensor(Shape{4}, b_value)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{4}); auto result_data = read_vector(result); for (size_t i = 0; i < expected_result.size(); i++) diff --git a/ngraph/test/op_eval/hsigmoid.cpp b/ngraph/test/op_eval/hsigmoid.cpp index 58e67e8baa35f8..17763841d131c6 100644 --- a/ngraph/test/op_eval/hsigmoid.cpp +++ b/ngraph/test/op_eval/hsigmoid.cpp @@ -30,7 +30,7 @@ using namespace ngraph; TEST(op_eval, hsigmoid) { - auto p = make_shared(element::f32, Shape{3}); + auto p = make_shared(element::Type_t::f32, Shape{3}); auto swish = make_shared(p); auto fun = make_shared(OutputVector{swish}, ParameterVector{p}); @@ -40,7 +40,7 @@ TEST(op_eval, hsigmoid) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{3}, inputs)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{3}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) diff --git a/ngraph/test/op_eval/hswish.cpp b/ngraph/test/op_eval/hswish.cpp index 1de6087f5f4e52..3091b208a25ae9 100644 --- a/ngraph/test/op_eval/hswish.cpp +++ b/ngraph/test/op_eval/hswish.cpp @@ -30,7 +30,7 @@ using namespace ngraph; TEST(op_eval, hswish) { - auto p = make_shared(element::f32, Shape{3}); + auto p = make_shared(element::Type_t::f32, Shape{3}); auto swish = make_shared(p); auto fun = make_shared(OutputVector{swish}, ParameterVector{p}); @@ -40,7 +40,7 @@ TEST(op_eval, hswish) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{3}, inputs)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{3}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) diff --git a/ngraph/test/op_eval/interpolate.cpp b/ngraph/test/op_eval/interpolate.cpp index 239bcacbaaa5a3..aaa637341f8aaa 100644 --- a/ngraph/test/op_eval/interpolate.cpp +++ b/ngraph/test/op_eval/interpolate.cpp @@ -165,11 +165,11 @@ TEST(op_eval, interpolate_v4_cubic) std::size_t i = 0; for (const auto& s : shapes_and_attrs) { - auto image = std::make_shared(element::f32, data_shape); + auto image = std::make_shared(element::Type_t::f32, data_shape); auto target_spatial_shape = - op::Constant::create(element::i64, Shape{2}, s.spatial_shape); - auto scales = op::Constant::create(element::f32, Shape{2}, s.scales_data); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + op::Constant::create(element::Type_t::i64, Shape{2}, s.spatial_shape); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, s.scales_data); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::cubic; @@ -187,7 +187,7 @@ 
TEST(op_eval, interpolate_v4_cubic) auto result = std::make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(data_shape, input_data)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), s.out_shape); auto result_vector = read_vector(result); std::size_t num_of_elems = shape_size(s.out_shape); @@ -377,11 +377,11 @@ TEST(op_eval, interpolate_v4_nearest) std::size_t i = 0; for (const auto& s : shapes_and_attrs) { - auto image = std::make_shared(element::f32, s.input_data_shape); + auto image = std::make_shared(element::Type_t::f32, s.input_data_shape); auto target_spatial_shape = - op::Constant::create(element::i64, Shape{2}, s.spatial_shape); - auto scales = op::Constant::create(element::f32, Shape{2}, s.scales_data); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + op::Constant::create(element::Type_t::i64, Shape{2}, s.spatial_shape); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, s.scales_data); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::nearest; @@ -400,7 +400,7 @@ TEST(op_eval, interpolate_v4_nearest) ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(s.input_data_shape, input_data_list[i])})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), s.out_shape); auto result_vector = read_vector(result); std::size_t num_of_elems = shape_size(s.out_shape); @@ -523,11 +523,11 @@ TEST(op_eval, interpolate_v4_linear_onnx) std::size_t i = 0; for (const auto& s : shapes_and_attrs) { - auto image = std::make_shared(element::f32, s.input_data_shape); + auto image = std::make_shared(element::Type_t::f32, s.input_data_shape); auto target_spatial_shape = - op::Constant::create(element::i64, Shape{2}, s.spatial_shape); - auto scales = op::Constant::create(element::f32, Shape{2}, s.scales_data); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + op::Constant::create(element::Type_t::i64, Shape{2}, s.spatial_shape); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, s.scales_data); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::linear_onnx; @@ -546,7 +546,7 @@ TEST(op_eval, interpolate_v4_linear_onnx) ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(s.input_data_shape, input_data_list[i])})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), s.out_shape); auto result_vector = read_vector(result); std::size_t num_of_elems = shape_size(s.out_shape); diff --git a/ngraph/test/op_eval/matmul.cpp b/ngraph/test/op_eval/matmul.cpp index b74c02a8299be2..265fdc96cd2b4e 100644 --- a/ngraph/test/op_eval/matmul.cpp +++ b/ngraph/test/op_eval/matmul.cpp @@ -28,8 +28,8 @@ using namespace ngraph; TEST(op_eval, matmul_dynamic_1D_arg) { - auto arg0 = make_shared(element::i32, PartialShape::dynamic()); - auto arg1 = make_shared(element::i32, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i32, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, false); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -59,8 +59,8 @@ 
TEST(op_eval, matmul_dynamic_1D_arg) TEST(op_eval, matmul_dynamic_0_elem_arg) { - auto arg0 = make_shared(element::f32, PartialShape::dynamic()); - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, false); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -85,8 +85,8 @@ TEST(op_eval, matmul_dynamic_0_elem_arg) TEST(op_eval, matmul_dynamic_2D_args) { - auto arg0 = make_shared(element::f32, PartialShape::dynamic()); - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, false); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -107,8 +107,8 @@ TEST(op_eval, matmul_dynamic_2D_args) TEST(op_eval, matmul_dynamic_2D_transpose0) { - auto arg0 = make_shared(element::f32, PartialShape::dynamic()); - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, true, false); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -128,8 +128,8 @@ TEST(op_eval, matmul_dynamic_2D_transpose0) TEST(op_eval, matmul_dynamic_2D_transpose1) { - auto arg0 = make_shared(element::f32, PartialShape::dynamic()); - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, true); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -149,8 +149,8 @@ TEST(op_eval, matmul_dynamic_2D_transpose1) TEST(op_eval, matmul_dynamic_same_batch_size) { - auto arg0 = make_shared(element::f32, PartialShape::dynamic()); - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, false); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -177,8 +177,8 @@ TEST(op_eval, matmul_dynamic_same_batch_size) TEST(op_eval, matmul_dynamic_broadcast) { - auto arg0 = make_shared(element::i64, PartialShape::dynamic()); - auto arg1 = make_shared(element::i64, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, false); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -229,8 +229,8 @@ TEST(op_eval, matmul_dynamic_broadcast) TEST(op_eval, matmul_dynamic_broadcast_transpose0) { - auto arg0 = make_shared(element::i64, PartialShape::dynamic()); - auto arg1 = make_shared(element::i64, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, true, false); auto fun = make_shared(OutputVector{matmul}, 
ParameterVector{arg0, arg1}); @@ -265,8 +265,8 @@ TEST(op_eval, matmul_dynamic_broadcast_transpose0) TEST(op_eval, matmul_dynamic_broadcast_transpose1) { - auto arg0 = make_shared(element::i64, PartialShape::dynamic()); - auto arg1 = make_shared(element::i64, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, true); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); diff --git a/ngraph/test/op_eval/mish.cpp b/ngraph/test/op_eval/mish.cpp index acc81f0e95f17d..2fb4251d155574 100644 --- a/ngraph/test/op_eval/mish.cpp +++ b/ngraph/test/op_eval/mish.cpp @@ -31,7 +31,7 @@ using namespace ngraph; TEST(op_eval, mish_0D) { - auto p = make_shared(element::f32, Shape{}); + auto p = make_shared(element::Type_t::f32, Shape{}); auto mish = make_shared(p); auto fun = make_shared(OutputVector{mish}, ParameterVector{p}); @@ -43,7 +43,7 @@ TEST(op_eval, mish_0D) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{}, inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), (Shape{})); auto result_data = read_vector(result); EXPECT_NEAR(result_data[0], expected_result[i][0], 0.000001); diff --git a/ngraph/test/op_eval/non_zero.cpp b/ngraph/test/op_eval/non_zero.cpp index 52f6aa6b607f4e..1d56e80bac002b 100644 --- a/ngraph/test/op_eval/non_zero.cpp +++ b/ngraph/test/op_eval/non_zero.cpp @@ -31,8 +31,8 @@ using namespace ngraph; TEST(op_eval, non_zero_0D) { - auto p = make_shared(element::i32, Shape{}); - auto non_zero = make_shared(p, element::i64); + auto p = make_shared(element::Type_t::i32, Shape{}); + auto non_zero = make_shared(p, element::Type_t::i64); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector> inputs{{-1}, {1}, {20}}; @@ -43,7 +43,7 @@ TEST(op_eval, non_zero_0D) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{}, inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), (Shape{1, 1})); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result[i]); @@ -52,13 +52,13 @@ TEST(op_eval, non_zero_0D) TEST(op_eval, non_zero_0D_0) { - auto p = make_shared(element::i32, Shape{}); - auto non_zero = make_shared(p, element::i64); + auto p = make_shared(element::Type_t::i32, Shape{}); + auto non_zero = make_shared(p, element::Type_t::i64); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), (Shape{0, 0})); auto result_data = read_vector(result); ASSERT_EQ(result_data.data(), nullptr); @@ -67,8 +67,8 @@ TEST(op_eval, non_zero_0D_0) TEST(op_eval, non_zero_1D) { Shape p_shape{5}; - auto p = make_shared(element::f32, p_shape); - auto non_zero = make_shared(p, element::i32); + auto p = make_shared(element::Type_t::f32, p_shape); + auto non_zero = make_shared(p, element::Type_t::i32); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector> inputs{ {1.0, 0, 3.0, 4.0, 0}, {0, 0, 0, 1.0, 3.2}, {1.0, 1.0, 
1.0, 1.0, 1.0}}; @@ -79,7 +79,7 @@ TEST(op_eval, non_zero_1D) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(p_shape, inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::i32); + EXPECT_EQ(result->get_element_type(), element::Type_t::i32); EXPECT_EQ(result->get_shape(), expected_output_shape[i]); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result[i]); @@ -89,14 +89,14 @@ TEST(op_eval, non_zero_1D) TEST(op_eval, non_zero_1D_0s) { Shape p_shape{5}; - auto p = make_shared(element::f32, p_shape); - auto non_zero = make_shared(p, element::i64); + auto p = make_shared(element::Type_t::f32, p_shape); + auto non_zero = make_shared(p, element::Type_t::i64); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector input(shape_size(p_shape), 0); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(p_shape, input)})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), (Shape{1, 0})); auto result_data = read_vector(result); ASSERT_EQ(result_data.data(), nullptr); @@ -105,7 +105,7 @@ TEST(op_eval, non_zero_1D_0s) TEST(op_eval, non_zero_2D) { Shape p_shape{3, 2}; - auto p = make_shared(element::i32, p_shape); + auto p = make_shared(element::Type_t::i32, p_shape); auto non_zero = make_shared(p); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector> inputs{ @@ -118,7 +118,7 @@ TEST(op_eval, non_zero_2D) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(p_shape, inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_output_shape[i]); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result[i]); @@ -128,8 +128,8 @@ TEST(op_eval, non_zero_2D) TEST(op_eval, non_zero_3D) { Shape p_shape{3, 2, 2}; - auto p = make_shared(element::i64, p_shape); - auto non_zero = make_shared(p, element::i32); + auto p = make_shared(element::Type_t::i64, p_shape); + auto non_zero = make_shared(p, element::Type_t::i32); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector> inputs{{1, 0, 3, 4, 0, 1, 0, 0, 1, 3, 5, 0}, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}; @@ -143,7 +143,7 @@ TEST(op_eval, non_zero_3D) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(p_shape, inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::i32); + EXPECT_EQ(result->get_element_type(), element::Type_t::i32); EXPECT_EQ(result->get_shape(), expected_output_shape[i]); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result[i]); @@ -153,14 +153,14 @@ TEST(op_eval, non_zero_3D) TEST(op_eval, non_zero_3D_0s) { Shape p_shape{3, 2, 2}; - auto p = make_shared(element::i64, p_shape); - auto non_zero = make_shared(p, element::i32); + auto p = make_shared(element::Type_t::i64, p_shape); + auto non_zero = make_shared(p, element::Type_t::i32); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector input(shape_size(p_shape), 0); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(p_shape, input)})); - EXPECT_EQ(result->get_element_type(), element::i32); + EXPECT_EQ(result->get_element_type(), element::Type_t::i32); EXPECT_EQ(result->get_shape(), (Shape{p_shape.size(), 0})); auto result_data = 
read_vector(result); ASSERT_EQ(result_data.data(), nullptr); @@ -169,7 +169,7 @@ TEST(op_eval, non_zero_3D_0s) TEST(op_eval, non_zero_dynamic) { PartialShape p_shape = PartialShape::dynamic(); - auto p = make_shared(element::i32, p_shape); + auto p = make_shared(element::Type_t::i32, p_shape); auto non_zero = make_shared(p); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector> inputs{ @@ -182,7 +182,7 @@ TEST(op_eval, non_zero_dynamic) auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(input_shapes[i], inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_output_shape[i]); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result[i]); diff --git a/ngraph/test/op_eval/reduce_l1.cpp b/ngraph/test/op_eval/reduce_l1.cpp index 544b31fc447697..ed49497178346f 100644 --- a/ngraph/test/op_eval/reduce_l1.cpp +++ b/ngraph/test/op_eval/reduce_l1.cpp @@ -30,8 +30,8 @@ using namespace ngraph; TEST(op_eval, reduce_l1_one_axis_keep_dims) { - auto data = make_shared(element::f32, Shape{3, 2, 2}); - auto axes = opset4::Constant::create(element::i32, Shape{1}, {2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 2, 2}); + auto axes = opset4::Constant::create(element::Type_t::i32, Shape{1}, {2}); auto reduce = make_shared(data, axes, true); auto fun = make_shared(OutputVector{reduce}, ParameterVector{data}); @@ -42,7 +42,7 @@ TEST(op_eval, reduce_l1_one_axis_keep_dims) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3, 2, 2}, inputs), make_host_tensor(Shape{1}, {2})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{std::vector({3, 2, 1})}); auto result_data = read_vector(result); for (auto i = 0; i < expected_result.size(); i++) @@ -51,8 +51,8 @@ TEST(op_eval, reduce_l1_one_axis_keep_dims) TEST(op_eval, reduce_l1_one_axis_do_not_keep_dims) { - auto data = make_shared(element::f32, Shape{3, 2, 2}); - auto axes = opset4::Constant::create(element::i32, Shape{1}, {2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 2, 2}); + auto axes = opset4::Constant::create(element::Type_t::i32, Shape{1}, {2}); auto reduce = make_shared(data, axes, false); auto fun = make_shared(OutputVector{reduce}, ParameterVector{data}); @@ -63,7 +63,7 @@ TEST(op_eval, reduce_l1_one_axis_do_not_keep_dims) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3, 2, 2}, inputs), make_host_tensor(Shape{1}, {2})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{std::vector({3, 2})}); auto result_data = read_vector(result); for (auto i = 0; i < expected_result.size(); i++) diff --git a/ngraph/test/op_eval/reduce_l2.cpp b/ngraph/test/op_eval/reduce_l2.cpp index d79bf067b7dc78..d718e1a36854d7 100644 --- a/ngraph/test/op_eval/reduce_l2.cpp +++ b/ngraph/test/op_eval/reduce_l2.cpp @@ -30,8 +30,8 @@ using namespace ngraph; TEST(op_eval, reduce_l2_one_axis_keep_dims) { - auto data = make_shared(element::f32, Shape{3, 2, 2}); - auto axes = opset4::Constant::create(element::i32, Shape{1}, {2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 2, 2}); + auto axes = opset4::Constant::create(element::Type_t::i32, Shape{1}, {2}); auto reduce = make_shared(data, axes, true); auto fun = 
make_shared(OutputVector{reduce}, ParameterVector{data}); @@ -43,7 +43,7 @@ TEST(op_eval, reduce_l2_one_axis_keep_dims) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3, 2, 2}, inputs), make_host_tensor(Shape{1}, {2})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{std::vector({3, 2, 1})}); auto result_data = read_vector(result); for (auto i = 0; i < expected_result.size(); i++) @@ -52,8 +52,8 @@ TEST(op_eval, reduce_l2_one_axis_keep_dims) TEST(op_eval, reduce_l2_one_axis_do_not_keep_dims) { - auto data = make_shared(element::f32, Shape{3, 2, 2}); - auto axes = opset4::Constant::create(element::i32, Shape{1}, {2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 2, 2}); + auto axes = opset4::Constant::create(element::Type_t::i32, Shape{1}, {2}); auto reduce = make_shared(data, axes, false); auto fun = make_shared(OutputVector{reduce}, ParameterVector{data}); @@ -65,7 +65,7 @@ TEST(op_eval, reduce_l2_one_axis_do_not_keep_dims) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3, 2, 2}, inputs), make_host_tensor(Shape{1}, {2})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{std::vector({3, 2})}); auto result_data = read_vector(result); for (auto i = 0; i < expected_result.size(); i++) diff --git a/ngraph/test/op_eval/roi_align.cpp b/ngraph/test/op_eval/roi_align.cpp index 3e43e1f810ddd9..9e556671a4031f 100644 --- a/ngraph/test/op_eval/roi_align.cpp +++ b/ngraph/test/op_eval/roi_align.cpp @@ -42,9 +42,9 @@ TEST(op_eval, roi_align_avg_pool) const auto data_shape = Shape{N, C, H, W}; const auto rois_shape = Shape{num_rois, 4}; - const auto data = make_shared(element::f32, data_shape); - const auto rois = make_shared(element::f32, rois_shape); - const auto batch_indices = make_shared(element::i32, Shape{num_rois}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto rois = make_shared(element::Type_t::f32, rois_shape); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{num_rois}); auto roi_align = make_shared( data, rois, batch_indices, pooled_height, pooled_width, 2, 1.0f / 16.0f, "avg"); @@ -93,7 +93,7 @@ TEST(op_eval, roi_align_avg_pool) 56.8021f, 58.4375f, 58.4375f, 58.4375f, 58.4688f, 60.1042f, 60.1042f, 60.1042f, 60.1354f}; const auto expected_shape = Shape{num_rois, C, pooled_height, pooled_width}; - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), expected_shape); ASSERT_TRUE(test::all_close_f(read_vector(result), expected_vec, 6, 0.001)); } @@ -109,9 +109,9 @@ TEST(op_eval, roi_align_max_pool) const auto data_shape = Shape{N, C, H, W}; const auto rois_shape = Shape{num_rois, 4}; - const auto data = make_shared(element::f32, data_shape); - const auto rois = make_shared(element::f32, rois_shape); - const auto batch_indices = make_shared(element::i32, Shape{num_rois}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto rois = make_shared(element::Type_t::f32, rois_shape); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{num_rois}); auto roi_align = make_shared( data, rois, batch_indices, pooled_height, pooled_width, 2, 1.0f / 16.0f, "max"); @@ -160,7 +160,7 @@ TEST(op_eval, roi_align_max_pool) 40.1042f, 46.25f, 46.25f, 46.25f, 46.25f, 56.25f, 56.25f, 
56.25f, 56.25f};
     const auto expected_shape = Shape{num_rois, C, pooled_height, pooled_width};
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_shape(), expected_shape);
     ASSERT_TRUE(test::all_close_f(read_vector(result), expected_vec, 6, 0.001));
-}
\ No newline at end of file
+}
diff --git a/ngraph/test/op_eval/roi_pooling.cpp b/ngraph/test/op_eval/roi_pooling.cpp
new file mode 100644
index 00000000000000..00ed56cc87649a
--- /dev/null
+++ b/ngraph/test/op_eval/roi_pooling.cpp
@@ -0,0 +1,64 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include "gtest/gtest.h"
+#include "ngraph/ngraph.hpp"
+#include "util/engine/interpreter_engine.hpp"
+#include "util/engine/test_engines.hpp"
+#include "util/test_case.hpp"
+#include "util/test_control.hpp"
+
+using namespace std;
+using namespace ngraph;
+
+static string s_manifest = "${MANIFEST}";
+
+NGRAPH_TEST(op_eval, roi_pooling_invalid_roi_batch_id)
+{
+    const int H = 6;
+    const int W = 6;
+    const int image_size = H * W;
+    const int channels = 1;
+    const int num_rois = 1;
+
+    const int pooled_h = 1;
+    const int pooled_w = 1;
+    const float spatial_scale = 1.f;
+
+    Shape feat_maps_shape{1, channels, H, W};
+    Shape rois_shape{num_rois, 5};
+    Shape pooled_shape{pooled_h, pooled_w};
+    Shape output_shape{num_rois, channels, pooled_h, pooled_w};
+
+    const auto feat_maps = make_shared(element::Type_t::f32, feat_maps_shape);
+    const auto rois = make_shared(element::Type_t::f32, rois_shape);
+    const auto roi_pooling =
+        make_shared(feat_maps, rois, pooled_shape, spatial_scale, "max");
+    const auto f = make_shared(roi_pooling, ParameterVector{feat_maps, rois});
+
+    vector feat_maps_vect;
+    for (unsigned int i = 0; i < channels * image_size; i++)
+    {
+        feat_maps_vect.push_back(1.f * i / 10);
+    }
+
+    auto test_case = test::TestCase(f);
+    test_case.add_input(feat_maps_shape, feat_maps_vect);
+    // ROI with invalid batch id, should throw exception
+    test_case.add_input(rois_shape, {-1, 1, 1, 2, 3});
+    test_case.add_expected_output(output_shape, {2.0f});
+    ASSERT_THROW(test_case.run(), ngraph::CheckFailure);
+}
diff --git a/ngraph/test/op_eval/round.cpp b/ngraph/test/op_eval/round.cpp
index e9807aa8047ed5..a933b74ce3e6a1 100644
--- a/ngraph/test/op_eval/round.cpp
+++ b/ngraph/test/op_eval/round.cpp
@@ -30,7 +30,7 @@ using namespace ngraph;
 TEST(op_eval, rounding_to_even)
 {
-    auto p = make_shared(element::f32, Shape{9});
+    auto p = make_shared(element::Type_t::f32, Shape{9});
     auto round = make_shared(p, op::v5::Round::RoundMode::HALF_TO_EVEN);
     auto fun = make_shared(OutputVector{round}, ParameterVector{p});
@@ -40,7 +40,7 @@ TEST(op_eval, rounding_to_even)
     auto result = make_shared();
     ASSERT_TRUE(
         fun->evaluate({result}, {make_host_tensor(Shape{9}, inputs)}));
-
EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{9}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) @@ -49,7 +49,7 @@ TEST(op_eval, rounding_to_even) TEST(op_eval, rounding_away) { - auto p = make_shared(element::f32, Shape{9}); + auto p = make_shared(element::Type_t::f32, Shape{9}); auto round = make_shared(p, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO); auto fun = make_shared(OutputVector{round}, ParameterVector{p}); @@ -59,7 +59,7 @@ TEST(op_eval, rounding_away) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{9}, inputs)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{9}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) diff --git a/ngraph/test/op_eval/softplus.cpp b/ngraph/test/op_eval/softplus.cpp index 5404e741056fbc..3e8ec24c724a9e 100644 --- a/ngraph/test/op_eval/softplus.cpp +++ b/ngraph/test/op_eval/softplus.cpp @@ -30,7 +30,7 @@ using namespace ngraph; TEST(op_eval, softplus_4D) { - auto p = make_shared(element::f32, Shape{4}); + auto p = make_shared(element::Type_t::f32, Shape{4}); auto softplus = make_shared(p); auto fun = make_shared(OutputVector{softplus}, ParameterVector{p}); @@ -40,7 +40,7 @@ TEST(op_eval, softplus_4D) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{4}, inputs)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{4}); auto result_data = read_vector(result); for (size_t i = 0; i < inputs.size(); i++) diff --git a/ngraph/test/op_eval/split.cpp b/ngraph/test/op_eval/split.cpp index 7f806303cc6d25..0e538f76f34cd8 100644 --- a/ngraph/test/op_eval/split.cpp +++ b/ngraph/test/op_eval/split.cpp @@ -32,8 +32,8 @@ using namespace ngraph; TEST(op_eval, split) { const auto data_shape = Shape{3, 8, 3}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = make_shared(element::i64, Shape{}); + const auto data = make_shared(element::Type_t::i64, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); const size_t num_splits = 4; auto split = make_shared(data, axis, num_splits); @@ -61,7 +61,7 @@ TEST(op_eval, split) for (int i = 0; i < num_splits; ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{3, 2, 3})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -70,8 +70,8 @@ TEST(op_eval, split) TEST(op_eval, split_neg_axis) { const auto data_shape = Shape{2, 1, 4, 1}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = make_shared(element::i64, Shape{}); + const auto data = make_shared(element::Type_t::i64, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); const size_t num_splits = 4; auto split = make_shared(data, axis, num_splits); @@ -95,7 +95,7 @@ TEST(op_eval, split_neg_axis) for (int i = 0; i < num_splits; ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, 1, 1})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -104,8 +104,8 
@@ TEST(op_eval, split_neg_axis) TEST(op_eval, split_boolean_type) { const auto data_shape = Shape{2, 1, 2, 1, 2}; - const auto data = make_shared(element::boolean, data_shape); - const auto axis = make_shared(element::i64, Shape{}); + const auto data = make_shared(element::Type_t::boolean, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); const size_t num_splits = 2; auto split = make_shared(data, axis, num_splits); @@ -129,7 +129,7 @@ TEST(op_eval, split_boolean_type) for (int i = 0; i < num_splits; ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::boolean); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::boolean); EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, 1, 1, 2})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -138,8 +138,8 @@ TEST(op_eval, split_boolean_type) TEST(op_eval, split_1d) { const auto data_shape = Shape{8}; - const auto data = make_shared(element::f32, data_shape); - const auto axis = make_shared(element::i64, Shape{}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); const size_t num_splits = 4; auto split = make_shared(data, axis, num_splits); @@ -164,7 +164,7 @@ TEST(op_eval, split_1d) for (int i = 0; i < num_splits; ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::f32); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::f32); EXPECT_EQ(results[i]->get_shape(), (Shape{2})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } diff --git a/ngraph/test/op_eval/strided_slice.cpp b/ngraph/test/op_eval/strided_slice.cpp index f9229a1dbaacc6..4d9f7c00c51ad6 100644 --- a/ngraph/test/op_eval/strided_slice.cpp +++ b/ngraph/test/op_eval/strided_slice.cpp @@ -32,10 +32,10 @@ using namespace ngraph; TEST(op_eval, strided_slice1) { auto A_shape = Shape{3, 2, 3}; - auto A = make_shared(element::i64, A_shape); - auto begin = make_shared(element::i64, Shape{3}); - auto end = make_shared(element::i64, Shape{3}); - auto strides = make_shared(element::i64, Shape{3}); + auto A = make_shared(element::Type_t::i64, A_shape); + auto begin = make_shared(element::Type_t::i64, Shape{3}); + auto end = make_shared(element::Type_t::i64, Shape{3}); + auto strides = make_shared(element::Type_t::i64, Shape{3}); auto r = make_shared(A, begin, end, @@ -66,7 +66,7 @@ TEST(op_eval, strided_slice1) make_host_tensor(Shape{3}, begin_vecs[i]), make_host_tensor(Shape{3}, end_vecs[i]), make_host_tensor(Shape{3}, strides_vecs[i])})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_shape[i]); EXPECT_EQ(read_vector(result), expected_results[i]); } @@ -89,10 +89,10 @@ TEST(op_eval, strided_slice1) TEST(op_eval, strided_slice2) { auto A_shape = Shape{3, 2, 3}; - auto A = make_shared(element::i64, A_shape); - auto begin = make_shared(element::i64, Shape{3}); - auto end = make_shared(element::i64, Shape{3}); - auto strides = make_shared(element::i64, Shape{3}); + auto A = make_shared(element::Type_t::i64, A_shape); + auto begin = make_shared(element::Type_t::i64, Shape{3}); + auto end = make_shared(element::Type_t::i64, Shape{3}); + auto strides = make_shared(element::Type_t::i64, Shape{3}); std::vector begin_vec{1, 0, 0}; std::vector end_vec{0, 0, 0}; @@ -123,7 +123,7 @@ TEST(op_eval, strided_slice2) make_host_tensor(Shape{3}, begin_vec), make_host_tensor(Shape{3}, end_vec), make_host_tensor(Shape{3}, strides_vec)})); - 
EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_shape); EXPECT_EQ(read_vector(result), expected); } @@ -136,10 +136,10 @@ TEST(op_eval, strided_slice2) TEST(op_eval, strided_slice3) { auto A_shape = Shape{3, 2, 3}; - auto A = make_shared(element::i64, A_shape); - auto begin = make_shared(element::i64, Shape{3}); - auto end = make_shared(element::i64, Shape{3}); - auto strides = make_shared(element::i64, Shape{3}); + auto A = make_shared(element::Type_t::i64, A_shape); + auto begin = make_shared(element::Type_t::i64, Shape{3}); + auto end = make_shared(element::Type_t::i64, Shape{3}); + auto strides = make_shared(element::Type_t::i64, Shape{3}); std::vector begin_vec{0, 1, 0}; std::vector end_vec{2, 0, 0}; @@ -170,7 +170,7 @@ TEST(op_eval, strided_slice3) make_host_tensor(Shape{3}, begin_vec), make_host_tensor(Shape{3}, end_vec), make_host_tensor(Shape{3}, strides_vec)})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_shape); EXPECT_EQ(read_vector(result), expected); } @@ -183,10 +183,10 @@ TEST(op_eval, strided_slice3) TEST(op_eval, strided_slice_reverse) { auto A_shape = Shape{3, 2, 3}; - auto A = make_shared(element::i64, A_shape); - auto begin = make_shared(element::i64, Shape{3}); - auto end = make_shared(element::i64, Shape{3}); - auto strides = make_shared(element::i64, Shape{3}); + auto A = make_shared(element::Type_t::i64, A_shape); + auto begin = make_shared(element::Type_t::i64, Shape{3}); + auto end = make_shared(element::Type_t::i64, Shape{3}); + auto strides = make_shared(element::Type_t::i64, Shape{3}); std::vector begin_vec{0, 0, 0}; std::vector end_vec{1, 0, 0}; @@ -217,7 +217,7 @@ TEST(op_eval, strided_slice_reverse) make_host_tensor(Shape{3}, begin_vec), make_host_tensor(Shape{3}, end_vec), make_host_tensor(Shape{3}, strides_vec)})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_shape); EXPECT_EQ(read_vector(result), expected); } diff --git a/ngraph/test/op_eval/swish.cpp b/ngraph/test/op_eval/swish.cpp index 26997dfc0fb5b7..9d57235b5884a3 100644 --- a/ngraph/test/op_eval/swish.cpp +++ b/ngraph/test/op_eval/swish.cpp @@ -30,8 +30,8 @@ using namespace ngraph; TEST(op_eval, swish_with_beta1) { - auto p = make_shared(element::f32, Shape{3}); - auto beta = make_shared(element::f32, Shape{}); + auto p = make_shared(element::Type_t::f32, Shape{3}); + auto beta = make_shared(element::Type_t::f32, Shape{}); auto swish = make_shared(p, beta); auto fun = make_shared(OutputVector{swish}, ParameterVector{p, beta}); @@ -42,7 +42,7 @@ TEST(op_eval, swish_with_beta1) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3}, inputs), make_host_tensor(Shape{}, {1.0})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{3}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) @@ -51,8 +51,8 @@ TEST(op_eval, swish_with_beta1) TEST(op_eval, swish_with_beta0_75) { - auto p = make_shared(element::f32, Shape{3}); - auto beta = make_shared(element::f32, Shape{}); + auto p = make_shared(element::Type_t::f32, Shape{3}); + auto beta = make_shared(element::Type_t::f32, Shape{}); auto swish = make_shared(p, beta); auto fun = 
make_shared(OutputVector{swish}, ParameterVector{p, beta}); @@ -63,7 +63,7 @@ TEST(op_eval, swish_with_beta0_75) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3}, inputs), make_host_tensor(Shape{}, {0.75})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{3}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) @@ -72,7 +72,7 @@ TEST(op_eval, swish_with_beta0_75) TEST(op_eval, swish_without_beta) { - auto p = make_shared(element::f32, Shape{3}); + auto p = make_shared(element::Type_t::f32, Shape{3}); auto swish = make_shared(p); auto fun = make_shared(OutputVector{swish}, ParameterVector{p}); @@ -82,7 +82,7 @@ TEST(op_eval, swish_without_beta) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{3}, inputs)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{3}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) diff --git a/ngraph/test/op_eval/variadic_split.cpp b/ngraph/test/op_eval/variadic_split.cpp index ff8942dbd231fe..40b8ec9ad8871a 100644 --- a/ngraph/test/op_eval/variadic_split.cpp +++ b/ngraph/test/op_eval/variadic_split.cpp @@ -32,9 +32,9 @@ using namespace ngraph; TEST(op_eval, variadic_split_same_lengths) { const auto data_shape = Shape{3, 8, 3}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = make_shared(element::i64, Shape{}); - const auto split_lengths = make_shared(element::i64, Shape{4}); + const auto data = make_shared(element::Type_t::i64, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i64, Shape{4}); auto var_split = make_shared(data, axis, split_lengths); @@ -62,7 +62,7 @@ TEST(op_eval, variadic_split_same_lengths) for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{3, static_cast(split_lengths_vec[i]), 3})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); @@ -72,9 +72,9 @@ TEST(op_eval, variadic_split_same_lengths) TEST(op_eval, variadic_split_different_lengths) { const auto data_shape = Shape{6, 2, 3}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = make_shared(element::i64, Shape{}); - const auto split_lengths = make_shared(element::i64, Shape{3}); + const auto data = make_shared(element::Type_t::i64, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i64, Shape{3}); auto var_split = make_shared(data, axis, split_lengths); @@ -101,7 +101,7 @@ TEST(op_eval, variadic_split_different_lengths) for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{static_cast(split_lengths_vec[i]), 2, 3})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); @@ -111,9 +111,9 @@ TEST(op_eval, variadic_split_different_lengths) TEST(op_eval, variadic_split_neg_length) { const auto data_shape = Shape{2, 7, 1}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = 
make_shared(element::i64, Shape{}); - const auto split_lengths = make_shared(element::i64, Shape{3}); + const auto data = make_shared(element::Type_t::i64, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i64, Shape{3}); auto var_split = make_shared(data, axis, split_lengths); @@ -139,7 +139,7 @@ TEST(op_eval, variadic_split_neg_length) const vector expected_lengths{3, 1, 3}; for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{2, expected_lengths[i], 1})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -148,9 +148,9 @@ TEST(op_eval, variadic_split_neg_length) TEST(op_eval, variadic_split_neg_length_neg_axis) { const auto data_shape = Shape{2, 1, 5, 2}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = make_shared(element::i64, Shape{}); - const auto split_lengths = make_shared(element::i64, Shape{3}); + const auto data = make_shared(element::Type_t::i64, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i64, Shape{3}); auto var_split = make_shared(data, axis, split_lengths); @@ -176,7 +176,7 @@ TEST(op_eval, variadic_split_neg_length_neg_axis) const vector expected_lengths{1, 2, 2}; for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, expected_lengths[i], 2})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -185,9 +185,9 @@ TEST(op_eval, variadic_split_neg_length_neg_axis) TEST(op_eval, variadic_split_neg_length_bool_data_type) { const auto data_shape = Shape{2, 1, 5}; - const auto data = make_shared(element::boolean, data_shape); - const auto axis = make_shared(element::i64, Shape{}); - const auto split_lengths = make_shared(element::i64, Shape{3}); + const auto data = make_shared(element::Type_t::boolean, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i64, Shape{3}); auto var_split = make_shared(data, axis, split_lengths); @@ -212,7 +212,7 @@ TEST(op_eval, variadic_split_neg_length_bool_data_type) const vector expected_lengths{1, 2, 2}; for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::boolean); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::boolean); EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, expected_lengths[i]})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -221,9 +221,9 @@ TEST(op_eval, variadic_split_neg_length_bool_data_type) TEST(op_eval, variadic_split_neg_length_axis_ui64) { const auto data_shape = Shape{2, 1, 4, 2}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = make_shared(element::u64, Shape{}); - const auto split_lengths = make_shared(element::i64, Shape{2}); + const auto data = make_shared(element::Type_t::i64, data_shape); + const auto axis = make_shared(element::Type_t::u64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i64, Shape{2}); auto var_split = make_shared(data, axis, split_lengths); @@ -250,7 +250,7 @@ TEST(op_eval, variadic_split_neg_length_axis_ui64) const vector 
expected_lengths{2, 2}; for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, expected_lengths[i], 2})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -259,9 +259,9 @@ TEST(op_eval, variadic_split_neg_length_axis_ui64) TEST(op_eval, variadic_split_data_float_length_i32) { const auto data_shape = Shape{2, 3, 3}; - const auto data = make_shared(element::f32, data_shape); - const auto axis = make_shared(element::i64, Shape{}); - const auto split_lengths = make_shared(element::i32, Shape{3}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i32, Shape{3}); auto var_split = make_shared(data, axis, split_lengths); @@ -288,7 +288,7 @@ TEST(op_eval, variadic_split_data_float_length_i32) const vector expected_lengths{1, 1, 1}; for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::f32); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::f32); EXPECT_EQ(results[i]->get_shape(), (Shape{2, 3, expected_lengths[i]})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } diff --git a/ngraph/test/partial_shape.cpp b/ngraph/test/partial_shape.cpp index 390f7a35201625..e57ab91aacdf50 100644 --- a/ngraph/test/partial_shape.cpp +++ b/ngraph/test/partial_shape.cpp @@ -218,7 +218,7 @@ TEST(partial_shape, to_shape_rank_dynamic) TEST(partial_shape, tensor_descriptor_from_shape) { - descriptor::Tensor t{element::i32, Shape{1, 2, 3}, "Ankeny"}; + descriptor::Tensor t{element::Type_t::i32, Shape{1, 2, 3}, "Ankeny"}; ASSERT_EQ(t.get_shape(), (Shape{1, 2, 3})); ASSERT_EQ(t.get_partial_shape().rank().get_length(), 3); @@ -227,7 +227,7 @@ TEST(partial_shape, tensor_descriptor_from_shape) TEST(partial_shape, tensor_descriptor_from_static_partial_shape) { - descriptor::Tensor t{element::i32, PartialShape{1, 2, 3}, "Burnside"}; + descriptor::Tensor t{element::Type_t::i32, PartialShape{1, 2, 3}, "Burnside"}; ASSERT_EQ(t.get_shape(), (Shape{1, 2, 3})); ASSERT_EQ(t.get_partial_shape().rank().get_length(), 3); @@ -236,7 +236,7 @@ TEST(partial_shape, tensor_descriptor_from_static_partial_shape) TEST(partial_shape, tensor_descriptor_from_rank_static_dynamic_partial_shape) { - descriptor::Tensor t{element::i32, PartialShape{1, Dimension::dynamic(), 3}, "Couch"}; + descriptor::Tensor t{element::Type_t::i32, PartialShape{1, Dimension::dynamic(), 3}, "Couch"}; ASSERT_EQ(t.get_partial_shape().rank().get_length(), 3); ASSERT_THROW({ t.get_shape(); }, std::invalid_argument); @@ -245,7 +245,7 @@ TEST(partial_shape, tensor_descriptor_from_rank_static_dynamic_partial_shape) TEST(partial_shape, tensor_descriptor_from_rank_dynamic_partial_shape) { - descriptor::Tensor t{element::i32, PartialShape::dynamic(), "Davis"}; + descriptor::Tensor t{element::Type_t::i32, PartialShape::dynamic(), "Davis"}; ASSERT_TRUE(t.get_partial_shape().rank().is_dynamic()); ASSERT_THROW({ t.get_shape(); }, std::invalid_argument); @@ -877,7 +877,7 @@ TEST(partial_shape, changed_dimension_by_reference) TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; 
CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -904,7 +904,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_ok) TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_zero_data_dilation) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 0, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -931,7 +931,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_zero_data TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_zero_window_dilation) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -958,7 +958,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_zero_wind TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_zero_window_strides) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -985,7 +985,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_zero_wind TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_dynamic_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), 2, 3, Dimension::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -1012,7 +1012,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_dynamic_ok TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_dynamic_zero_data_post_padding) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), 2, 3, Dimension::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, -1, 0, 0}; @@ -1039,7 +1039,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_dynamic_neg_padding_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), 4, 3, Dimension::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, -1, 0, 0}; @@ -1064,7 +1064,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_dynamic_ne TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -1090,7 +1090,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_ok TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_window_dim_zero) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides 
data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -1119,7 +1119,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_wi TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_window_dilated_dim_zero) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -1148,7 +1148,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_window_all_in_padding_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 3, 0}; @@ -1175,7 +1175,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_window_all_in_padding_not_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 3, 0}; @@ -1204,7 +1204,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_dilated_window_not_all_in_padding) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 3, 0}; @@ -1230,7 +1230,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dynamic_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), Dimension::dynamic(), 6, 4}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -1258,7 +1258,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dyn TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dynamic_with_padding_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), Dimension::dynamic(), 6, 4}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 2, 0}; @@ -1286,7 +1286,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dynamic_with_padding_and_stride_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), Dimension::dynamic(), 6, 4}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 2, 0}; @@ -1313,7 +1313,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dynamic_window_too_big) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), Dimension::dynamic(), 6, 4}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -1342,7 +1342,7 @@ TEST(partial_shape, 
infer_windowed_reduction_rank_static_dynamic_rank_static_dyn TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dynamic_window_not_too_big_padding) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), Dimension::dynamic(), 6, 4}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 5, 0}; @@ -1370,7 +1370,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dynamic_window_dilated_too_big) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), Dimension::dynamic(), 6, 4}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 5, 0}; diff --git a/ngraph/test/pass_config.cpp b/ngraph/test/pass_config.cpp index f350c4a5658389..264d5e90a71635 100644 --- a/ngraph/test/pass_config.cpp +++ b/ngraph/test/pass_config.cpp @@ -90,8 +90,8 @@ NGRAPH_RTTI_DEFINITION(TestGraphRewritePass, "TestGraphRewritePass", 0); std::tuple, std::shared_ptr, std::shared_ptr> get_test_function() { - auto data = - std::make_shared(ngraph::element::f32, ngraph::Shape{3, 1, 2}); + auto data = std::make_shared(ngraph::element::Type_t::f32, + ngraph::Shape{3, 1, 2}); auto relu = std::make_shared(data); relu->set_friendly_name("relu"); auto sigmoid = std::make_shared(relu); @@ -378,4 +378,4 @@ TEST(PassConfig, EnableDisablePasses11) ASSERT_EQ(relu->get_friendly_name(), "renamed"); ASSERT_EQ(sigmoid->get_friendly_name(), "renamed"); -} \ No newline at end of file +} diff --git a/ngraph/test/pass_liveness.cpp b/ngraph/test/pass_liveness.cpp index 63ef1126582d9e..89433c2e12e472 100644 --- a/ngraph/test/pass_liveness.cpp +++ b/ngraph/test/pass_liveness.cpp @@ -36,7 +36,7 @@ namespace ng = ngraph; TEST(liveness, constant) { Shape shape{1}; - auto c = op::Constant::create(element::i32, shape, {5}); + auto c = op::Constant::create(element::Type_t::i32, shape, {5}); auto f = make_shared(make_shared(c), ParameterVector{}); pass::Manager pass_manager; diff --git a/ngraph/test/pass_shape_relevance.cpp b/ngraph/test/pass_shape_relevance.cpp index 18470dc05110e1..18be6e268a3d2c 100644 --- a/ngraph/test/pass_shape_relevance.cpp +++ b/ngraph/test/pass_shape_relevance.cpp @@ -32,8 +32,8 @@ using namespace std; TEST(shape_relevance, simple) { - auto param0 = make_shared(element::f32, Shape{4, 6}); - auto param1 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); + auto param1 = make_shared(element::Type_t::f32, Shape{4, 6}); auto x = make_shared(param0, param1); auto f = make_shared(x, ParameterVector{param0, param1}); @@ -48,8 +48,8 @@ TEST(shape_relevance, simple) TEST(shape_relevance, param_direct) { - auto param0 = make_shared(element::f32, Shape{4, 6}); - auto param1 = make_shared(element::i64, Shape{4}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); + auto param1 = make_shared(element::Type_t::i64, Shape{4}); auto x = make_shared(param0, param1, true); auto f = make_shared(x, ParameterVector{param0, param1}); @@ -64,9 +64,9 @@ TEST(shape_relevance, param_direct) TEST(shape_relevance, param_indirect) { - auto param0 = make_shared(element::f32, Shape{4, 6}); - auto param1 = make_shared(element::i64, Shape{4}); - auto param2 = make_shared(element::i64, Shape{2}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); + auto 
param1 = make_shared(element::Type_t::i64, Shape{4}); + auto param2 = make_shared(element::Type_t::i64, Shape{2}); auto c = make_shared(NodeVector{param1, param2}, 0); auto x = make_shared(param0, c, true); @@ -84,7 +84,7 @@ TEST(shape_relevance, param_indirect) TEST(shape_relevance, param_shape_of_direct_v0) { - auto param0 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); auto x = make_shared(param0, make_shared(param0), true); @@ -99,7 +99,7 @@ TEST(shape_relevance, param_shape_of_direct_v0) TEST(shape_relevance, param_shape_of_direct_v3) { - auto param0 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); auto x = make_shared(param0, make_shared(param0), true); @@ -114,10 +114,10 @@ TEST(shape_relevance, param_shape_of_direct_v3) TEST(shape_relevance, param_shape_of_direct_i32_v3) { - auto param0 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); auto x = make_shared( - param0, make_shared(param0, element::i32), true); + param0, make_shared(param0, element::Type_t::i32), true); auto f = make_shared(x, ParameterVector{param0}); @@ -130,11 +130,11 @@ TEST(shape_relevance, param_shape_of_direct_i32_v3) TEST(shape_relevance, param_shape_of_indirect_v0) { - auto param0 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); auto s = make_shared(param0); auto r = make_shared( - s, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + s, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); auto x = make_shared(param0, r, true); auto f = make_shared(x, ParameterVector{param0}); @@ -148,11 +148,11 @@ TEST(shape_relevance, param_shape_of_indirect_v0) TEST(shape_relevance, param_shape_of_indirect_v3) { - auto param0 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); auto s = make_shared(param0); auto r = make_shared( - s, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + s, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); auto x = make_shared(param0, r, true); auto f = make_shared(x, ParameterVector{param0}); @@ -166,11 +166,11 @@ TEST(shape_relevance, param_shape_of_indirect_v3) TEST(shape_relevance, param_shape_of_indirect_i32_v3) { - auto param0 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); - auto s = make_shared(param0, element::i32); + auto s = make_shared(param0, element::Type_t::i32); auto r = make_shared( - s, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + s, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); auto x = make_shared(param0, r, true); auto f = make_shared(x, ParameterVector{param0}); diff --git a/ngraph/test/pattern.cpp b/ngraph/test/pattern.cpp index 97c2e63cbcbea4..0ee8871b3b283d 100644 --- a/ngraph/test/pattern.cpp +++ b/ngraph/test/pattern.cpp @@ -52,20 +52,20 @@ using namespace std; static std::shared_ptr construct_constant_node(int n) { - return op::Constant::create(element::i32, Shape{}, {n}); + return op::Constant::create(element::Type_t::i32, Shape{}, {n}); } static std::shared_ptr construct_variance_graph() { // construct varaiance - auto N = op::Constant::create(element::f32, Shape{3}, {2, 2, 2}); - auto input = std::make_shared(element::f32, Shape{2, 3}); + 
auto N = op::Constant::create(element::Type_t::f32, Shape{3}, {2, 2, 2}); + auto input = std::make_shared(element::Type_t::f32, Shape{2, 3}); auto input_sq = std::make_shared(input, input); - auto sum_input = - std::make_shared(input, op::Constant::create(element::i64, {1}, {0})); + auto sum_input = std::make_shared( + input, op::Constant::create(element::Type_t::i64, {1}, {0})); auto square_sumed_input = std::make_shared(sum_input, sum_input); - auto sum_squared_input = - std::make_shared(input_sq, op::Constant::create(element::i64, {1}, {0})); + auto sum_squared_input = std::make_shared( + input_sq, op::Constant::create(element::Type_t::i64, {1}, {0})); auto avg_input_sum_sq = std::make_shared(square_sumed_input, N); auto xmu = std::make_shared(sum_squared_input, avg_input_sum_sq); auto variance = std::make_shared(xmu, N); @@ -78,10 +78,10 @@ static std::shared_ptr construct_variance_graph() static std::shared_ptr construct_mean_graph() { // construct mean; - auto input = std::make_shared(element::f32, Shape{2, 3}); - auto N = op::Constant::create(element::f32, Shape{3}, {2, 2, 2}); - auto sum_input1 = - std::make_shared(input, op::Constant::create(element::i64, {1}, {0})); + auto input = std::make_shared(element::Type_t::f32, Shape{2, 3}); + auto N = op::Constant::create(element::Type_t::f32, Shape{3}, {2, 2, 2}); + auto sum_input1 = std::make_shared( + input, op::Constant::create(element::Type_t::i64, {1}, {0})); auto mean = std::make_shared(sum_input1, N); auto mean_label = std::make_shared(mean, nullptr, NodeVector{mean}); return mean_label; @@ -212,9 +212,9 @@ TEST(pattern, graph_rewrite) pass_manager.register_pass(); { - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); - auto c = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); + auto c = make_shared(element::Type_t::i32, shape); auto iconst0 = construct_constant_node(0); auto graph_a = a + iconst0; auto graph_b = b + iconst0; @@ -231,8 +231,8 @@ TEST(pattern, graph_rewrite) } { - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto iconst0 = construct_constant_node(0); auto sum = (a + iconst0); auto graph = b + sum; @@ -247,8 +247,8 @@ TEST(pattern, graph_rewrite) } { - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto iconst1 = construct_constant_node(1); auto mul = (a * iconst1); auto graph = b + mul; @@ -263,8 +263,8 @@ TEST(pattern, graph_rewrite) } { - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto iconst1 = construct_constant_node(1); auto graph = ((((a * iconst1) * iconst1) * iconst1) * iconst1) + b; run_passes(pass_manager, graph, {a, b}); @@ -275,8 +275,8 @@ TEST(pattern, graph_rewrite) } { - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto iconst0 = construct_constant_node(0); auto iconst1 = construct_constant_node(1); auto graph = b + (iconst0 + ((a + iconst0) * iconst1)); @@ -288,8 +288,8 @@ TEST(pattern, 
graph_rewrite) } { - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto iconst1 = construct_constant_node(1); auto graph = b + (iconst1 * (iconst1 * (iconst1 * (iconst1 * a)))); run_passes(pass_manager, graph, {a, b}); @@ -303,7 +303,7 @@ TEST(pattern, graph_rewrite) TEST(pattern, matcher) { Shape shape{}; - auto a = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); TestMatcher n; ASSERT_TRUE(n.match(a, a)); ASSERT_EQ(n.get_matched_nodes(), (NodeVector{a})); @@ -327,7 +327,7 @@ TEST(pattern, matcher) ASSERT_FALSE(n.match(pattern_false, a)); ASSERT_EQ(n.get_matched_nodes(), (NodeVector{})); - auto b = make_shared(element::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto is_bea = [](std::shared_ptr node) -> bool { return op::is_binary_elementwise_arithmetic(node); @@ -363,7 +363,7 @@ TEST(pattern, matcher) ASSERT_TRUE(n.match(bea_label, ab)); ASSERT_EQ(n.get_pattern_map()[bea_label], ab); - auto d = make_shared(element::i32, shape); + auto d = make_shared(element::Type_t::i32, shape); ASSERT_FALSE(n.match(d, b)); ASSERT_FALSE(n.match(abs + b, b + b)); @@ -381,7 +381,7 @@ TEST(pattern, matcher) ASSERT_EQ(n.get_pattern_map()[pattern], abs); ASSERT_EQ(n.get_matched_nodes(), (NodeVector{add_absb, abs, b})); - auto c = make_shared(element::i32, shape); + auto c = make_shared(element::Type_t::i32, shape); auto mul_add_absb = c * (add_absb); ASSERT_TRUE(n.match(c * (b + pattern), mul_add_absb)); ASSERT_EQ(n.get_pattern_map()[pattern], abs); @@ -399,7 +399,7 @@ TEST(pattern, matcher) auto iconst1_1 = construct_constant_node(1); ASSERT_TRUE(n.match(pattern * iconst1_0, a * iconst1_1)); // different iconst ASSERT_EQ(n.get_pattern_map()[pattern], a); - auto fconst1_0 = op::Constant::create(element::f32, shape, {1}); + auto fconst1_0 = op::Constant::create(element::Type_t::f32, shape, {1}); auto patternf = std::make_shared(fconst1_0); ASSERT_TRUE(n.match(patternf * fconst1_0, a * iconst1_1)); // different iconst @@ -462,22 +462,22 @@ TEST(pattern, matcher) { TestMatcher sm(Output{}, "TestMatcher", true); // exact shape and type - auto scalar_param = make_shared(element::i32, Shape{}); + auto scalar_param = make_shared(element::Type_t::i32, Shape{}); auto label_dynamic_shape = - make_shared(element::i32, PartialShape::dynamic()); - auto param = make_shared(element::f32, Shape{}); + make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto param = make_shared(element::Type_t::f32, Shape{}); ASSERT_TRUE(sm.match(label_dynamic_shape, scalar_param)); // wrong type - auto scalar_param_wrong_type = make_shared(element::f32, Shape{}); + auto scalar_param_wrong_type = make_shared(element::Type_t::f32, Shape{}); ASSERT_FALSE(sm.match(label, scalar_param_wrong_type)); // dynamic dimension - auto label_dynamic_dimension = - make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto vector_param = make_shared(element::i32, Shape{10}); + auto label_dynamic_dimension = make_shared( + element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto vector_param = make_shared(element::Type_t::i32, Shape{10}); ASSERT_TRUE(sm.match(label_dynamic_dimension, vector_param)); // dynamic type - auto label_dynamic_type = - make_shared(element::dynamic, PartialShape{Dimension::dynamic()}); + auto label_dynamic_type = make_shared( + element::Type_t::dynamic, PartialShape{Dimension::dynamic()}); 
ASSERT_TRUE(sm.match(label_dynamic_type, vector_param)); } } @@ -487,10 +487,10 @@ TEST(pattern, mean) // construct mean TestMatcher n; - auto input = std::make_shared(element::f32, Shape{2, 3}); - auto N = op::Constant::create(element::f32, Shape{3}, {2, 2, 2}); - auto sum_input1 = - std::make_shared(input, op::Constant::create(element::i64, {1}, {0})); + auto input = std::make_shared(element::Type_t::f32, Shape{2, 3}); + auto N = op::Constant::create(element::Type_t::f32, Shape{3}, {2, 2, 2}); + auto sum_input1 = std::make_shared( + input, op::Constant::create(element::Type_t::i64, {1}, {0})); auto mean = std::make_shared(sum_input1, N); auto mean_graph = construct_mean_graph(); @@ -502,14 +502,14 @@ TEST(pattern, variance) { // construct variance TestMatcher n; - auto N = op::Constant::create(element::f32, Shape{3}, {2, 2, 2}); - auto input = std::make_shared(element::f32, Shape{2, 3}); + auto N = op::Constant::create(element::Type_t::f32, Shape{3}, {2, 2, 2}); + auto input = std::make_shared(element::Type_t::f32, Shape{2, 3}); auto input_sq = std::make_shared(input, input); - auto sum_input = - std::make_shared(input, op::Constant::create(element::i64, {1}, {0})); + auto sum_input = std::make_shared( + input, op::Constant::create(element::Type_t::i64, {1}, {0})); auto square_sumed_input = std::make_shared(sum_input, sum_input); - auto sum_squared_input = - std::make_shared(input_sq, op::Constant::create(element::i64, {1}, {0})); + auto sum_squared_input = std::make_shared( + input_sq, op::Constant::create(element::Type_t::i64, {1}, {0})); auto avg_input_sum_sq = std::make_shared(square_sumed_input, N); auto xmu = std::make_shared(sum_squared_input, avg_input_sum_sq); auto variance = std::make_shared(xmu, N); @@ -524,8 +524,8 @@ TEST(pattern, previous_matches) using ngraph::pattern::Matcher; Shape shape{}; Matcher::PatternMap previous_matches; - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto pattern = std::make_shared(b); auto abs = make_shared(a); auto add = abs + b; @@ -547,14 +547,14 @@ TEST(pattern, test_sort) using ngraph::pattern::Matcher; Shape shape{}; - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto abs1 = make_shared(a); auto abs2 = make_shared(b); auto add = abs1 + abs2; - auto pa = make_shared(element::i32, shape); - auto pb = make_shared(element::i32, shape); + auto pa = make_shared(element::Type_t::i32, shape); + auto pb = make_shared(element::Type_t::i32, shape); auto pabs1 = make_shared(pa); auto pabs1_label = std::make_shared(pabs1); auto pabs2 = make_shared(b); @@ -574,8 +574,8 @@ TEST(pattern, recurrent_pattern) using ngraph::pattern::RecurrentMatcher; Shape shape{}; ngraph::pattern::Matcher::PatternMap previous_matches; - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto rpattern = std::make_shared(b); auto iconst0 = construct_constant_node(0); auto abs = make_shared(a); @@ -643,7 +643,7 @@ class TestRecurrentGraphRewrite : public ngraph::pass::RecurrentGraphRewrite auto iconst0 = construct_constant_node(0); auto iconst_label = std::make_shared(iconst0, nullptr, NodeVector{iconst0}); - auto rpattern = 
std::make_shared(element::i32, shape); + auto rpattern = std::make_shared(element::Type_t::i32, shape); auto padd = iconst_label + rpattern; auto callback = [iconst_label, rpattern](pattern::RecurrentMatcher& rm) { @@ -697,14 +697,14 @@ TEST(pattern, recurrent_graph_rewrite) pass_manager.register_pass(); { - auto a = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); auto iconst0 = construct_constant_node(0); auto add_a1 = a + iconst0; auto add_a2 = add_a1 + iconst0; auto add_a3 = add_a2 + iconst0; auto abs_add_a3 = std::make_shared(add_a3); - auto b = make_shared(element::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto add_b1 = b + iconst0; auto add_b2 = add_b1 + iconst0; auto abs_add_b2 = std::make_shared(add_b2); @@ -727,9 +727,9 @@ TEST(pattern, recurrent_graph_rewrite) TEST(pattern, label_on_skip) { Shape shape{2, 2}; - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, Shape{}); - auto iconst = ngraph::make_zero(element::i32, Shape{}); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, Shape{}); + auto iconst = ngraph::make_zero(element::Type_t::i32, Shape{}); auto label = std::make_shared(iconst); auto const_label = std::make_shared(iconst, ngraph::is_zero, NodeVector{iconst}); @@ -738,8 +738,8 @@ TEST(pattern, label_on_skip) return as_type_ptr(n) != nullptr; }; - auto shape_const = op::Constant::create(element::u64, Shape{shape.size()}, shape); - auto axes_const = op::Constant::create(element::u8, Shape{}, {0}); + auto shape_const = op::Constant::create(element::Type_t::u64, Shape{shape.size()}, shape); + auto axes_const = op::Constant::create(element::Type_t::u8, Shape{}, {0}); auto bcst = std::make_shared( OutputVector{const_label, shape_const, axes_const}, bcst_pred); auto bcst_label = std::make_shared(bcst, nullptr, NodeVector{bcst}); @@ -762,7 +762,7 @@ TEST(pattern, label_on_skip) TEST(pattern, is_contained_match) { Shape shape{}; - auto a = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); auto absn = make_shared(a); TestMatcher n; @@ -781,11 +781,13 @@ TEST(pattern, is_contained_match) TEST(pattern, wrap_type) { - auto a = make_shared(element::f32, Shape{1, 3, 64, 64}); + auto a = make_shared(element::Type_t::f32, Shape{1, 3, 64, 64}); auto b = make_shared(a); auto c = make_shared(a); - auto mul1 = make_shared(a, op::Constant::create(element::f32, Shape{}, {1})); - auto mul2 = make_shared(op::Constant::create(element::f32, Shape{}, {1}), a); + auto mul1 = + make_shared(a, op::Constant::create(element::Type_t::f32, Shape{}, {1})); + auto mul2 = + make_shared(op::Constant::create(element::Type_t::f32, Shape{}, {1}), a); { auto m = pattern::wrap_type(); diff --git a/ngraph/test/provenance.cpp b/ngraph/test/provenance.cpp index e9a28d3cec6fbd..6ac66b39b68c8e 100644 --- a/ngraph/test/provenance.cpp +++ b/ngraph/test/provenance.cpp @@ -69,8 +69,8 @@ TEST(provenance, provenance) // of the graph. // { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); a->add_provenance_tag("tag_a"); @@ -114,8 +114,8 @@ TEST(provenance, provenance) // of the graph. 
// { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); a->add_provenance_tag("tag_a"); @@ -152,8 +152,8 @@ TEST(provenance, provenance) // * D is the replacement root, and its insertion kills A, B, and C. // { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); a->add_provenance_tag("tag_a"); @@ -164,7 +164,7 @@ TEST(provenance, provenance) auto f = make_shared(c, ParameterVector{x, y}); - auto d = make_zero(element::i32, Shape{2, 3, 4}); + auto d = make_zero(element::Type_t::i32, Shape{2, 3, 4}); d->add_provenance_tag("tag_d"); replace_node(c, d); @@ -190,8 +190,8 @@ TEST(provenance, provenance) // * D is the replacement root, and its insertion kills A, B, and C. // { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); a->add_provenance_tag("tag_a"); @@ -202,7 +202,7 @@ TEST(provenance, provenance) auto f = make_shared(c, ParameterVector{x, y}); - auto d = make_zero(element::i32, Shape{2, 3, 4}); + auto d = make_zero(element::Type_t::i32, Shape{2, 3, 4}); replace_node(c, d); EXPECT_EQ(d->get_provenance_tags(), (ProvSet{"tag_a", "tag_b", "tag_c"})); @@ -237,8 +237,8 @@ TEST(provenance, provenance) // * D is the replacement root replacing C and creating a new argument node E // { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); a->add_provenance_tag("tag_a"); @@ -288,8 +288,8 @@ TEST(provenance, provenance) // * D is the replacement root replacing C and creating a new argument node E // { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); a->add_provenance_tag("tag_a"); @@ -314,9 +314,9 @@ TEST(provenance, provenance) TEST(provenance, add_group_above) { - auto p1 = make_shared(element::i32, PartialShape{2, 3, 4}); + auto p1 = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); p1->add_provenance_tag("P1"); - auto p2 = make_shared(element::i32, PartialShape{2, 3, 4}); + auto p2 = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); p2->add_provenance_tag("P2"); auto a1 = p1 + p2; auto m1 = (a1 * a1)->add_provenance_group_members_above({p1, p2}); @@ -329,8 +329,8 @@ TEST(provenance, add_group_above) TEST(provenance, add_tags_above) { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); 
auto b = make_shared(x, y); @@ -379,9 +379,10 @@ TEST(provenance, add_tags_above) TEST(provenance, builder) { - auto p1 = make_shared(element::i32, PartialShape{2, 3, 4}); + auto p1 = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); p1->add_provenance_tag("P1"); - auto norm = builder::opset1::lp_norm(p1, op::Constant::create(element::i64, {}, {0}), 1, 0); + auto norm = + builder::opset1::lp_norm(p1, op::Constant::create(element::Type_t::i64, {}, {0}), 1, 0); norm->add_provenance_tag("norm"); for (auto node : topological_sort(NodeVector{norm})) { @@ -400,7 +401,7 @@ TEST(provenance, fused_copy_origin_tags) { test::ProvenanceEnabler provenance_enabler; - auto p1 = make_shared(element::f32, PartialShape{2, 3, 4}); + auto p1 = make_shared(element::Type_t::f32, PartialShape{2, 3, 4}); p1->add_provenance_tag("P1"); auto g = make_shared(p1); g->add_provenance_tag("G"); @@ -433,7 +434,7 @@ TEST(provenance, fused_decomposition_tag) { test::ProvenanceEnabler provenance_enabler; - auto p1 = make_shared(element::f32, PartialShape{2, 3, 4}); + auto p1 = make_shared(element::Type_t::f32, PartialShape{2, 3, 4}); auto fused_op = make_shared(p1); auto result = make_shared(fused_op); auto f = make_shared(ResultVector{result}, ParameterVector{p1}); @@ -453,7 +454,7 @@ TEST(provenance, fused_decomposition_tag) TEST(provenance, empty_group) { - auto p1 = make_shared(element::i32, PartialShape{2, 3, 4}); + auto p1 = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); p1->add_provenance_tag("P1"); auto abs = make_shared(p1); // Make sure group is empty @@ -476,8 +477,8 @@ TEST(provenance, opset1_upgrade_pass_graph) { test::ProvenanceEnabler provenance_enabler; - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); auto b = make_shared(x, y); @@ -519,8 +520,8 @@ TEST(provenance, opset0_downgrade_pass_graph) { test::ProvenanceEnabler provenance_enabler; - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); auto b = make_shared(x, y); diff --git a/ngraph/test/replace_node.cpp b/ngraph/test/replace_node.cpp index 8564a1e1c9920d..816f1f8356920a 100644 --- a/ngraph/test/replace_node.cpp +++ b/ngraph/test/replace_node.cpp @@ -63,24 +63,26 @@ using namespace ngraph; // TEST(replace_node, replace_nodes) { - auto x = make_shared(element::f32, Shape{2}); - auto y = make_shared(element::f32, Shape{2}); - auto z = make_shared(element::f32, Shape{2}); + auto x = make_shared(element::Type_t::f32, Shape{2}); + auto y = make_shared(element::Type_t::f32, Shape{2}); + auto z = make_shared(element::Type_t::f32, Shape{2}); auto add = x + y; - auto k = make_shared(element::f32, Shape{2}, vector{1, 2}); + auto k = make_shared(element::Type_t::f32, Shape{2}, vector{1, 2}); auto mul = add * k; auto sub = mul - z; auto f = make_shared(NodeVector{sub}, ParameterVector{x, y, z}); unordered_map, shared_ptr> parameter_replacement_map; - auto x_replacement = make_shared(element::f32, Shape{2}); + auto x_replacement = make_shared(element::Type_t::f32, Shape{2}); parameter_replacement_map[x] = x_replacement; unordered_map, shared_ptr> body_replacement_map; - auto 
y_replacement = make_shared(element::f32, Shape{2}, vector{3, 4});
-    auto k_replacement = make_shared(element::f32, Shape{2}, vector{5, 6});
+    auto y_replacement =
+        make_shared(element::Type_t::f32, Shape{2}, vector{3, 4});
+    auto k_replacement =
+        make_shared(element::Type_t::f32, Shape{2}, vector{5, 6});
     auto z_replacement = x_replacement + mul;
     body_replacement_map[y] = y_replacement;
     body_replacement_map[k] = k_replacement;
diff --git a/ngraph/test/runtime/ie/ie_executable.cpp b/ngraph/test/runtime/ie/ie_executable.cpp
index eba5a300e346e9..e1f456c9ad233e 100644
--- a/ngraph/test/runtime/ie/ie_executable.cpp
+++ b/ngraph/test/runtime/ie/ie_executable.cpp
@@ -56,6 +56,7 @@ namespace
         switch (elem_type)
         {
         case element::Type_t::f32: blob = MAKE_IE_TBLOB(float, FP32, shape, layout); break;
+        case element::Type_t::f64: blob = MAKE_IE_TBLOB(double, FP64, shape, layout); break;
         case element::Type_t::i16: blob = MAKE_IE_TBLOB(int16_t, I16, shape, layout); break;
         case element::Type_t::u8: blob = MAKE_IE_TBLOB(uint8_t, U8, shape, layout); break;
         case element::Type_t::i8: blob = MAKE_IE_TBLOB(int8_t, I8, shape, layout); break;
diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest
index 3da8e8c21d9ee5..af9e7f51121fad 100644
--- a/ngraph/test/runtime/ie/unit_test.manifest
+++ b/ngraph/test/runtime/ie/unit_test.manifest
@@ -1141,6 +1141,12 @@ IE_CPU.nonmaxsuppression_suppress_by_IOU_and_scores
 IE_CPU.nonmaxsuppression_two_batches
 IE_CPU.nonmaxsuppression_two_classes
+# Bug in CPU plugin for ROIPooling when pooled size is 1x1 and method is bilinear
+IE_CPU.roi_pooling_1x1_bilinear
+
+# output mismatch
+IE_CPU.gather_nd_batch_1d_from_3d_negative
+
 #-------------------------------------------------------------------------------
 #
 # Inference Engine GPU plugin excludes
diff --git a/ngraph/test/runtime/interpreter/int_executable.cpp b/ngraph/test/runtime/interpreter/int_executable.cpp
index cb450ae453483f..88506e6117b880 100644
--- a/ngraph/test/runtime/interpreter/int_executable.cpp
+++ b/ngraph/test/runtime/interpreter/int_executable.cpp
@@ -454,7 +454,7 @@ void runtime::interpreter::INTExecutable::perform_nan_check(
     for (const shared_ptr& tensor : tensors)
     {
         const element::Type& type = tensor->get_element_type();
-        if (type == element::f32)
+        if (type == element::Type_t::f32)
         {
             const float* data = tensor->get_data_ptr();
             for (size_t i = 0; i < tensor->get_element_count(); i++)
@@ -473,7 +473,7 @@
                 }
             }
         }
-        else if (type == element::f64)
+        else if (type == element::Type_t::f64)
         {
             const double* data = tensor->get_data_ptr();
             for (size_t i = 0; i < tensor->get_element_count(); i++)
diff --git a/ngraph/test/runtime/interpreter/int_executable.hpp b/ngraph/test/runtime/interpreter/int_executable.hpp
index 60732fe17c6e79..8d01ec56477727 100644
--- a/ngraph/test/runtime/interpreter/int_executable.hpp
+++ b/ngraph/test/runtime/interpreter/int_executable.hpp
@@ -85,6 +85,7 @@
 #include "ngraph/runtime/reference/reverse.hpp"
 #include "ngraph/runtime/reference/reverse_sequence.hpp"
 #include "ngraph/runtime/reference/rnn_cell.hpp"
+#include "ngraph/runtime/reference/roi_pooling.hpp"
 #include "ngraph/runtime/reference/round.hpp"
 #include "ngraph/runtime/reference/scatter_nd_update.hpp"
 #include "ngraph/runtime/reference/select.hpp"
@@ -438,7 +439,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ
        {
            const op::v4::CTCLoss* ctc_loss = static_cast(&node);
            auto t_int =
node.get_input_element_type(1); - if (t_int == element::i32) + if (t_int == element::Type_t::i32) { reference::CTCLoss( args[0]->get_data_ptr(), @@ -452,7 +453,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ ctc_loss->get_unique(), out[0]->get_data_ptr()); } - else if (t_int == element::i64) + else if (t_int == element::Type_t::i64) { reference::CTCLoss( args[0]->get_data_ptr(), @@ -472,7 +473,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ { const op::CumSum* cumsum = static_cast(&node); auto axis_et = node.get_input_element_type(1); - if (axis_et == element::i32) + if (axis_et == element::Type_t::i32) { reference::cumsum(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -481,7 +482,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ cumsum->is_exclusive(), cumsum->is_reverse()); } - else if (axis_et == element::i64) + else if (axis_et == element::Type_t::i64) { reference::cumsum(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -499,7 +500,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ auto indicesType = embed->input(1).get_element_type(); size_t indices_num = shape_size(embed->get_input_shape(1)); - if (indicesType == element::u64 || indicesType == element::i64) + if (indicesType == element::Type_t::u64 || indicesType == element::Type_t::i64) { reference::embeddingBagOffsetsSum( args[0]->get_data_ptr(), @@ -511,7 +512,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ indices_num, embed->get_shape()); } - else if (indicesType == element::u32 || indicesType == element::i32) + else if (indicesType == element::Type_t::u32 || indicesType == element::Type_t::i32) { reference::embeddingBagOffsetsSum( args[0]->get_data_ptr(), @@ -537,7 +538,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ static_cast(&node); auto indicesType = embed->input(1).get_element_type(); - if (indicesType == element::u64 || indicesType == element::i64) + if (indicesType == element::Type_t::u64 || indicesType == element::Type_t::i64) { reference::embeddingBagPackedSum( args[0]->get_data_ptr(), @@ -547,7 +548,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ embed->get_input_shape(1), embed->get_shape()); } - else if (indicesType == element::u32 || indicesType == element::i32) + else if (indicesType == element::Type_t::u32 || indicesType == element::Type_t::i32) { reference::embeddingBagPackedSum( args[0]->get_data_ptr(), @@ -572,7 +573,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ auto indicesType = embed->input(1).get_element_type(); size_t indices_num = shape_size(embed->get_input_shape(1)); - if (indicesType == element::u64 || indicesType == element::i64) + if (indicesType == element::Type_t::u64 || indicesType == element::Type_t::i64) { reference::embeddingSegmentsSum( args[0]->get_data_ptr(), @@ -585,7 +586,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ embed->get_input_shape(1), embed->get_shape()); } - else if (indicesType == element::u32 || indicesType == element::i32) + else if (indicesType == element::Type_t::u32 || indicesType == element::Type_t::i32) { reference::embeddingSegmentsSum( args[0]->get_data_ptr(), @@ -666,7 +667,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ case OP_TYPEID::GatherND_v5: { const op::v5::GatherND* 
gatherNDNode = static_cast(&node); - if (node.get_input_element_type(1) == element::i64) + if (node.get_input_element_type(1) == element::Type_t::i64) { reference::gather_nd(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -676,7 +677,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ node.get_output_shape(0), gatherNDNode->get_batch_dims()); } - else if (node.get_input_element_type(1) == element::i32) + else if (node.get_input_element_type(1) == element::Type_t::i32) { reference::gather_nd(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -759,7 +760,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ { auto lstm_seq = static_cast(&node); auto type = args[3]->get_element_type(); - if (type == element::i64 || type == element::u64) + if (type == element::Type_t::i64 || type == element::Type_t::u64) { runtime::reference::lstm_sequence(args[0]->get_data_ptr(), args[0]->get_shape(), @@ -784,7 +785,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ lstm_seq->get_clip(), lstm_seq->get_direction()); } - else if (type == element::i32 || type == element::u32) + else if (type == element::Type_t::i32 || type == element::Type_t::u32) { runtime::reference::lstm_sequence(args[0]->get_data_ptr(), args[0]->get_shape(), @@ -821,7 +822,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ { auto gru_seq = static_cast(&node); auto type = args[2]->get_element_type(); - if (type == element::i64 || type == element::u64) + if (type == element::Type_t::i64 || type == element::Type_t::u64) { runtime::reference::gru_sequence(args[0]->get_data_ptr(), args[0]->get_shape(), @@ -843,7 +844,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ gru_seq->get_direction(), gru_seq->get_linear_before_reset()); } - else if (type == element::i32 || type == element::u32) + else if (type == element::Type_t::i32 || type == element::Type_t::u32) { runtime::reference::gru_sequence(args[0]->get_data_ptr(), args[0]->get_shape(), @@ -890,7 +891,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ { auto rnn_seq = static_cast(&node); auto type = args[2]->get_element_type(); - if (type == element::i64 || type == element::u64) + if (type == element::Type_t::i64 || type == element::Type_t::u64) { runtime::reference::rnn_sequence(args[0]->get_data_ptr(), args[0]->get_shape(), @@ -910,7 +911,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ rnn_seq->get_clip(), rnn_seq->get_direction()); } - else if (type == element::i32 || type == element::u32) + else if (type == element::Type_t::i32 || type == element::Type_t::u32) { runtime::reference::rnn_sequence(args[0]->get_data_ptr(), args[0]->get_shape(), @@ -1105,7 +1106,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ const op::Quantize* quantize = static_cast(&node); auto type = quantize->get_element_type(); - if (type == element::u8) + if (type == element::Type_t::u8) { reference::quantize(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -1116,7 +1117,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ quantize->get_axes(), quantize->get_round_mode()); } - else if (type == element::i8) + else if (type == element::Type_t::i8) { reference::quantize(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -1127,7 +1128,7 @@ class INTERPRETER_BACKEND_API 
ngraph::runtime::interpreter::INTExecutable : publ
                                quantize->get_axes(),
                                quantize->get_round_mode());
            }
-            else if (type == element::i32)
+            else if (type == element::Type_t::i32)
            {
                reference::quantize(args[0]->get_data_ptr(),
                                    args[1]->get_data_ptr(),
@@ -1171,7 +1172,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ
        {
            const op::ReverseSequence* reverse = static_cast(&node);

-            if (node.get_input_element_type(1) == element::i32)
+            if (node.get_input_element_type(1) == element::Type_t::i32)
            {
                reference::reverse_sequence(args[0]->get_data_ptr(),
                                            out[0]->get_data_ptr(),
@@ -1180,7 +1181,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ
                                            reverse->get_sequence_axis(),
                                            args[1]->get_data_ptr());
            }
-            else if (node.get_input_element_type(1) == element::i64)
+            else if (node.get_input_element_type(1) == element::Type_t::i64)
            {
                reference::reverse_sequence(args[0]->get_data_ptr(),
                                            out[0]->get_data_ptr(),
@@ -1195,6 +1196,19 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ
            }
            break;
        }
+        case OP_TYPEID::ROIPooling_v0:
+        {
+            const op::ROIPooling* roi_pooling = static_cast(&node);
+            reference::roi_pooling(args[0]->get_data_ptr(),
+                                   args[1]->get_data_ptr(),
+                                   out[0]->get_data_ptr(),
+                                   node.get_input_shape(0),
+                                   node.get_input_shape(1),
+                                   node.get_output_shape(0),
+                                   roi_pooling->get_spatial_scale(),
+                                   roi_pooling->get_method());
+            break;
+        }
        case OP_TYPEID::Select:
        {
            size_t element_count = shape_size(node.get_output_shape(0));
@@ -1364,7 +1378,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ
            const op::ScatterNDUpdate* scatterNDUpd =
                static_cast(&node);
            auto idxType = scatterNDUpd->get_input_element_type(1);
-            if (idxType == element::i32)
+            if (idxType == element::Type_t::i32)
            {
                reference::scatterNdUpdate(args[0]->get_data_ptr(),
                                           args[1]->get_data_ptr(),
@@ -1374,7 +1388,7 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ
                                           node.get_input_shape(1),
                                           node.get_input_shape(2));
            }
-            else if (idxType == element::i64)
+            else if (idxType == element::Type_t::i64)
            {
                reference::scatterNdUpdate(args[0]->get_data_ptr(),
                                           args[1]->get_data_ptr(),
@@ -1443,8 +1457,8 @@ class INTERPRETER_BACKEND_API ngraph::runtime::interpreter::INTExecutable : publ
                                          &valid_outputs,
                                          info.sort_result_descending);
-            auto selected_scores_type =
-                (args.size() < 4) ? element::f32 : args[3]->get_element_type();
+            auto selected_scores_type = (args.size() < 4) ?
element::Type(element::Type_t::f32) + : args[3]->get_element_type(); reference::nms5_postprocessing(out, info.output_type, diff --git a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp index 8cebe8d3ade780..985070bc251a46 100644 --- a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp +++ b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp @@ -25,6 +25,7 @@ NGRAPH_OP(LSTMCell, op::v0) NGRAPH_OP(RegionYolo, op::v0) NGRAPH_OP(ReorgYolo, op::v0) NGRAPH_OP(RNNCell, op::v0) +NGRAPH_OP(ROIPooling, op::v0) #undef ID_SUFFIX #define ID_SUFFIX(NAME) NAME##_v1 diff --git a/ngraph/test/runtime/pass/dyn_elimination.cpp b/ngraph/test/runtime/pass/dyn_elimination.cpp index 0f82643a877519..8fcbf143481cfd 100644 --- a/ngraph/test/runtime/pass/dyn_elimination.cpp +++ b/ngraph/test/runtime/pass/dyn_elimination.cpp @@ -56,12 +56,12 @@ std::shared_ptr make_range_replacement(const element::Type& et, void pass::DynElimination::construct_range() { - auto start_arg_label = - make_shared(element::f32, Shape{}, pattern::has_class()); - auto stop_arg_label = - make_shared(element::f32, Shape{}, pattern::has_class()); - auto step_arg_label = - make_shared(element::f32, Shape{}, pattern::has_class()); + auto start_arg_label = make_shared( + element::Type_t::f32, Shape{}, pattern::has_class()); + auto stop_arg_label = make_shared( + element::Type_t::f32, Shape{}, pattern::has_class()); + auto step_arg_label = make_shared( + element::Type_t::f32, Shape{}, pattern::has_class()); auto range_pat = make_shared(start_arg_label, stop_arg_label, step_arg_label); diff --git a/ngraph/test/runtime/pass/opset0_downgrade.cpp b/ngraph/test/runtime/pass/opset0_downgrade.cpp index 82e8bf3c7d54ac..bd7ca068162df6 100644 --- a/ngraph/test/runtime/pass/opset0_downgrade.cpp +++ b/ngraph/test/runtime/pass/opset0_downgrade.cpp @@ -85,7 +85,7 @@ namespace opset0_downgrade reshaped_output_shape.insert(reshaped_output_shape.begin() + axis, 1); } auto shape_pattern = op::Constant::create( - element::u64, {reshaped_output_shape.size()}, reshaped_output_shape); + element::Type_t::u64, {reshaped_output_shape.size()}, reshaped_output_shape); auto reshaped_product = make_shared(replacement_node->output(0), shape_pattern, false); return reshaped_product; diff --git a/ngraph/test/runtime/pass/opset1_downgrade.cpp b/ngraph/test/runtime/pass/opset1_downgrade.cpp index b4fd099c8e2323..23fe9aa970e0d5 100644 --- a/ngraph/test/runtime/pass/opset1_downgrade.cpp +++ b/ngraph/test/runtime/pass/opset1_downgrade.cpp @@ -39,7 +39,7 @@ namespace opset1_downgrade { const auto const_filled_with_ones = make_shared( op::Constant::create(data->get_element_type(), {}, {1}), target_shape); - if (const_filled_with_ones->get_element_type() == element::boolean) + if (const_filled_with_ones->get_element_type() == element::Type_t::boolean) { replacement_node = make_shared(data, const_filled_with_ones); } diff --git a/ngraph/test/runtime/pass/opset1_upgrade.cpp b/ngraph/test/runtime/pass/opset1_upgrade.cpp index 333ab280f7152a..4258eaea3ac621 100644 --- a/ngraph/test/runtime/pass/opset1_upgrade.cpp +++ b/ngraph/test/runtime/pass/opset1_upgrade.cpp @@ -106,7 +106,7 @@ namespace opset1_upgrade node->input_value(1), // data node->input_value(0), // filters op::Constant::create( - element::i64, + element::Type_t::i64, Shape{data_batch_shape.size() - 2}, vector(data_batch_shape.begin() + 2, data_batch_shape.end())), strides, @@ -230,7 +230,8 @@ namespace opset1_upgrade auto replacement_node = make_shared( node->input_value(2), 
reshaped_filters, - op::Constant::create(element::i64, Shape{data_batch_shape.size()}, data_batch_shape), + op::Constant::create( + element::Type_t::i64, Shape{data_batch_shape.size()}, data_batch_shape), strides, pads_begin, pads_end, diff --git a/ngraph/test/specialize_function.cpp b/ngraph/test/specialize_function.cpp index 6a8e91cfb6585e..fe09800a1b5b2d 100644 --- a/ngraph/test/specialize_function.cpp +++ b/ngraph/test/specialize_function.cpp @@ -27,10 +27,10 @@ using namespace ngraph; // shapes. TEST(specialize_function, et_shape_static) { - auto p0 = std::make_shared(element::f32, Shape{1, 2, 3}); - auto p1 = std::make_shared(element::i32, Shape{1, 2, 3}); + auto p0 = std::make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::i32, Shape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -38,21 +38,21 @@ TEST(specialize_function, et_shape_static) std::vector param_vals{nullptr, nullptr}; auto g = specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3})); - ASSERT_EQ(g->get_output_element_type(0), element::f32); + ASSERT_EQ(g->get_output_element_type(0), element::Type_t::f32); } // Test specialization of dynamic element types. TEST(specialize_function, et_dynamic_shape_static) { - auto p0 = std::make_shared(element::dynamic, Shape{1, 2, 3}); - auto p1 = std::make_shared(element::dynamic, Shape{1, 2, 3}); + auto p0 = std::make_shared(element::Type_t::dynamic, Shape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::dynamic, Shape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -60,21 +60,21 @@ TEST(specialize_function, et_dynamic_shape_static) std::vector param_vals{nullptr, nullptr}; auto g = specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3})); - ASSERT_EQ(g->get_output_element_type(0), element::f32); + ASSERT_EQ(g->get_output_element_type(0), element::Type_t::f32); } // Test specialization of rank-dynamic shapes. TEST(specialize_function, et_static_shape_rank_dynamic) { - auto p0 = std::make_shared(element::f32, PartialShape::dynamic()); - auto p1 = std::make_shared(element::i32, PartialShape::dynamic()); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape::dynamic()); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -82,21 +82,21 @@ TEST(specialize_function, et_static_shape_rank_dynamic) std::vector param_vals{nullptr, nullptr}; auto g = specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3})); - ASSERT_EQ(g->get_output_element_type(0), element::f32); + ASSERT_EQ(g->get_output_element_type(0), element::Type_t::f32); } // Test specialization of rank-static dynamic shapes. 
TEST(specialize_function, et_static_shape_rank_static_dynamic) { - auto p0 = std::make_shared(element::f32, PartialShape::dynamic(3)); - auto p1 = std::make_shared(element::i32, PartialShape::dynamic(3)); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape::dynamic(3)); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -104,21 +104,21 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic) std::vector param_vals{nullptr, nullptr}; auto g = specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3})); - ASSERT_EQ(g->get_output_element_type(0), element::f32); + ASSERT_EQ(g->get_output_element_type(0), element::Type_t::f32); } // Test specialization of values to a shape-dynamic parameters. TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) { - auto p0 = std::make_shared(element::f32, PartialShape::dynamic(3)); - auto p1 = std::make_shared(element::i32, PartialShape::dynamic(3)); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape::dynamic(3)); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -128,12 +128,12 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) std::vector param_vals{nullptr, p1_subst_vals.data()}; auto g = specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3})); - ASSERT_EQ(g->get_output_element_type(0), element::f32); + ASSERT_EQ(g->get_output_element_type(0), element::Type_t::f32); auto plus_node = as_type_ptr(g->get_results().at(0)->input_value(0).get_node_shared_ptr()); @@ -143,7 +143,7 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) auto const_node = as_type_ptr(convert_node->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(const_node); - ASSERT_EQ(const_node->get_output_element_type(0), element::i32); + ASSERT_EQ(const_node->get_output_element_type(0), element::Type_t::i32); ASSERT_EQ(const_node->get_output_shape(0), (Shape{1, 2, 3})); ASSERT_EQ(const_node->get_vector(), p1_subst_vals); } @@ -153,10 +153,10 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) // (The input shapes we provide at specialization time are inconsistent.) 
TEST(specialize_function, et_static_shape_rank_dynamic_validation_fails) { - auto p0 = std::make_shared(element::f32, PartialShape::dynamic()); - auto p1 = std::make_shared(element::i32, PartialShape::dynamic()); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape::dynamic()); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -166,7 +166,7 @@ TEST(specialize_function, et_static_shape_rank_dynamic_validation_fails) ASSERT_THROW( { specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3, 4}}, param_vals); }, @@ -178,10 +178,10 @@ TEST(specialize_function, et_static_shape_rank_dynamic_validation_fails) // (The input element types we provide at specialization time are inconsistent.) TEST(specialize_function, et_dynamic_shape_static_validation_fails) { - auto p0 = std::make_shared(element::dynamic, Shape{1, 2, 3}); - auto p1 = std::make_shared(element::dynamic, Shape{1, 2, 3}); + auto p0 = std::make_shared(element::Type_t::dynamic, Shape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::dynamic, Shape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -191,7 +191,7 @@ TEST(specialize_function, et_dynamic_shape_static_validation_fails) ASSERT_THROW( { specialize_function(f, - {element::u32, element::i32}, + {element::Type_t::u32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); }, @@ -206,10 +206,10 @@ TEST(specialize_function, et_dynamic_shape_static_validation_fails) // reconstruct the graph.) TEST(specialize_function, et_static_shape_rank_static_dynamic_rank_mismatch) { - auto p0 = std::make_shared(element::f32, PartialShape::dynamic(3)); - auto p1 = std::make_shared(element::i32, PartialShape::dynamic(3)); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape::dynamic(3)); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -219,7 +219,7 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_rank_mismatch) ASSERT_THROW( { specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3, 4}}, param_vals); }, @@ -234,11 +234,11 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_rank_mismatch) // reconstruct the graph.) 
TEST(specialize_function, et_static_shape_rank_static_dynamic_dim_mismatch) { - auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); - auto p1 = - std::make_shared(element::i32, PartialShape{1, Dimension::dynamic(), 3}); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::i32, + PartialShape{1, Dimension::dynamic(), 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -248,7 +248,7 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_dim_mismatch) ASSERT_THROW( { specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 9, 4}}, param_vals); }, @@ -258,10 +258,10 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_dim_mismatch) // Test for failure when we supply the wrong number of replacement element types. TEST(specialize_function, et_count_wrong) { - auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); - auto p1 = std::make_shared(element::i32, PartialShape{1, 2, 3}); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -271,7 +271,7 @@ TEST(specialize_function, et_count_wrong) ASSERT_THROW( { specialize_function(f, - {element::f32, element::i32, element::u32}, + {element::Type_t::f32, element::Type_t::i32, element::Type_t::u32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); }, @@ -281,10 +281,10 @@ TEST(specialize_function, et_count_wrong) // Test for failure when we supply the wrong number of replacement shapes. TEST(specialize_function, shape_count_wrong) { - auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); - auto p1 = std::make_shared(element::i32, PartialShape{1, 2, 3}); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -295,7 +295,7 @@ TEST(specialize_function, shape_count_wrong) { specialize_function( f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}, PartialShape{4, 5, 6}}, param_vals); }, @@ -305,10 +305,10 @@ TEST(specialize_function, shape_count_wrong) // Test for failure when we supply the wrong number of replacement parameter values. 
TEST(specialize_function, value_count_wrong) { - auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); - auto p1 = std::make_shared(element::i32, PartialShape{1, 2, 3}); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -318,7 +318,7 @@ TEST(specialize_function, value_count_wrong) ASSERT_THROW( { specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); }, diff --git a/ngraph/test/tensor.cpp b/ngraph/test/tensor.cpp index 650e5c5ffad61a..0eab2f21e1dfb3 100644 --- a/ngraph/test/tensor.cpp +++ b/ngraph/test/tensor.cpp @@ -39,7 +39,7 @@ TEST(tensor, size) pass_manager.register_pass(); { - auto arg0 = make_shared(element::f32, Shape{2, 3}); + auto arg0 = make_shared(element::Type_t::f32, Shape{2, 3}); auto add = make_shared(arg0, arg0); auto f0 = make_shared(add, ParameterVector{arg0}); @@ -51,7 +51,7 @@ TEST(tensor, size) } { - auto arg0 = make_shared(element::f32, Shape{}); + auto arg0 = make_shared(element::Type_t::f32, Shape{}); auto add = make_shared(arg0, arg0); auto f0 = make_shared(add, ParameterVector{arg0}); @@ -63,7 +63,7 @@ TEST(tensor, size) } { - auto arg0 = make_shared(element::f32, Shape{1}); + auto arg0 = make_shared(element::Type_t::f32, Shape{1}); auto add = make_shared(arg0, arg0); auto f0 = make_shared(add, ParameterVector{arg0}); @@ -80,7 +80,7 @@ TEST(tensor, output_flag) pass::Manager pass_manager; pass_manager.register_pass(); - auto arg0 = make_shared(element::f32, Shape{1}); + auto arg0 = make_shared(element::Type_t::f32, Shape{1}); auto add = make_shared(arg0, arg0); auto f0 = make_shared(add, ParameterVector{arg0}); diff --git a/ngraph/test/type_prop/assign.cpp b/ngraph/test/type_prop/assign.cpp index 3bffbd8a931a15..5e29020f72d3d5 100644 --- a/ngraph/test/type_prop/assign.cpp +++ b/ngraph/test/type_prop/assign.cpp @@ -23,7 +23,7 @@ using namespace ngraph; TEST(type_prop, assign_variable_not_found) { - auto A = make_shared(element::f32, Shape{1, 2, 64, 64}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2, 64, 64}); try { auto space_to_depth = make_shared(A, "variable_id"); @@ -43,10 +43,10 @@ TEST(type_prop, assign_variable_not_found) TEST(type_prop, assign_deduce) { - auto input = make_shared(element::f32, Shape{1, 2, 64, 64}); + auto input = make_shared(element::Type_t::f32, Shape{1, 2, 64, 64}); auto read_value = make_shared(input, "variable_id"); auto assign = make_shared(read_value, "variable_id"); - ASSERT_EQ(assign->get_element_type(), element::f32); + ASSERT_EQ(assign->get_element_type(), element::Type_t::f32); ASSERT_EQ(assign->get_shape(), (Shape{1, 2, 64, 64})); } diff --git a/ngraph/test/type_prop/avg_pool.cpp b/ngraph/test/type_prop/avg_pool.cpp index a08c58a2139d91..1837f39c0f285d 100644 --- a/ngraph/test/type_prop/avg_pool.cpp +++ b/ngraph/test/type_prop/avg_pool.cpp @@ -32,7 +32,7 @@ TEST(type_prop, avg_pool_auto_padding) const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); auto mp = make_shared( arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad); @@ -52,7 +52,7 @@ 
TEST(type_prop, avg_pool_auto_padding_nc_dims_dynamic_same_lower) const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); auto mp = make_shared( arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad); @@ -73,7 +73,7 @@ TEST(type_prop, avg_pool_auto_padding_nc_dims_dynamic_same_upper) const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_UPPER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); auto mp = make_shared( arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad); @@ -94,7 +94,7 @@ TEST(type_prop, avg_pool_auto_padding_spatial_dims_dynamic) const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); auto mp = make_shared( arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad); @@ -106,12 +106,12 @@ TEST(type_prop, avg_pool_auto_padding_spatial_dims_dynamic) TEST(type_prop, avg_pool_1d_deduce) { - const auto param = make_shared(element::f32, Shape{64, 3, 100}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 100}); const Shape kernel{10}; const auto avg_pool = make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 91})); EXPECT_EQ(avg_pool->get_strides(), Strides{1}); @@ -122,13 +122,13 @@ TEST(type_prop, avg_pool_1d_deduce) TEST(type_prop, avg_pool_1d_deduce_strided) { - const auto param = make_shared(element::f32, Shape{64, 3, 100}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 100}); const Shape kernel{10}; const auto move_strides = Strides{2}; const auto avg_pool = make_shared( param, move_strides, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 46})); EXPECT_EQ(avg_pool->get_strides(), Strides{2}); @@ -139,13 +139,13 @@ TEST(type_prop, avg_pool_1d_deduce_strided) TEST(type_prop, avg_pool_1d_deduce_strided_small_uneven) { - const auto param = make_shared(element::f32, Shape{64, 3, 5}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 5}); const Shape kernel{2}; const auto move_strides = Strides{2}; const auto avg_pool = make_shared( param, move_strides, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 2})); EXPECT_EQ(avg_pool->get_strides(), Strides{2}); @@ -156,13 +156,13 @@ TEST(type_prop, avg_pool_1d_deduce_strided_small_uneven) TEST(type_prop, avg_pool_1d_deduce_strided_small_even) { - const auto param = make_shared(element::f32, Shape{64, 3, 6}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 6}); const Shape kernel{2}; const auto move_strides = Strides{2}; const auto avg_pool = 
make_shared( param, move_strides, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 3})); EXPECT_EQ(avg_pool->get_strides(), Strides{2}); @@ -173,12 +173,12 @@ TEST(type_prop, avg_pool_1d_deduce_strided_small_even) TEST(type_prop, avg_pool_2d_deduce) { - const auto param = make_shared(element::f32, Shape{64, 3, 100, 150}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); const Shape kernel{10, 20}; const auto avg_pool = make_shared( param, Strides{1, 1}, Shape{0, 0}, Shape{0, 0}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 91, 131})); EXPECT_EQ(avg_pool->get_strides(), (Strides{1, 1})); @@ -189,13 +189,13 @@ TEST(type_prop, avg_pool_2d_deduce) TEST(type_prop, avg_pool_2d_deduce_strided) { - const auto param = make_shared(element::f32, Shape{64, 3, 100, 150}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); const Shape kernel{10, 20}; const auto move_strides = Strides{2, 3}; const auto avg_pool = make_shared( param, move_strides, Shape{0, 0}, Shape{0, 0}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 46, 44})); EXPECT_EQ(avg_pool->get_strides(), (Strides{2, 3})); @@ -206,13 +206,13 @@ TEST(type_prop, avg_pool_2d_deduce_strided) TEST(type_prop, avg_pool_3d_deduce_strided_small) { - const auto param = make_shared(element::f32, Shape{64, 3, 7, 8, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 7, 8, 10}); const Shape kernel{2, 3, 2}; const auto move_strides = Strides{2, 3, 4}; const auto avg_pool = make_shared( param, move_strides, Shape{0, 0, 0}, Shape{0, 0, 0}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 3, 2, 3})); EXPECT_EQ(avg_pool->get_strides(), (Strides{2, 3, 4})); @@ -223,7 +223,7 @@ TEST(type_prop, avg_pool_3d_deduce_strided_small) TEST(type_prop, avg_pool_3d_deduce_strided_padded_small) { - const auto param = make_shared(element::f32, Shape{64, 3, 7, 8, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 7, 8, 10}); const Shape kernel{2, 3, 2}; const auto move_strides = Strides{2, 3, 4}; const Shape pads_begin{5, 6, 4}; @@ -231,7 +231,7 @@ TEST(type_prop, avg_pool_3d_deduce_strided_padded_small) const auto avg_pool = make_shared( param, move_strides, pads_begin, pads_end, kernel, false, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 9, 6, 5})); EXPECT_EQ(avg_pool->get_strides(), (Strides{2, 3, 4})); @@ -242,7 +242,7 @@ TEST(type_prop, avg_pool_3d_deduce_strided_padded_small) TEST(type_prop, avg_pool_invalid_0d_input) { - const auto param = make_shared(element::f32, Shape{}); + const auto param = make_shared(element::Type_t::f32, Shape{}); const Shape 
kernel{}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -251,7 +251,7 @@ TEST(type_prop, avg_pool_invalid_0d_input) TEST(type_prop, avg_pool_invalid_1d_input) { - const auto param = make_shared(element::f32, Shape{2}); + const auto param = make_shared(element::Type_t::f32, Shape{2}); const Shape kernel{}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -260,7 +260,7 @@ TEST(type_prop, avg_pool_invalid_1d_input) TEST(type_prop, avg_pool_invalid_2d_input) { - const auto param = make_shared(element::f32, Shape{2, 6}); + const auto param = make_shared(element::Type_t::f32, Shape{2, 6}); const Shape kernel{}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -269,7 +269,7 @@ TEST(type_prop, avg_pool_invalid_2d_input) TEST(type_prop, avg_pool_invalid_0_batch_size) { - const auto param = make_shared(element::f32, Shape{0, 6, 1}); + const auto param = make_shared(element::Type_t::f32, Shape{0, 6, 1}); const Shape kernel{1}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -278,7 +278,7 @@ TEST(type_prop, avg_pool_invalid_0_batch_size) TEST(type_prop, avg_pool_invalid_0_channels) { - const auto param = make_shared(element::f32, Shape{6, 0, 1}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 0, 1}); const Shape kernel{1}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -287,7 +287,7 @@ TEST(type_prop, avg_pool_invalid_0_channels) TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_many) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3, 3, 3}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -296,7 +296,7 @@ TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_many) TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_few) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -305,7 +305,7 @@ TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_few) TEST(type_prop, avg_pool_invalid_movement_stride_rank) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3, 3}; const auto move_strides = Strides{2, 3, 8}; EXPECT_THROW(make_shared( @@ -315,7 +315,7 @@ TEST(type_prop, avg_pool_invalid_movement_stride_rank) TEST(type_prop, avg_pool_invalid_padding_below_rank) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3, 3}; const auto move_strides = Strides{2, 3}; const Shape pads_begin{1, 2, 3}; @@ -328,7 +328,7 @@ TEST(type_prop, avg_pool_invalid_padding_below_rank) TEST(type_prop, avg_pool_invalid_padding_above_rank) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3, 3}; const auto move_strides = 
Strides{2, 3}; const Shape pads_begin{1, 2}; @@ -341,7 +341,7 @@ TEST(type_prop, avg_pool_invalid_padding_above_rank) TEST(type_prop, avg_pool_invalid_input_item_size_0) { - const auto param = make_shared(element::f32, Shape{6, 2, 0, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 0, 10}); const Shape kernel{3, 3}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -350,7 +350,7 @@ TEST(type_prop, avg_pool_invalid_input_item_size_0) TEST(type_prop, avg_pool_invalid_window_size_0) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3, 0}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -359,7 +359,7 @@ TEST(type_prop, avg_pool_invalid_window_size_0) TEST(type_prop, avg_pool_invalid_dilated_too_large) { - const auto param = make_shared(element::f32, Shape{6, 2, 8, 8}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 8, 8}); const Shape kernel{9, 9}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -368,7 +368,7 @@ TEST(type_prop, avg_pool_invalid_dilated_too_large) TEST(type_prop, avg_pool_larger_than_pre_padding_but_fits_in_post_padding) { - const auto param = make_shared(element::f32, Shape{6, 2, 8, 8}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 8, 8}); const Shape kernel{9, 9}; const Strides window_strides{1, 1}; const Shape pads_begin{0, 0}; @@ -376,13 +376,13 @@ TEST(type_prop, avg_pool_larger_than_pre_padding_but_fits_in_post_padding) const auto avg_pool = make_shared( param, window_strides, pads_begin, pads_end, kernel, true, op::RoundingType::FLOOR); - ASSERT_EQ(avg_pool->get_output_element_type(0), element::f32); + ASSERT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); ASSERT_EQ(avg_pool->get_output_shape(0), (Shape{6, 2, 1, 1})); } TEST(type_prop, avg_pool_invalid_movement_stride_0) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3, 3}; const auto move_strides = Strides{0, 1}; EXPECT_THROW(make_shared( @@ -398,7 +398,7 @@ TEST(type_prop, avg_pool_partial_rank_dynamic_ok) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); auto ap = make_shared(param, window_movement_strides, pads_begin, @@ -407,7 +407,7 @@ TEST(type_prop, avg_pool_partial_rank_dynamic_ok) false, op::RoundingType::FLOOR); - ASSERT_EQ(ap->get_output_element_type(0), element::f32); + ASSERT_EQ(ap->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(6))); } @@ -419,7 +419,7 @@ TEST(type_prop, avg_pool_partial_rank_dynamic_attrib_rank_mismatch) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); EXPECT_THROW(make_shared(param, window_movement_strides, @@ -439,7 +439,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_ok) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = 
make_shared(element::Type_t::f32, arg_shape); auto ap = make_shared(param, window_movement_strides, pads_begin, @@ -448,7 +448,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_ok) false, op::RoundingType::FLOOR); - ASSERT_EQ(ap->get_output_element_type(0), element::f32); + ASSERT_EQ(ap->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(6))); } @@ -460,7 +460,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_some_dims_known_ok) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); auto ap = make_shared(param, window_movement_strides, pads_begin, @@ -469,7 +469,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_some_dims_known_ok) false, op::RoundingType::FLOOR); - ASSERT_EQ(ap->get_output_element_type(0), element::f32); + ASSERT_EQ(ap->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme( PartialShape{5, Dimension::dynamic(), 7, Dimension::dynamic(), 1, 3})); } @@ -482,7 +482,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_attrib_rank_mismatch) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); EXPECT_THROW(make_shared(param, window_movement_strides, @@ -502,7 +502,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_window_not_too_big) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); EXPECT_THROW(make_shared(param, window_movement_strides, @@ -522,7 +522,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_padded_window_not_too_big) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{1, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); auto ap = make_shared(param, window_movement_strides, pads_begin, @@ -531,7 +531,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_padded_window_not_too_big) true, op::RoundingType::FLOOR); - ASSERT_EQ(ap->get_output_element_type(0), element::f32); + ASSERT_EQ(ap->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme( PartialShape{5, Dimension::dynamic(), 1, Dimension::dynamic(), 1, 3})); } @@ -544,7 +544,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_window_in_padding) const Shape pads_begin{0, 0, 0, 4}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); EXPECT_THROW(make_shared(param, window_movement_strides, diff --git a/ngraph/test/type_prop/batch_norm.cpp b/ngraph/test/type_prop/batch_norm.cpp index 0ab600a8a51ff6..61eb0b2349f1c5 100644 --- a/ngraph/test/type_prop/batch_norm.cpp +++ b/ngraph/test/type_prop/batch_norm.cpp @@ -29,11 +29,11 @@ TEST(type_prop, batch_norm_inference_partial_all_rank_dynamic) PartialShape mean_shape{PartialShape::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = 
element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -58,11 +58,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_ok) PartialShape mean_shape{PartialShape::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -88,11 +88,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_zero_chan PartialShape mean_shape{PartialShape::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -124,11 +124,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_dynamic_some_rank_static PartialShape mean_shape{Dimension::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -152,11 +152,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_dynamic_some_rank_static PartialShape mean_shape{Dimension::dynamic(), Dimension::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch 
= make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -191,11 +191,11 @@ TEST(type_prop, PartialShape mean_shape{Dimension::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -229,11 +229,11 @@ TEST(type_prop, PartialShape mean_shape{4}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -266,11 +266,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_some_stat PartialShape mean_shape{3}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -296,11 +296,11 @@ TEST(type_prop, PartialShape mean_shape{3}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -334,11 +334,11 @@ TEST(type_prop, batch_norm_inference_partial_all_rank_dynamic_v5) PartialShape mean_shape{PartialShape::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type 
gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -363,11 +363,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_ok_v5) PartialShape mean_shape{PartialShape::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -393,11 +393,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_zero_chan PartialShape mean_shape{PartialShape::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -429,11 +429,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_dynamic_some_rank_static PartialShape mean_shape{Dimension::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -458,11 +458,11 @@ TEST(type_prop, PartialShape mean_shape{Dimension::dynamic(), Dimension::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -497,11 +497,11 @@ TEST(type_prop, PartialShape mean_shape{Dimension::dynamic()}; 
PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -535,11 +535,11 @@ TEST(type_prop, PartialShape mean_shape{4}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -572,11 +572,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_some_stat PartialShape mean_shape{3}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -603,11 +603,11 @@ TEST( PartialShape mean_shape{3}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); diff --git a/ngraph/test/type_prop/batch_to_space.cpp b/ngraph/test/type_prop/batch_to_space.cpp index dfc9a1eef2a7bc..ab6f8fc7c0be1a 100644 --- a/ngraph/test/type_prop/batch_to_space.cpp +++ b/ngraph/test/type_prop/batch_to_space.cpp @@ -23,70 +23,75 @@ using namespace ngraph; TEST(type_prop, batch_to_space_output_shape_2D) { - auto data = make_shared(element::f32, Shape{10, 26}); - auto block_shape = make_shared(element::i64, Shape{2}, vector{1, 5}); - auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); - auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); + auto data = make_shared(element::Type_t::f32, Shape{10, 26}); + auto block_shape = + 
make_shared(element::Type_t::i64, Shape{2}, vector{1, 5}); + auto pads_begin = + make_shared(element::Type_t::i64, Shape{2}, vector{0, 2}); + auto pads_end = + make_shared(element::Type_t::i64, Shape{2}, vector{0, 0}); auto batch_to_space = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(batch_to_space->get_element_type(), element::f32); + ASSERT_EQ(batch_to_space->get_element_type(), element::Type_t::f32); ASSERT_EQ(batch_to_space->get_shape(), (Shape{10 / 5, 26 * 5 - 2})); } TEST(type_prop, batch_to_space_output_shape_4D) { - auto data = make_shared(element::f32, Shape{100, 7, 13, 3}); + auto data = make_shared(element::Type_t::f32, Shape{100, 7, 13, 3}); auto block_shape = - make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1}); + make_shared(element::Type_t::i64, Shape{4}, vector{1, 10, 5, 1}); auto pads_begin = - make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); + make_shared(element::Type_t::i64, Shape{4}, vector{0, 3, 1, 0}); + auto pads_end = + make_shared(element::Type_t::i64, Shape{4}, vector{0, 3, 0, 0}); auto batch_to_space = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(batch_to_space->get_element_type(), element::f32); + ASSERT_EQ(batch_to_space->get_element_type(), element::Type_t::f32); ASSERT_EQ(batch_to_space->get_shape(), (Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3})); } TEST(type_prop, batch_to_space_output_shape_5D) { - auto data = make_shared(element::f32, Shape{960, 6, 13, 128, 16}); + auto data = make_shared(element::Type_t::f32, Shape{960, 6, 13, 128, 16}); auto block_shape = - make_shared(element::i32, Shape{5}, vector{1, 6, 5, 1, 16}); + make_shared(element::Type_t::i32, Shape{5}, vector{1, 6, 5, 1, 16}); auto pads_begin = - make_shared(element::i32, Shape{5}, vector{0, 2, 0, 0, 0}); + make_shared(element::Type_t::i32, Shape{5}, vector{0, 2, 0, 0, 0}); auto pads_end = - make_shared(element::i32, Shape{5}, vector{0, 2, 1, 0, 0}); + make_shared(element::Type_t::i32, Shape{5}, vector{0, 2, 1, 0, 0}); auto batch_to_space = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(batch_to_space->get_element_type(), element::f32); + ASSERT_EQ(batch_to_space->get_element_type(), element::Type_t::f32); ASSERT_EQ(batch_to_space->get_shape(), (Shape{960 / (6 * 5 * 16), 6 * 6 - 2 - 2, 13 * 5 - 1, 128, 16 * 16})); } TEST(type_prop, batch_to_space_and_space_to_batch) { - auto data = make_shared(element::f32, Shape{4800, 9, 11, 2}); + auto data = make_shared(element::Type_t::f32, Shape{4800, 9, 11, 2}); auto block_shape = - make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); + make_shared(element::Type_t::i64, Shape{4}, vector{1, 12, 100, 2}); auto pads_begin = - make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); + make_shared(element::Type_t::i64, Shape{4}, vector{0, 3, 38, 1}); + auto pads_end = + make_shared(element::Type_t::i64, Shape{4}, vector{0, 5, 38, 0}); auto batch_to_space = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(batch_to_space->get_element_type(), element::f32); + ASSERT_EQ(batch_to_space->get_element_type(), element::Type_t::f32); ASSERT_EQ(batch_to_space->get_shape(), (Shape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, 11 * 100 - 38 - 38, 2 * 2 - 1})); auto space_to_batch = make_shared(batch_to_space, block_shape, pads_begin, pads_end); - ASSERT_EQ(space_to_batch->get_element_type(), element::f32); + 
ASSERT_EQ(space_to_batch->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_batch->get_shape(), (Shape{4800, 9, 11, 2})); } diff --git a/ngraph/test/type_prop/binary_convolution.cpp b/ngraph/test/type_prop/binary_convolution.cpp index 2c62adff237e1d..498a0f88a20fb2 100644 --- a/ngraph/test/type_prop/binary_convolution.cpp +++ b/ngraph/test/type_prop/binary_convolution.cpp @@ -33,8 +33,8 @@ TEST(type_prop, binary_conv_v1_partial_auto_padding_same) const float pad_value = 1.0f; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad); @@ -56,8 +56,8 @@ TEST(type_prop, binary_conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_lo const float pad_value = 1.0f; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad); @@ -79,8 +79,8 @@ TEST(type_prop, binary_conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_up const float pad_value = 1.0f; const auto auto_pad = op::PadType::SAME_UPPER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad); @@ -102,8 +102,8 @@ TEST(type_prop, binary_conv_v1_partial_auto_padding_same_spatial_dims_dynamic) const float pad_value = 1.0f; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad); diff --git a/ngraph/test/type_prop/binary_elementwise.cpp b/ngraph/test/type_prop/binary_elementwise.cpp index 4a77a3bbbafef1..a3eba00c806476 100644 --- a/ngraph/test/type_prop/binary_elementwise.cpp +++ b/ngraph/test/type_prop/binary_elementwise.cpp @@ -30,10 +30,10 @@ void test_binary(std::string /* node_type */, shared_ptr(f)(const shared_ptr& x, const shared_ptr& y)) { // Check for bad arguments - auto tv0_2_4_param_0 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::i32, Shape{2, 4}); - auto tv0_4_2_param = make_shared(element::f32, Shape{4, 2}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::i32, Shape{2, 4}); + auto tv0_4_2_param = make_shared(element::Type_t::f32, Shape{4, 2}); auto 
test_binary_bad_arguments_view_shapes = [&](const shared_ptr& x, const shared_ptr& y) { @@ -121,11 +121,11 @@ void test_binary_logical(std::string /* node_type */, shared_ptr(f)(const shared_ptr& x, const shared_ptr& y)) { // Check for bad arguments - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::i32, Shape{2, 4}); - auto tv0_2_4_param_3 = make_shared(element::i32, Shape{2, 4}); - auto tv0_4_2_param = make_shared(element::boolean, Shape{4, 2}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::i32, Shape{2, 4}); + auto tv0_2_4_param_3 = make_shared(element::Type_t::i32, Shape{2, 4}); + auto tv0_4_2_param = make_shared(element::Type_t::boolean, Shape{4, 2}); auto test_binary_bad_arguments_view_shapes = [&](const shared_ptr& x, const shared_ptr& y) { @@ -229,36 +229,37 @@ void test_binary_eltwise_numpy(const element::Type& et, const op::AutoBroadcastS TEST(type_prop, eltwise_auto_bcast) { - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::boolean, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::boolean, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::boolean, + op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::boolean, op::AutoBroadcastType::NUMPY); } TEST(type_prop, 
comparison_good) { - auto tv0_2_4_param_0 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); auto eq = make_shared(tv0_2_4_param_0, tv0_2_4_param_1); - EXPECT_EQ(eq->get_element_type(), element::boolean); + EXPECT_EQ(eq->get_element_type(), element::Type_t::boolean); EXPECT_EQ(eq->get_shape(), (Shape{2, 4})); } TEST(type_prop, binary_arithmetic_bad_argument_element_types) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::boolean, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::boolean, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1); @@ -278,8 +279,8 @@ TEST(type_prop, binary_arithmetic_bad_argument_element_types) TEST(type_prop, binary_elementwise_arithmetic_both_dynamic) { - auto a = make_shared(element::f32, PartialShape::dynamic()); - auto b = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto b = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).rank().is_dynamic()); @@ -287,8 +288,8 @@ TEST(type_prop, binary_elementwise_arithmetic_both_dynamic) TEST(type_prop, binary_elementwise_arithmetic_left_rank_dynamic_right_static) { - auto a = make_shared(element::f32, PartialShape::dynamic()); - auto b = make_shared(element::f32, Shape{1, 2, 3}); + auto a = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto b = make_shared(element::Type_t::f32, Shape{1, 2, 3}); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).is_static()); @@ -297,8 +298,8 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_dynamic_right_static) TEST(type_prop, binary_elementwise_arithmetic_left_static_right_rank_dynamic) { - auto a = make_shared(element::f32, Shape{1, 2, 3}); - auto b = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto b = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).is_static()); @@ -307,8 +308,9 @@ TEST(type_prop, binary_elementwise_arithmetic_left_static_right_rank_dynamic) TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_right_rank_dynamic) { - auto a = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 3}); - auto b = make_shared(element::f32, PartialShape::dynamic()); + auto a = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 3}); + auto b = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).rank().is_static()); @@ -319,8 +321,9 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_right_ran TEST(type_prop, binary_elementwise_arithmetic_left_rank_dynamic_right_rank_static_dynamic) { - auto a = make_shared(element::f32, PartialShape::dynamic()); - auto b = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 3}); + auto a = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 3}); auto 
add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).rank().is_static()); @@ -332,8 +335,10 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_dynamic_right_rank_stati TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_right_rank_static_dynamic_result_static) { - auto a = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 3}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 3}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).is_static()); @@ -345,8 +350,9 @@ TEST( binary_elementwise_arithmetic_left_rank_static_dynamic_right_rank_static_dynamic_result_rank_static_dynamic) { auto a = make_shared( - element::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + element::Type_t::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic()}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).rank().is_static()); @@ -357,8 +363,9 @@ TEST( TEST(type_prop, binary_elementwise_arithmetic_left_static_right_rank_static_dynamic) { - auto a = make_shared(element::f32, PartialShape{1, 2, 3}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = make_shared(element::Type_t::f32, PartialShape{1, 2, 3}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).is_static()); @@ -367,8 +374,9 @@ TEST(type_prop, binary_elementwise_arithmetic_left_static_right_rank_static_dyna TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_right_static) { - auto a = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{1, 2, 3}); + auto a = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto b = make_shared(element::Type_t::f32, PartialShape{1, 2, 3}); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).is_static()); @@ -377,8 +385,9 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_right_sta TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_inconsistent) { - auto a = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{1, 3, 3}); + auto a = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto b = make_shared(element::Type_t::f32, PartialShape{1, 3, 3}); try { @@ -397,8 +406,9 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_inconsist TEST(type_prop, binary_elementwise_arithmetic_right_rank_static_dynamic_inconsistent) { - auto a = make_shared(element::f32, PartialShape{1, 3, 3}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = make_shared(element::Type_t::f32, PartialShape{1, 3, 3}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); try { @@ -417,8 +427,10 @@ TEST(type_prop, binary_elementwise_arithmetic_right_rank_static_dynamic_inconsis TEST(type_prop, 
binary_elementwise_arithmetic_both_rank_static_dynamic_inconsistent) { - auto a = make_shared(element::f32, PartialShape{Dimension::dynamic(), 3, 3}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 3, 3}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); try { @@ -437,8 +449,9 @@ TEST(type_prop, binary_elementwise_arithmetic_both_rank_static_dynamic_inconsist TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_different_rank) { - auto a = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{1, 2, 3, 4}); + auto a = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto b = make_shared(element::Type_t::f32, PartialShape{1, 2, 3, 4}); try { @@ -457,8 +470,9 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_different TEST(type_prop, binary_elementwise_arithmetic_right_rank_static_dynamic_different_rank) { - auto a = make_shared(element::f32, PartialShape{1, 2, 3, 4}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = make_shared(element::Type_t::f32, PartialShape{1, 2, 3, 4}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); try { @@ -477,8 +491,10 @@ TEST(type_prop, binary_elementwise_arithmetic_right_rank_static_dynamic_differen TEST(type_prop, binary_elementwise_arithmetic_both_rank_static_dynamic_different_rank) { - auto a = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 3, 4}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = make_shared(element::Type_t::f32, + PartialShape{1, Dimension::dynamic(), 3, 4}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); try { @@ -497,8 +513,8 @@ TEST(type_prop, binary_elementwise_arithmetic_both_rank_static_dynamic_different TEST(type_prop, binary_elementwise_arithmetic_both_et_dynamic) { - auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); - auto b = make_shared(element::dynamic, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::dynamic, Shape{1, 2, 3, 4}); + auto b = make_shared(element::Type_t::dynamic, Shape{1, 2, 3, 4}); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_element_type(0).is_dynamic()); @@ -506,20 +522,20 @@ TEST(type_prop, binary_elementwise_arithmetic_both_et_dynamic) TEST(type_prop, binary_elementwise_arithmetic_left_et_dynamic) { - auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); - auto b = make_shared(element::u32, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::dynamic, Shape{1, 2, 3, 4}); + auto b = make_shared(element::Type_t::u32, Shape{1, 2, 3, 4}); auto add = make_shared(a, b); - ASSERT_EQ(add->get_output_element_type(0), element::u32); + ASSERT_EQ(add->get_output_element_type(0), element::Type_t::u32); } TEST(type_prop, binary_elementwise_arithmetic_right_et_dynamic) { - auto a = make_shared(element::i64, Shape{1, 2, 3, 4}); - auto b = make_shared(element::dynamic, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::i64, Shape{1, 2, 3, 4}); + auto b = make_shared(element::Type_t::dynamic, Shape{1, 2, 3, 4}); auto add = make_shared(a, b); - ASSERT_EQ(add->get_output_element_type(0), element::i64); + ASSERT_EQ(add->get_output_element_type(0), element::Type_t::i64); } TEST(type_prop, 
logic_arith_compare_partial_et) @@ -552,15 +568,19 @@ TEST(type_prop, logic_arith_compare_partial_et) // dyn int -> int // dyn boo -> ! // dyn dyn -> dyn - ASSERT_EQ(test_arith(element::i32, element::i32)->get_element_type(), element::i32); - ASSERT_ANY_THROW({ test_arith(element::i32, element::boolean); }); - ASSERT_EQ(test_arith(element::i32, element::dynamic)->get_element_type(), element::i32); - ASSERT_ANY_THROW({ test_arith(element::boolean, element::i32); }); - ASSERT_ANY_THROW({ test_arith(element::boolean, element::boolean); }); - ASSERT_ANY_THROW({ test_arith(element::boolean, element::dynamic); }); - ASSERT_EQ(test_arith(element::dynamic, element::i32)->get_element_type(), element::i32); - ASSERT_ANY_THROW({ test_arith(element::dynamic, element::boolean); }); - ASSERT_EQ(test_arith(element::dynamic, element::dynamic)->get_element_type(), element::dynamic); + ASSERT_EQ(test_arith(element::Type_t::i32, element::Type_t::i32)->get_element_type(), + element::Type_t::i32); + ASSERT_ANY_THROW({ test_arith(element::Type_t::i32, element::Type_t::boolean); }); + ASSERT_EQ(test_arith(element::Type_t::i32, element::Type_t::dynamic)->get_element_type(), + element::Type_t::i32); + ASSERT_ANY_THROW({ test_arith(element::Type_t::boolean, element::Type_t::i32); }); + ASSERT_ANY_THROW({ test_arith(element::Type_t::boolean, element::Type_t::boolean); }); + ASSERT_ANY_THROW({ test_arith(element::Type_t::boolean, element::Type_t::dynamic); }); + ASSERT_EQ(test_arith(element::Type_t::dynamic, element::Type_t::i32)->get_element_type(), + element::Type_t::i32); + ASSERT_ANY_THROW({ test_arith(element::Type_t::dynamic, element::Type_t::boolean); }); + ASSERT_EQ(test_arith(element::Type_t::dynamic, element::Type_t::dynamic)->get_element_type(), + element::Type_t::dynamic); // Comparison ops: // @@ -573,19 +593,22 @@ TEST(type_prop, logic_arith_compare_partial_et) // dyn int -> boo // dyn boo -> boo // dyn dyn -> boo - ASSERT_EQ(test_compare(element::i32, element::i32)->get_element_type(), element::boolean); - ASSERT_ANY_THROW({ test_compare(element::i32, element::boolean); }); - ASSERT_EQ(test_compare(element::i32, element::dynamic)->get_element_type(), element::boolean); - ASSERT_ANY_THROW({ test_compare(element::boolean, element::i32); }); - ASSERT_EQ(test_compare(element::boolean, element::boolean)->get_element_type(), - element::boolean); - ASSERT_EQ(test_compare(element::boolean, element::dynamic)->get_element_type(), - element::boolean); - ASSERT_EQ(test_compare(element::dynamic, element::i32)->get_element_type(), element::boolean); - ASSERT_EQ(test_compare(element::dynamic, element::boolean)->get_element_type(), - element::boolean); - ASSERT_EQ(test_compare(element::dynamic, element::dynamic)->get_element_type(), - element::boolean); + ASSERT_EQ(test_compare(element::Type_t::i32, element::Type_t::i32)->get_element_type(), + element::Type_t::boolean); + ASSERT_ANY_THROW({ test_compare(element::Type_t::i32, element::Type_t::boolean); }); + ASSERT_EQ(test_compare(element::Type_t::i32, element::Type_t::dynamic)->get_element_type(), + element::Type_t::boolean); + ASSERT_ANY_THROW({ test_compare(element::Type_t::boolean, element::Type_t::i32); }); + ASSERT_EQ(test_compare(element::Type_t::boolean, element::Type_t::boolean)->get_element_type(), + element::Type_t::boolean); + ASSERT_EQ(test_compare(element::Type_t::boolean, element::Type_t::dynamic)->get_element_type(), + element::Type_t::boolean); + ASSERT_EQ(test_compare(element::Type_t::dynamic, element::Type_t::i32)->get_element_type(), + 
element::Type_t::boolean); + ASSERT_EQ(test_compare(element::Type_t::dynamic, element::Type_t::boolean)->get_element_type(), + element::Type_t::boolean); + ASSERT_EQ(test_compare(element::Type_t::dynamic, element::Type_t::dynamic)->get_element_type(), + element::Type_t::boolean); // Logical negation op: // @@ -598,7 +621,9 @@ TEST(type_prop, logic_arith_compare_partial_et) // int -> ! // boo -> boo // dyn -> boo - ASSERT_EQ(test_logical_not(element::i32)->get_element_type(), element::i32); - ASSERT_EQ(test_logical_not(element::boolean)->get_element_type(), element::boolean); - ASSERT_EQ(test_logical_not(element::dynamic)->get_element_type(), element::dynamic); + ASSERT_EQ(test_logical_not(element::Type_t::i32)->get_element_type(), element::Type_t::i32); + ASSERT_EQ(test_logical_not(element::Type_t::boolean)->get_element_type(), + element::Type_t::boolean); + ASSERT_EQ(test_logical_not(element::Type_t::dynamic)->get_element_type(), + element::Type_t::dynamic); } diff --git a/ngraph/test/type_prop/broadcast.cpp b/ngraph/test/type_prop/broadcast.cpp index dc4d89f13a65bc..12ed855445e1c0 100644 --- a/ngraph/test/type_prop/broadcast.cpp +++ b/ngraph/test/type_prop/broadcast.cpp @@ -32,39 +32,43 @@ TYPED_TEST_CASE_P(BroadcastTests); TYPED_TEST_P(BroadcastTests, broadcast_numpy) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6}); auto bc = make_shared(param, target_shape); - ASSERT_EQ(bc->get_element_type(), element::f32); + ASSERT_EQ(bc->get_element_type(), element::Type_t::f32); ASSERT_EQ(bc->get_shape(), (Shape{2, 3, 6})); } TYPED_TEST_P(BroadcastTests, broadcast_axes_mapping) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 1}); - auto axes_mapping = op::Constant::create(element::i64, Shape{2}, {1, 2}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 1}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); auto bc = make_shared(param, target_shape, axes_mapping); - ASSERT_EQ(bc->get_element_type(), element::f32); + ASSERT_EQ(bc->get_element_type(), element::Type_t::f32); ASSERT_EQ(bc->get_shape(), (Shape{2, 3, 1})); } TYPED_TEST_P(BroadcastTests, broadcast_target_shape_as_concat_with_constants) { - auto param = make_shared(element::f32, Shape{16}); - auto target_shape_constant_1 = op::Constant::create(element::i64, Shape{1}, {1}); - auto target_shape_constant_2 = op::Constant::create(element::i64, Shape{1}, {16}); - auto target_shape_constant_3 = op::Constant::create(element::i64, Shape{1}, {50}); - auto target_shape_constant_4 = op::Constant::create(element::i64, Shape{1}, {50}); + auto param = make_shared(element::Type_t::f32, Shape{16}); + auto target_shape_constant_1 = + op::Constant::create(element::Type_t::i64, Shape{1}, {1}); + auto target_shape_constant_2 = + op::Constant::create(element::Type_t::i64, Shape{1}, {16}); + auto target_shape_constant_3 = + op::Constant::create(element::Type_t::i64, Shape{1}, {50}); + auto target_shape_constant_4 = + op::Constant::create(element::Type_t::i64, Shape{1}, {50}); std::int64_t axis = 0; std::vector> args{target_shape_constant_1, target_shape_constant_2, target_shape_constant_3, target_shape_constant_4}; auto 
target_shape = make_shared(args, axis); - auto axes_mapping = op::Constant::create(element::i64, Shape{1}, {1}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{1}, {1}); auto bc = make_shared(param, target_shape, axes_mapping, "NONE"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().same_scheme(Rank{4})); @@ -74,18 +78,21 @@ TYPED_TEST_P(BroadcastTests, broadcast_target_shape_as_concat_with_constants) TYPED_TEST_P(BroadcastTests, broadcast_target_shape_as_concat_with_node) { - auto param = make_shared(element::f32, Shape{16}); - auto target_shape_constant_1 = make_shared(element::i64, Shape{1}); - auto target_shape_constant_2 = op::Constant::create(element::i64, Shape{1}, {16}); - auto target_shape_constant_3 = op::Constant::create(element::i64, Shape{1}, {50}); - auto target_shape_constant_4 = op::Constant::create(element::i64, Shape{1}, {50}); + auto param = make_shared(element::Type_t::f32, Shape{16}); + auto target_shape_constant_1 = make_shared(element::Type_t::i64, Shape{1}); + auto target_shape_constant_2 = + op::Constant::create(element::Type_t::i64, Shape{1}, {16}); + auto target_shape_constant_3 = + op::Constant::create(element::Type_t::i64, Shape{1}, {50}); + auto target_shape_constant_4 = + op::Constant::create(element::Type_t::i64, Shape{1}, {50}); std::int64_t axis = 0; std::vector> args{target_shape_constant_1, target_shape_constant_2, target_shape_constant_3, target_shape_constant_4}; auto target_shape = make_shared(args, axis); - auto axes_mapping = op::Constant::create(element::i64, Shape{1}, {1}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{1}, {1}); auto bc = make_shared(param, target_shape, axes_mapping, "NONE"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().same_scheme(Rank{4})); @@ -96,9 +103,9 @@ TYPED_TEST_P(BroadcastTests, broadcast_target_shape_as_concat_with_node) TYPED_TEST_P(BroadcastTests, broadcast_fail_rank) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 1}); - auto axes_mapping = op::Constant::create(element::i64, Shape{3}, {1, 2, 3}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 1}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{3}, {1, 2, 3}); try { @@ -119,9 +126,9 @@ TYPED_TEST_P(BroadcastTests, broadcast_fail_rank) TYPED_TEST_P(BroadcastTests, broadcast_fail_transpose) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 1, 3}); - auto axes_mapping = op::Constant::create(element::i64, Shape{2}, {2, 1}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 1, 3}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 1}); try { @@ -142,9 +149,9 @@ TYPED_TEST_P(BroadcastTests, broadcast_fail_transpose) TYPED_TEST_P(BroadcastTests, broadcast_fail_axes_map) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 1}); - auto axes_mapping = op::Constant::create(element::i64, Shape{2}, {1, 3}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = 
op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 1}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 3}); try { @@ -163,9 +170,9 @@ TYPED_TEST_P(BroadcastTests, broadcast_fail_axes_map) TYPED_TEST_P(BroadcastTests, broadcast_fail_axes_map_shape) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 3}); - auto axes_mapping = op::Constant::create(element::i64, Shape{2}, {1, 2}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 3}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); try { @@ -184,9 +191,9 @@ TYPED_TEST_P(BroadcastTests, broadcast_fail_axes_map_shape) TYPED_TEST_P(BroadcastTests, broadcast_axes_wrong_rank) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto bc_shape = make_shared(element::i64, Shape{1}); - auto bc_axes = make_shared(element::i64, Shape{2, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto bc_shape = make_shared(element::Type_t::i64, Shape{1}); + auto bc_axes = make_shared(element::Type_t::i64, Shape{2, 2}); try { @@ -205,24 +212,24 @@ TYPED_TEST_P(BroadcastTests, broadcast_axes_wrong_rank) TYPED_TEST_P(BroadcastTests, broadcast_fully_dynamic_target_shape) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto bc_shape = make_shared(element::i64, PartialShape::dynamic()); - auto bc_axes = make_shared(element::i64, Shape{2}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto bc_shape = make_shared(element::Type_t::i64, PartialShape::dynamic()); + auto bc_axes = make_shared(element::Type_t::i64, Shape{2}); auto bc = make_shared(arg, bc_shape, bc_axes); ASSERT_TRUE(bc->get_output_partial_shape(0).is_dynamic()); - bc_shape = make_shared(element::i64, Shape{1}); + bc_shape = make_shared(element::Type_t::i64, Shape{1}); bc = make_shared(arg, bc_shape, bc_axes); ASSERT_TRUE(bc->get_output_partial_shape(0).is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_broadcast_shape_et_wrong) { - auto arg = make_shared(element::f32, Shape{2, 4}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); // wrong element type - auto bc_shape = make_shared(element::boolean, Shape{1}); - auto bc_axes = make_shared(element::i64, Shape{2}); + auto bc_shape = make_shared(element::Type_t::boolean, Shape{1}); + auto bc_axes = make_shared(element::Type_t::i64, Shape{2}); try { @@ -242,10 +249,10 @@ TYPED_TEST_P(BroadcastTests, broadcast_broadcast_shape_et_wrong) TYPED_TEST_P(BroadcastTests, broadcast_axes_et_wrong) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto bc_shape = make_shared(element::i64, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto bc_shape = make_shared(element::Type_t::i64, Shape{1}); // wrong element type - auto bc_axes = make_shared(element::f32, Shape{2}); + auto bc_axes = make_shared(element::Type_t::f32, Shape{2}); try { @@ -267,42 +274,47 @@ TYPED_TEST_P(BroadcastTests, broadcast_axes_et_wrong) TYPED_TEST_P(BroadcastTests, broadcast_explicit_all_inputs_dynamic) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic()); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto target_shape = + 
make_shared(element::Type_t::i64, PartialShape::dynamic()); + const auto axes_mapping = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{3}, vector{0, 1, 2}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{0, 1, 2}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_explicit_target_shape_static_rank) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic(1)); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto target_shape = + make_shared(element::Type_t::i64, PartialShape::dynamic(1)); + const auto axes_mapping = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{3}, vector{0, 1, 2}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{0, 1, 2}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); const auto target_shape = - op::Constant::create(element::i64, Shape{3}, vector{1, 2, 3}); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{1, 2, 3}); + const auto axes_mapping = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); @@ -312,7 +324,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape) // const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{3}, vector{0, 2, 1}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{0, 2, 1}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); @@ -322,16 +334,18 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape) TYPED_TEST_P(BroadcastTests, broadcast_explicit_input_rank_static) { - const auto data = make_shared(element::f32, PartialShape::dynamic(3)); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic()); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + const auto target_shape = + make_shared(element::Type_t::i64, PartialShape::dynamic()); + const auto axes_mapping = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // const axes mapping const auto axes_mapping_const = - 
op::Constant::create(element::i64, Shape{3}, vector{0, 2, 1}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{0, 2, 1}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } @@ -339,16 +353,17 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_input_rank_static) TYPED_TEST_P(BroadcastTests, broadcast_explicit_target_shape_and_input_data_rank_static) { // static rank data - const auto data = make_shared(element::f32, PartialShape::dynamic(3)); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic(1)); - auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + const auto target_shape = + make_shared(element::Type_t::i64, PartialShape::dynamic(1)); + auto axes_mapping = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{3}, vector{0, 2, 1}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{0, 2, 1}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } @@ -356,10 +371,10 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_target_shape_and_input_data_rank TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape_static_rank_input) { const auto target_shape = - op::Constant::create(element::i64, Shape{4}, vector{1, 1, 5, 10}); + op::Constant::create(element::Type_t::i64, Shape{4}, vector{1, 1, 5, 10}); // static rank data - const auto data = make_shared(element::f32, PartialShape::dynamic(3)); - auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + auto axes_mapping = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); @@ -368,7 +383,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape_static_rank_i // const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{4}, vector{0, 2, 1, 3}); + op::Constant::create(element::Type_t::i64, Shape{4}, vector{0, 2, 1, 3}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); @@ -377,37 +392,39 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape_static_rank_i TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_input_shape) { - const auto data = make_shared(element::f32, PartialShape{1, 2, 3, 4}); + const auto data = make_shared(element::Type_t::f32, PartialShape{1, 2, 3, 4}); // dynamic target shape and axes mapping - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); - auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + auto target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic()); + auto axes_mapping = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // 
const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{4}, vector{0, 2, 1, 3}); + op::Constant::create(element::Type_t::i64, Shape{4}, vector{0, 2, 1, 3}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape and const axes mapping - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_input_shape_const_target_shape) { - const auto data = make_shared(element::f32, PartialShape{4}); - auto target_shape = op::Constant::create(element::i64, Shape{4}, vector{1, 4, 2, 3}); + const auto data = make_shared(element::Type_t::f32, PartialShape{4}); + auto target_shape = + op::Constant::create(element::Type_t::i64, Shape{4}, vector{1, 4, 2, 3}); // dynamic axes mapping - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto axes_mapping = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); @@ -416,7 +433,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_input_shape_const_target_ // const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{1}, vector{1}); + op::Constant::create(element::Type_t::i64, Shape{1}, vector{1}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); @@ -426,9 +443,10 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_input_shape_const_target_ TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_target_shape) { // dynamic input - auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto target_shape = make_shared(element::i64, PartialShape{4}); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto target_shape = make_shared(element::Type_t::i64, PartialShape{4}); + const auto axes_mapping = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); @@ -436,7 +454,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_target_shape) ASSERT_TRUE(bc->get_output_partial_shape(0).is_dynamic()); // static rank input - data = make_shared(element::f32, PartialShape::dynamic(2)); + data = make_shared(element::Type_t::f32, PartialShape::dynamic(2)); bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); @@ -447,15 +465,15 @@ TYPED_TEST_P(BroadcastTests, 
broadcast_explicit_static_target_shape) TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_shape_dynamic) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); // dynamic output shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); + auto target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } @@ -463,16 +481,16 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_shape_dynamic) TYPED_TEST_P(BroadcastTests, broadcast_numpy_target_shape_constant) { // dynamic data - auto data = make_shared(element::f32, PartialShape::dynamic()); + auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); const auto target_shape = - op::Constant::create(element::i64, Shape{3}, vector{1, 2, 3}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{1, 2, 3}); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 3); // static rank data - data = make_shared(element::f32, PartialShape::dynamic(2)); + data = make_shared(element::Type_t::f32, PartialShape::dynamic(2)); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 3); @@ -481,22 +499,24 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_target_shape_constant) TYPED_TEST_P(BroadcastTests, broadcast_numpy_target_shape_dynamic) { // static rank data - auto data = make_shared(element::f32, PartialShape::dynamic(3)); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic()); + auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + const auto target_shape = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // static shape data - data = make_shared(element::f32, PartialShape{3, 4, 5, 6}); + data = make_shared(element::Type_t::f32, PartialShape{3, 4, 5, 6}); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_target_shape_static_rank) { - const auto data = make_shared(element::f32, PartialShape::dynamic(3)); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + const auto target_shape = + make_shared(element::Type_t::i64, PartialShape::dynamic(1)); const auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); @@ -504,16 +524,16 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_target_shape_static_rank) TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_static_shape) { - const auto data = make_shared(element::f32, PartialShape{1, 2, 3}); + const auto data = make_shared(element::Type_t::f32, PartialShape{1, 
2, 3}); // static rank target_shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + auto target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // constant target_shape const auto target_shape_const = - op::Constant::create(element::i64, Shape{3}, vector{3, 2, 3}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{3, 2, 3}); bc = make_shared(data, target_shape_const, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 3); @@ -525,24 +545,25 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_partially_dynamic) { const Shape expected_target_shape{1, 2, 3, 4}; const auto target_shape = op::Constant::create( - element::i64, + element::Type_t::i64, {expected_target_shape.size()}, std::vector(expected_target_shape.begin(), expected_target_shape.end())); - auto data = make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic()}); + auto data = + make_shared(element::Type_t::f32, PartialShape{2, 3, Dimension::dynamic()}); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_EQ(bc->get_output_partial_shape(0), expected_target_shape); - data = make_shared(element::f32, + data = make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 3, Dimension::dynamic()}); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_EQ(bc->get_output_partial_shape(0), expected_target_shape); - data = make_shared(element::f32, + data = make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); @@ -550,7 +571,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_partially_dynamic) ASSERT_EQ(bc->get_output_partial_shape(0), expected_target_shape); data = make_shared( - element::f32, + element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); @@ -560,10 +581,10 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_partially_dynamic) TYPED_TEST_P(BroadcastTests, broadcast_numpy_static_dims_incorrect) { - const auto target_shape = op::Constant::create(element::i64, Shape{4}, {1, 2, 3, 4}); + const auto target_shape = op::Constant::create(element::Type_t::i64, Shape{4}, {1, 2, 3, 4}); - auto data = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 999, 3, 4}); + auto data = make_shared(element::Type_t::f32, + PartialShape{Dimension::dynamic(), 999, 3, 4}); try { auto bc = make_shared(data, target_shape, "NUMPY"); @@ -580,7 +601,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_static_dims_incorrect) } data = make_shared( - element::f32, + element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 888}); try { @@ -598,7 +619,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_static_dims_incorrect) } data = make_shared( - element::f32, + element::Type_t::f32, PartialShape{5, Dimension::dynamic(), Dimension::dynamic(), 
Dimension::dynamic()}); try { @@ -654,30 +675,30 @@ INSTANTIATE_TYPED_TEST_CASE_P(type_prop, BroadcastTests, BroadcastTypes, ); // changing AutoBroadcastSpec to BroadcastModeSpec forces running pdpd tests separately TEST(type_prop, broadcast_v1_pdpd) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6}); auto bc = make_shared( param, target_shape, op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1)); - ASSERT_EQ(bc->get_element_type(), element::f32); + ASSERT_EQ(bc->get_element_type(), element::Type_t::f32); ASSERT_EQ(bc->get_shape(), (Shape{2, 3, 6})); } TEST(type_prop, broadcast_v3_pdpd) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6}); auto bc = make_shared( param, target_shape, op::BroadcastModeSpec(op::BroadcastType::PDPD, 1)); - ASSERT_EQ(bc->get_element_type(), element::f32); + ASSERT_EQ(bc->get_element_type(), element::Type_t::f32); ASSERT_EQ(bc->get_shape(), (Shape{2, 3, 6})); } TEST(type_prop, broadcast_v3_bidirectional_mode_string) { - const auto arg = make_shared(element::f32, Shape{1, 4, 1}); - const auto shape = make_shared(element::i32, Shape{2}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 4, 1}); + const auto shape = make_shared(element::Type_t::i32, Shape{2}); const auto broadcast_v3 = make_shared(arg, shape, "BIDIRECTIONAL"); @@ -687,9 +708,9 @@ TEST(type_prop, broadcast_v3_bidirectional_mode_string) TEST(type_prop, broadcast_v3_shape_unexpected_axes_mapping_input) { - const auto arg = make_shared(element::f32, Shape{1, 4, 1}); - const auto shape = make_shared(element::i16, Shape{2}); - const auto axes_mapping = make_shared(element::f32, Shape{3}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 4, 1}); + const auto shape = make_shared(element::Type_t::i16, Shape{2}); + const auto axes_mapping = make_shared(element::Type_t::f32, Shape{3}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; try @@ -712,8 +733,8 @@ TEST(type_prop, broadcast_v3_shape_unexpected_axes_mapping_input) TEST(type_prop, broadcast_v3_not_provided_axes_input_for_explicit_mode) { - const auto arg = make_shared(element::f32, Shape{1, 4, 1}); - const auto shape = make_shared(element::i16, Shape{2}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 4, 1}); + const auto shape = make_shared(element::Type_t::i16, Shape{2}); const auto broadcast_spec = op::BroadcastType::EXPLICIT; try @@ -735,65 +756,65 @@ TEST(type_prop, broadcast_v3_not_provided_axes_input_for_explicit_mode) TEST(type_prop, broadcast_v3_shape) { - const auto arg = make_shared(element::f32, Shape{1, 4, 1}); - const auto shape = op::Constant::create(element::i64, {2}, {1, 4}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 4, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {2}, {1, 4}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); + ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::f32); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{1, 4,
4})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{2}))); } TEST(type_prop, broadcast_v3_shape_2) { - const auto arg = make_shared(element::f32, Shape{3, 1}); - const auto shape = op::Constant::create(element::i64, {3}, {2, 1, 6}); + const auto arg = make_shared(element::Type_t::f32, Shape{3, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {3}, {2, 1, 6}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); + ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::f32); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{2, 3, 6})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{0, 2}))); } TEST(type_prop, broadcast_v3_shape_3) { - const auto arg = make_shared(element::f32, Shape{2, 1}); - const auto shape = op::Constant::create(element::i64, {2}, {2, 4}); + const auto arg = make_shared(element::Type_t::f32, Shape{2, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {2}, {2, 4}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); + ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::f32); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{2, 4})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{1}))); } TEST(type_prop, broadcast_v3_shape_4) { - const auto arg = make_shared(element::f32, Shape{1, 3, 1}); - const auto shape = op::Constant::create(element::i64, {2}, {3, 1}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 3, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {2}, {3, 1}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); + ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::f32); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{1, 3, 1})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{}))); } TEST(type_prop, broadcast_v3_shape_5) { - const auto arg = make_shared(element::f32, Shape{16, 1, 1}); - const auto shape = op::Constant::create(element::i64, {4}, {1, 1, 50, 50}); + const auto arg = make_shared(element::Type_t::f32, Shape{16, 1, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {4}, {1, 1, 50, 50}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); + ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::f32); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{1, 16, 50, 50})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{0, 2, 3}))); @@ -801,34 +822,34 @@ TEST(type_prop, broadcast_v3_shape_5) TEST(type_prop, broadcast_v3_shape_6) { - const auto arg = make_shared(element::f32, Shape{1, 3, 1}); - const auto shape = op::Constant::create(element::i64, {3}, {3, 1, 3}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 3, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {3}, {3, 1, 3}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); + 
ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::f32); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{3, 3, 3})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{0, 2}))); } TEST(type_prop, broadcast_v3_shape_6_type_infer) { - const auto arg = make_shared(element::u16, Shape{1, 3, 1}); - const auto shape = op::Constant::create(element::i64, {3}, {3, 1, 3}); + const auto arg = make_shared(element::Type_t::u16, Shape{1, 3, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {3}, {3, 1, 3}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::u16); + ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::u16); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{3, 3, 3})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{0, 2}))); } TEST(type_prop, broadcast_v3_incorrect_target_shape) { - const auto arg = make_shared(element::f32, Shape{4, 3, 2}); - const auto shape = op::Constant::create(element::i64, {3}, {8, 6, 4}); + const auto arg = make_shared(element::Type_t::f32, Shape{4, 3, 2}); + const auto shape = op::Constant::create(element::Type_t::i64, {3}, {8, 6, 4}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; try @@ -850,8 +871,8 @@ TEST(type_prop, broadcast_v3_incorrect_target_shape) TEST(type_prop, broadcast_v3_incorrect_target_shape_2) { - const auto arg = make_shared(element::f32, Shape{1, 1, 2}); - const auto shape = op::Constant::create(element::i64, {2}, {2, 3}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 1, 2}); + const auto shape = op::Constant::create(element::Type_t::i64, {2}, {2, 3}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; try @@ -873,8 +894,8 @@ TEST(type_prop, broadcast_v3_incorrect_target_shape_2) TEST(type_prop, broadcast_v3_output_rank_not_deduced) { - const auto arg = make_shared(element::f32, PartialShape::dynamic()); - const auto shape = make_shared(element::i64, PartialShape::dynamic(1)); + const auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); @@ -884,8 +905,8 @@ TEST(type_prop, broadcast_v3_output_rank_not_deduced) TEST(type_prop, broadcast_v3_output_rank_deduced_from_arg) { - const auto arg = make_shared(element::f32, PartialShape::dynamic(4)); - const auto shape = op::Constant::create(element::i64, {3}, {8, 6, 4}); + const auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic(4)); + const auto shape = op::Constant::create(element::Type_t::i64, {3}, {8, 6, 4}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); @@ -895,8 +916,8 @@ TEST(type_prop, broadcast_v3_output_rank_deduced_from_arg) TEST(type_prop, broadcast_v3_output_rank_deduced_from_new_shape_input) { - const auto arg = make_shared(element::f32, PartialShape::dynamic(4)); - const auto shape = op::Constant::create(element::i64, {5}, {8, 6, 1, 5, 1}); + const auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic(4)); + const auto shape = op::Constant::create(element::Type_t::i64, {5}, {8, 6, 1, 5, 1}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, 
broadcast_spec); @@ -908,40 +929,40 @@ TEST(type_prop, broadcast_v3_output_rank_deduced_from_new_shape_input) TEST(type_prop, broadcast_v3_bidirectional_dynamic_input) { - const auto arg = make_shared(element::f32, PartialShape::dynamic()); + const auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic()); // dynamic target shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); + auto target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // constant target shape - const auto target_shape_const = op::Constant::create(element::i64, {3}, {2, 4, 6}); + const auto target_shape_const = op::Constant::create(element::Type_t::i64, {3}, {2, 4, 6}); broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); } TEST(type_prop, broadcast_v3_bidirectional_static_rank_input) { - const auto arg = make_shared(element::f32, PartialShape::dynamic(4)); + const auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic(4)); // dynamic target shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); + auto target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // constant target shape - const auto target_shape_const = op::Constant::create(element::i64, {3}, {2, 4, 6}); + const auto target_shape_const = op::Constant::create(element::Type_t::i64, {3}, {2, 4, 6}); broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(broadcast_v3->get_output_partial_shape(0).rank().get_length(), 4); @@ -950,27 +971,27 @@ TEST(type_prop, broadcast_v3_bidirectional_static_rank_input) TEST(type_prop, broadcast_v3_bidirectional_static_shape_input) { - const auto arg = make_shared(element::f32, PartialShape{1, 2, 3, 1}); + const auto arg = make_shared(element::Type_t::f32, PartialShape{1, 2, 3, 1}); // dynamic target shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); + auto target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // constant 
target shape - auto target_shape_const = op::Constant::create(element::i64, {4}, {2, 2, 3, 2}); + auto target_shape_const = op::Constant::create(element::Type_t::i64, {4}, {2, 2, 3, 2}); broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(broadcast_v3->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).is_static()); ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), (PartialShape{2, 2, 3, 2})); - target_shape_const = op::Constant::create(element::i64, {4}, {5, 2, 3, 7}); + target_shape_const = op::Constant::create(element::Type_t::i64, {4}, {5, 2, 3, 7}); broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(broadcast_v3->get_output_partial_shape(0).rank().get_length(), 4); @@ -981,22 +1002,23 @@ TEST(type_prop, broadcast_v3_bidirectional_static_shape_input) TEST(type_prop, broadcast_v3_bidirectional_partially_dynamic_input) { const auto target_shape = - op::Constant::create(element::i64, Shape{4}, vector{1, 1, 50, 50}); + op::Constant::create(element::Type_t::i64, Shape{4}, vector{1, 1, 50, 50}); - auto data = make_shared(element::f32, PartialShape{16, 1, Dimension::dynamic()}); + auto data = + make_shared(element::Type_t::f32, PartialShape{16, 1, Dimension::dynamic()}); auto bc = make_shared(data, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_EQ(bc->get_output_partial_shape(0), (PartialShape{1, 16, 50, 50})); - data = make_shared(element::f32, + data = make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 1, Dimension::dynamic()}); bc = make_shared(data, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_EQ(bc->get_output_partial_shape(0), (PartialShape{1, Dimension::dynamic(), 50, 50})); - data = make_shared(element::f32, + data = make_shared(element::Type_t::f32, PartialShape{16, Dimension::dynamic(), Dimension::dynamic()}); bc = make_shared(data, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); @@ -1004,7 +1026,7 @@ TEST(type_prop, broadcast_v3_bidirectional_partially_dynamic_input) ASSERT_EQ(bc->get_output_partial_shape(0), (PartialShape{1, 16, 50, 50})); data = make_shared( - element::f32, + element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}); bc = make_shared(data, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); diff --git a/ngraph/test/type_prop/bucketize.cpp b/ngraph/test/type_prop/bucketize.cpp index 44fbc8cbf1e531..89cd4d30b399ce 100644 --- a/ngraph/test/type_prop/bucketize.cpp +++ b/ngraph/test/type_prop/bucketize.cpp @@ -23,62 +23,66 @@ using namespace ngraph; TEST(type_prop, bucketize) { - auto data = make_shared(element::f32, Shape{2, 3, 2}); - auto buckets = make_shared(element::f32, Shape{4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 3, 2}); + auto buckets = make_shared(element::Type_t::f32, Shape{4}); auto bucketize = make_shared(data, buckets); - EXPECT_EQ(bucketize->get_element_type(), element::i64); + EXPECT_EQ(bucketize->get_element_type(), element::Type_t::i64); 
EXPECT_TRUE(bucketize->get_output_partial_shape(0).same_scheme(PartialShape{2, 3, 2})); } TEST(type_prop, bucketize_output_type) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto buckets = make_shared(element::f32, Shape{5}); - auto bucketize = make_shared(data, buckets, element::i32); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto buckets = make_shared(element::Type_t::f32, Shape{5}); + auto bucketize = make_shared(data, buckets, element::Type_t::i32); - ASSERT_EQ(bucketize->get_output_element_type(0), element::i32); + ASSERT_EQ(bucketize->get_output_element_type(0), element::Type_t::i32); EXPECT_TRUE(bucketize->get_output_partial_shape(0).same_scheme(PartialShape{1, 2, 3, 4})); } TEST(type_prop, bucketize_output_type_right_bound) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto buckets = make_shared(element::f32, Shape{5}); - auto bucketize = make_shared(data, buckets, element::i32, false); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto buckets = make_shared(element::Type_t::f32, Shape{5}); + auto bucketize = make_shared(data, buckets, element::Type_t::i32, false); - ASSERT_EQ(bucketize->get_output_element_type(0), element::i32); + ASSERT_EQ(bucketize->get_output_element_type(0), element::Type_t::i32); EXPECT_TRUE(bucketize->get_output_partial_shape(0).same_scheme(PartialShape{1, 2, 3, 4})); } TEST(type_prop, bucketize_dynamic_input) { - auto data = make_shared(element::f64, PartialShape{4, Dimension::dynamic()}); - auto buckets = make_shared(element::f32, Shape{5}); + auto data = + make_shared(element::Type_t::f64, PartialShape{4, Dimension::dynamic()}); + auto buckets = make_shared(element::Type_t::f32, Shape{5}); auto bucketize = make_shared(data, buckets); - EXPECT_EQ(bucketize->get_element_type(), element::i64); + EXPECT_EQ(bucketize->get_element_type(), element::Type_t::i64); EXPECT_TRUE( bucketize->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension::dynamic()})); } TEST(type_prop, bucketize_dynamic_buckets) { - auto data = make_shared(element::f64, PartialShape{4, Dimension::dynamic()}); - auto buckets = make_shared(element::f32, PartialShape{Dimension::dynamic()}); + auto data = + make_shared(element::Type_t::f64, PartialShape{4, Dimension::dynamic()}); + auto buckets = + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic()}); auto bucketize = make_shared(data, buckets); - EXPECT_EQ(bucketize->get_element_type(), element::i64); + EXPECT_EQ(bucketize->get_element_type(), element::Type_t::i64); EXPECT_TRUE( bucketize->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension::dynamic()})); } TEST(type_prop, bucketize_fail_output_type) { - auto data = make_shared(element::f64, PartialShape{4, Dimension::dynamic()}); - auto buckets = make_shared(element::f32, Shape{5}); + auto data = + make_shared(element::Type_t::f64, PartialShape{4, Dimension::dynamic()}); + auto buckets = make_shared(element::Type_t::f32, Shape{5}); try { - auto bucketize = make_shared(data, buckets, element::f64); + auto bucketize = make_shared(data, buckets, element::Type_t::f64); // Should have thrown, so fail if it didn't FAIL() << "Invalid output type not detected"; } @@ -94,8 +98,9 @@ TEST(type_prop, bucketize_fail_output_type) TEST(type_prop, bucketize_fail_buckets_dim) { - auto data = make_shared(element::f64, PartialShape{4, Dimension::dynamic()}); - auto buckets = make_shared(element::f32, Shape{5, 5}); + auto data = + make_shared(element::Type_t::f64, PartialShape{4, 
Dimension::dynamic()}); + auto buckets = make_shared(element::Type_t::f32, Shape{5, 5}); try { auto bucketize = make_shared(data, buckets); diff --git a/ngraph/test/type_prop/clamp.cpp b/ngraph/test/type_prop/clamp.cpp index 63652be87425d4..8c696d5cb93f31 100644 --- a/ngraph/test/type_prop/clamp.cpp +++ b/ngraph/test/type_prop/clamp.cpp @@ -23,7 +23,7 @@ using namespace ngraph; TEST(type_prop, fused_clamp) { - const auto data = make_shared(element::f64, Shape{2, 2}); + const auto data = make_shared(element::Type_t::f64, Shape{2, 2}); try { @@ -38,6 +38,6 @@ TEST(type_prop, fused_clamp) } const auto clamp = make_shared(data, 1.0, 2.0); - EXPECT_EQ(clamp->get_element_type(), element::f64); + EXPECT_EQ(clamp->get_element_type(), element::Type_t::f64); EXPECT_EQ(clamp->get_shape(), (Shape{2, 2})); } diff --git a/ngraph/test/type_prop/concat.cpp b/ngraph/test/type_prop/concat.cpp index 450a7feb933cb6..7d912ef6c0359c 100644 --- a/ngraph/test/type_prop/concat.cpp +++ b/ngraph/test/type_prop/concat.cpp @@ -24,19 +24,19 @@ using namespace ngraph; TEST(type_prop, concat_deduce) { // Deduce type - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::f32, Shape{2, 7, 4}); - auto param2 = make_shared(element::f32, Shape{2, 2, 4}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 2, 4}); auto c = make_shared(NodeVector{param0, param1, param2}, 1); - ASSERT_EQ(c->get_element_type(), element::f32); + ASSERT_EQ(c->get_element_type(), element::Type_t::f32); ASSERT_EQ(c->get_shape(), (Shape{2, 12, 4})); } TEST(type_prop, concat_deduce_wrong_rank) { - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::f32, Shape{2, 7, 4}); - auto param2 = make_shared(element::f32, + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{ 2, 2, }); @@ -61,9 +61,9 @@ TEST(type_prop, concat_deduce_wrong_rank) TEST(type_prop, concat_deduce_wrong_shape) { - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::f32, Shape{2, 7, 4}); - auto param2 = make_shared(element::f32, Shape{2, 2, 5}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 2, 5}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 1); @@ -85,9 +85,9 @@ TEST(type_prop, concat_deduce_wrong_shape) TEST(type_prop, concat_deduce_axis_oob) { - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::f32, Shape{2, 7, 4}); - auto param2 = make_shared(element::f32, Shape{2, 2, 5}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 2, 5}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 3); @@ -107,19 +107,19 @@ TEST(type_prop, concat_deduce_axis_oob) TEST(type_prop, concat_deduce_axis_barely_in_bounds) { // Deduce type - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::f32, Shape{2, 3, 8}); - auto param2 = make_shared(element::f32, Shape{2, 3, 12}); + auto param0 = 
make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 3, 8}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 3, 12}); auto c = make_shared(NodeVector{param0, param1, param2}, 2); - ASSERT_EQ(c->get_element_type(), element::f32); + ASSERT_EQ(c->get_element_type(), element::Type_t::f32); ASSERT_EQ(c->get_shape(), (Shape{2, 3, 24})); } TEST(type_prop, concat_deduce_elem_type_mismatch) { - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::i32, Shape{2, 7, 4}); - auto param2 = make_shared(element::f32, Shape{2, 2, 4}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::i32, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 2, 4}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 1); @@ -138,20 +138,20 @@ TEST(type_prop, concat_deduce_elem_type_mismatch) TEST(type_prop, concat_partial_et_consistent) { - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::dynamic, Shape{2, 7, 4}); - auto param2 = make_shared(element::f32, Shape{2, 2, 4}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::dynamic, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 2, 4}); auto c = make_shared(NodeVector{param0, param1, param2}, 1); - ASSERT_EQ(c->get_element_type(), element::f32); + ASSERT_EQ(c->get_element_type(), element::Type_t::f32); ASSERT_EQ(c->get_shape(), (Shape{2, 12, 4})); } TEST(type_prop, concat_partial_et_inconsistent) { - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::dynamic, Shape{2, 7, 4}); - auto param2 = make_shared(element::i32, Shape{2, 2, 4}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::dynamic, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::i32, Shape{2, 2, 4}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 1); @@ -170,9 +170,9 @@ TEST(type_prop, concat_partial_et_inconsistent) TEST(type_prop, concat_partial_all_rank_dynamic) { - auto param0 = make_shared(element::f32, PartialShape::dynamic()); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto c = make_shared(NodeVector{param0, param1, param2}, 1); ASSERT_TRUE(c->get_output_partial_shape(0).rank().is_dynamic()); @@ -181,10 +181,10 @@ TEST(type_prop, concat_partial_all_rank_dynamic) TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_dynamic_consistent) { auto param0 = - make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto param2 = - make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{2, 3, Dimension::dynamic()}); auto c = make_shared(NodeVector{param0, param1, param2}, 1); ASSERT_TRUE( @@ -194,10 +194,10 @@ TEST(type_prop, 
concat_partial_some_rank_dynamic_others_rank_static_dynamic_cons TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_dynamic_rank_inconsistent) { auto param0 = - make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = - make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic(), 4}); + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::f32, + PartialShape{2, 3, Dimension::dynamic(), 4}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 1); @@ -221,10 +221,10 @@ TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_dynamic_rank TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_dynamic_dims_inconsistent) { auto param0 = - make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto param2 = - make_shared(element::f32, PartialShape{3, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{3, 3, Dimension::dynamic()}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 1); @@ -249,12 +249,12 @@ TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_dynamic_dims_intransitively_inconsistent) { auto param0 = - make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto param2 = make_shared( - element::f32, PartialShape{Dimension::dynamic(), 3, Dimension::dynamic()}); + element::Type_t::f32, PartialShape{Dimension::dynamic(), 3, Dimension::dynamic()}); auto param3 = - make_shared(element::f32, PartialShape{3, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{3, 3, Dimension::dynamic()}); try { auto c = make_shared(NodeVector{param0, param1, param2, param3}, 1); @@ -277,10 +277,10 @@ TEST(type_prop, TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_with_concat_axis_static) { - auto param0 = make_shared(element::f32, PartialShape{2, 2, 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::f32, PartialShape{2, 2, 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto param2 = - make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{2, 3, Dimension::dynamic()}); auto c = make_shared(NodeVector{param0, param1, param2}, 1); ASSERT_TRUE( @@ -290,10 +290,10 @@ TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_with_concat_ TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_with_concat_axis_static_dims_inconsistent) { - auto param0 = make_shared(element::f32, PartialShape{2, 2, 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::f32, PartialShape{2, 2, 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto param2 = - make_shared(element::f32, PartialShape{3, 3, 
Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{3, 3, Dimension::dynamic()}); try { @@ -317,11 +317,11 @@ TEST(type_prop, TEST(type_prop, concat_partial_all_static_with_concat_axis_static_compatible_result_static) { - auto param0 = make_shared(element::f32, PartialShape{2, 2, 3}); + auto param0 = make_shared(element::Type_t::f32, PartialShape{2, 2, 3}); auto param1 = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 4, 3}); + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 4, 3}); auto param2 = - make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{2, 3, Dimension::dynamic()}); auto c = make_shared(NodeVector{param0, param1, param2}, 1); ASSERT_EQ(c->get_shape(), (Shape{2, 9, 3})); @@ -330,11 +330,11 @@ TEST(type_prop, concat_partial_all_static_with_concat_axis_static_compatible_res TEST(type_prop, concat_partial_all_static_with_concat_axis_static_compatible_result_dynamic) { auto param0 = - make_shared(element::f32, PartialShape{2, 2, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{2, 2, Dimension::dynamic()}); auto param1 = make_shared( - element::f32, PartialShape{Dimension::dynamic(), 4, Dimension::dynamic()}); + element::Type_t::f32, PartialShape{Dimension::dynamic(), 4, Dimension::dynamic()}); auto param2 = - make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{2, 3, Dimension::dynamic()}); auto c = make_shared(NodeVector{param0, param1, param2}, 1); ASSERT_TRUE( @@ -343,11 +343,11 @@ TEST(type_prop, concat_partial_all_static_with_concat_axis_static_compatible_res TEST(type_prop, concat_partial_all_static_with_concat_axis_static_dims_incompatible) { - auto param0 = make_shared(element::f32, PartialShape{2, 2, 3}); + auto param0 = make_shared(element::Type_t::f32, PartialShape{2, 2, 3}); auto param1 = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 4, 3}); + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 4, 3}); auto param2 = - make_shared(element::f32, PartialShape{3, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{3, 3, Dimension::dynamic()}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 1); diff --git a/ngraph/test/type_prop/constant.cpp b/ngraph/test/type_prop/constant.cpp index 1de8b9e8f99a36..b28e89ed7d06f8 100644 --- a/ngraph/test/type_prop/constant.cpp +++ b/ngraph/test/type_prop/constant.cpp @@ -23,29 +23,29 @@ using namespace ngraph; TEST(type_prop, scalar_constant_deduce_float32) { - auto c = op::Constant::create(element::f32, Shape{}, {208}); - ASSERT_EQ(c->get_element_type(), element::f32); + auto c = op::Constant::create(element::Type_t::f32, Shape{}, {208}); + ASSERT_EQ(c->get_element_type(), element::Type_t::f32); ASSERT_EQ(c->get_shape(), (Shape{})); } TEST(type_prop, scalar_constant_deduce_bool) { - auto c = op::Constant::create(element::boolean, Shape{}, {1}); - ASSERT_EQ(c->get_element_type(), element::boolean); + auto c = op::Constant::create(element::Type_t::boolean, Shape{}, {1}); + ASSERT_EQ(c->get_element_type(), element::Type_t::boolean); ASSERT_EQ(c->get_shape(), (Shape{})); } TEST(type_prop, tensor_constant_deduce_float32) { - auto c = op::Constant::create(element::f32, Shape{2, 2}, {208, 208, 208, 208}); - ASSERT_EQ(c->get_element_type(), element::f32); + auto c = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {208, 208, 208, 208}); + 
ASSERT_EQ(c->get_element_type(), element::Type_t::f32); ASSERT_EQ(c->get_shape(), (Shape{2, 2})); } TEST(type_prop, tensor_constant_deduce_bool) { - auto c = op::Constant::create(element::boolean, Shape{2, 2}, {1, 1, 1, 1}); - ASSERT_EQ(c->get_element_type(), element::boolean); + auto c = op::Constant::create(element::Type_t::boolean, Shape{2, 2}, {1, 1, 1, 1}); + ASSERT_EQ(c->get_element_type(), element::Type_t::boolean); ASSERT_EQ(c->get_shape(), (Shape{2, 2})); } @@ -53,7 +53,7 @@ TEST(type_prop, tensor_constant_bad_count) { try { - auto c = op::Constant::create(element::boolean, Shape{2, 2}, {1, 1, 1}); + auto c = op::Constant::create(element::Type_t::boolean, Shape{2, 2}, {1, 1, 1}); // Should have thrown, so fail if it didn't FAIL() << "Incorrect number of literals not detected"; } @@ -71,8 +71,8 @@ TEST(type_prop, tensor_constant_bad_count) TEST(type_prop, constant_zero_elements_one_string) { - auto c = - make_shared(element::i64, Shape{2, 0, 2, 2}, std::vector{"42"}); - ASSERT_EQ(c->get_element_type(), element::i64); + auto c = make_shared( + element::Type_t::i64, Shape{2, 0, 2, 2}, std::vector{"42"}); + ASSERT_EQ(c->get_element_type(), element::Type_t::i64); ASSERT_EQ(c->get_shape(), (Shape{2, 0, 2, 2})); } diff --git a/ngraph/test/type_prop/convert.cpp b/ngraph/test/type_prop/convert.cpp index c16b0dcab0c194..e3b69a6c93c0ab 100644 --- a/ngraph/test/type_prop/convert.cpp +++ b/ngraph/test/type_prop/convert.cpp @@ -24,8 +24,8 @@ using namespace ngraph; TEST(type_prop, convert_deduce) { // Deduce type - auto param = make_shared(element::f32, Shape{2, 3, 4}); - auto c = make_shared(param, element::i32); - ASSERT_EQ(c->get_element_type(), element::i32); + auto param = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto c = make_shared(param, element::Type_t::i32); + ASSERT_EQ(c->get_element_type(), element::Type_t::i32); ASSERT_EQ(c->get_shape(), (Shape{2, 3, 4})); } diff --git a/ngraph/test/type_prop/convolution.cpp b/ngraph/test/type_prop/convolution.cpp index b298f0aa4bccbe..4a1ca667b469f7 100644 --- a/ngraph/test/type_prop/convolution.cpp +++ b/ngraph/test/type_prop/convolution.cpp @@ -25,10 +25,10 @@ using namespace ngraph; TEST(type_prop, conv_1d_deduce) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); auto conv = make_shared(param0, param1); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); @@ -43,8 +43,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce) { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 91}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 91}); // output delta auto conv = make_shared(data_batch_shape, param0, param1, @@ -53,7 +54,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce) CoordinateDiff{0}, CoordinateDiff{0}, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); 
EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1}); @@ -67,15 +68,15 @@ TEST(type_prop, conv_1d_back_data_batch_deduce) TEST(type_prop, conv_1d_deduce_padded) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); auto move_strides = Strides{1}; auto dilation_strides = Strides{1}; auto padding_below = CoordinateDiff{2}; auto padding_above = CoordinateDiff{3}; auto conv = make_shared( param0, param1, move_strides, dilation_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); @@ -90,8 +91,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_padded) { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 96}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 96}); // output delta auto move_strides = Strides{1}; auto dilation_strides = Strides{1}; auto padding_below = CoordinateDiff{2}; @@ -104,7 +106,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_padded) padding_below, padding_above, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1}); @@ -118,11 +120,11 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_padded) TEST(type_prop, conv_1d_deduce_strided) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); auto move_strides = Strides{2}; auto conv = make_shared(param0, param1, move_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 46})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{2}); @@ -137,8 +139,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided) { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 46}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 46}); // output delta auto move_strides = Strides{2}; auto conv = make_shared(data_batch_shape, param0, @@ -148,7 +151,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided) CoordinateDiff{0}, CoordinateDiff{0}, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2}); @@ -162,15 +165,15 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided) TEST(type_prop, 
conv_1d_deduce_strided_padded) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); auto move_strides = Strides{2}; auto dilation_strides = Strides{1}; auto padding_below = CoordinateDiff{2}; auto padding_above = CoordinateDiff{3}; auto conv = make_shared( param0, param1, move_strides, dilation_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 48})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{2}); @@ -185,8 +188,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_padded) { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 48}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 48}); // output delta auto move_strides = Strides{2}; auto dilation_strides = Strides{1}; auto padding_below = CoordinateDiff{2}; @@ -199,7 +203,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_padded) padding_below, padding_above, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2}); @@ -213,11 +217,11 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_padded) TEST(type_prop, conv_1d_deduce_strided_small_uneven) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 5}); - auto param1 = make_shared(element::f32, Shape{128, 3, 2}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 5}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 2}); auto move_strides = Strides{2}; auto conv = make_shared(param0, param1, move_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{2}); @@ -232,8 +236,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_uneven) { // Deduce type Shape data_batch_shape{64, 3, 5}; - auto param0 = make_shared(element::f32, Shape{128, 3, 2}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 2}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 2}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 2}); // output delta auto move_strides = Strides{2}; auto conv = make_shared(data_batch_shape, param0, @@ -243,7 +248,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_uneven) CoordinateDiff{0}, CoordinateDiff{0}, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2}); @@ -257,11 +262,11 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_uneven) TEST(type_prop, conv_1d_deduce_strided_small_even) { // Deduce type - auto param0 = make_shared(element::f32, 
Shape{64, 3, 6}); - auto param1 = make_shared(element::f32, Shape{128, 3, 2}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 6}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 2}); auto move_strides = Strides{2}; auto conv = make_shared(param0, param1, move_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 3})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{2}); @@ -276,8 +281,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_even) { // Deduce type Shape data_batch_shape{64, 3, 6}; - auto param0 = make_shared(element::f32, Shape{128, 3, 2}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 3}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 2}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 3}); // output delta auto move_strides = Strides{2}; auto conv = make_shared(data_batch_shape, param0, @@ -287,7 +293,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_even) CoordinateDiff{0}, CoordinateDiff{0}, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2}); @@ -301,12 +307,12 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_even) TEST(type_prop, conv_1d_deduce_window_dilated) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); auto move_strides = Strides{1}; auto dilate_strides = Strides{2}; auto conv = make_shared(param0, param1, move_strides, dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 82})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); @@ -321,8 +327,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated) { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 82}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 82}); // output delta auto move_strides = Strides{1}; auto dilate_strides = Strides{2}; auto conv = make_shared(data_batch_shape, @@ -333,7 +340,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated) CoordinateDiff{0}, CoordinateDiff{0}, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1}); @@ -347,15 +354,15 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated) TEST(type_prop, conv_1d_deduce_window_dilated_padded) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 
3, 10}); auto move_strides = Strides{1}; auto dilate_strides = Strides{2}; auto padding_below = CoordinateDiff{2}; auto padding_above = CoordinateDiff{3}; auto conv = make_shared( param0, param1, move_strides, dilate_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 87})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); @@ -370,8 +377,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_padded) { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 87}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 87}); // output delta auto move_strides = Strides{1}; auto dilate_strides = Strides{2}; auto padding_below = CoordinateDiff{2}; @@ -384,7 +392,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_padded) padding_below, padding_above, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1}); @@ -398,8 +406,8 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_padded) TEST(type_prop, conv_1d_deduce_window_dilated_data_dilated_padded) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); auto move_strides = Strides{1}; auto dilate_strides = Strides{2}; auto padding_below = CoordinateDiff{2}; @@ -412,7 +420,7 @@ TEST(type_prop, conv_1d_deduce_window_dilated_data_dilated_padded) padding_below, padding_above, data_dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 285})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); @@ -427,8 +435,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_data_dilated_padde { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 285}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 285}); // output delta auto move_strides = Strides{1}; auto dilate_strides = Strides{2}; auto padding_below = CoordinateDiff{2}; @@ -442,7 +451,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_data_dilated_padde padding_below, padding_above, data_dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1}); @@ -456,10 +465,10 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_data_dilated_padde TEST(type_prop, conv_2d_deduce) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, 
Shape{128, 3, 10, 20}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10, 20}); auto conv = make_shared(param0, param1); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91, 131})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{1, 1})); @@ -473,15 +482,15 @@ TEST(type_prop, conv_2d_deduce) TEST(type_prop, conv_2d_deduce_padded) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10, 20}); auto move_strides = Strides{1, 1}; auto dilate_strides = Strides{1, 1}; auto padding_below = CoordinateDiff{2, 3}; auto padding_above = CoordinateDiff{3, 4}; auto conv = make_shared( param0, param1, move_strides, dilate_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96, 138})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{1, 1})); @@ -495,15 +504,15 @@ TEST(type_prop, conv_2d_deduce_padded) TEST(type_prop, conv_2d_deduce_padded_neg) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10, 20}); auto move_strides = Strides{1, 1}; auto dilate_strides = Strides{1, 1}; auto padding_below = CoordinateDiff{2, -3}; auto padding_above = CoordinateDiff{3, -4}; auto conv = make_shared( param0, param1, move_strides, dilate_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96, 124})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{1, 1})); @@ -526,8 +535,8 @@ TEST_P(DeduceAutoPadTest, same_lower) image_shape.insert(image_shape.begin(), {1, 1}); // Add {N, C} auto filter_shape = std::get<1>(GetParam()); filter_shape.insert(filter_shape.begin(), {1, 1}); // Add {O, I} - auto param0 = make_shared(element::f32, image_shape); - auto param1 = make_shared(element::f32, filter_shape); + auto param0 = make_shared(element::Type_t::f32, image_shape); + auto param1 = make_shared(element::Type_t::f32, filter_shape); auto conv = make_shared(param0, param1, @@ -589,11 +598,11 @@ INSTANTIATE_TEST_CASE_P(type_prop, TEST(type_prop, conv_2d_deduce_strided) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10, 20}); auto move_strides = Strides{2, 3}; auto conv = make_shared(param0, param1, move_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 46, 44})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3})); @@ -607,12 +616,12 @@ TEST(type_prop, conv_2d_deduce_strided) TEST(type_prop, 
conv_2d_deduce_strided_window_dilated) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10, 20}); auto move_strides = Strides{2, 3}; auto dilate_strides = Strides{3, 2}; auto conv = make_shared(param0, param1, move_strides, dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 37, 38})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3})); @@ -626,8 +635,8 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated) TEST(type_prop, conv_2d_deduce_strided_window_dilated_data_dilated) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10, 20}); auto move_strides = Strides{2, 3}; auto dilate_strides = Strides{3, 2}; auto padding_below = CoordinateDiff{0, 0}; @@ -640,7 +649,7 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated_data_dilated) padding_below, padding_above, data_dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 86, 137})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3})); @@ -654,12 +663,12 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated_data_dilated) TEST(type_prop, conv_2d_deduce_strided_window_dilated_small) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 7, 8}); - auto param1 = make_shared(element::f32, Shape{128, 3, 2, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 7, 8}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 2, 3}); auto move_strides = Strides{2, 3}; auto dilate_strides = Strides{3, 2}; auto conv = make_shared(param0, param1, move_strides, dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2, 2})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3})); @@ -673,12 +682,12 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated_small) TEST(type_prop, conv_3d_deduce_strided_window_dilated_small) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 7, 8, 10}); - auto param1 = make_shared(element::f32, Shape{128, 3, 2, 3, 2}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 7, 8, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 2, 3, 2}); auto move_strides = Strides{2, 3, 4}; auto dilate_strides = Strides{3, 2, 2}; auto conv = make_shared(param0, param1, move_strides, dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2, 2, 2})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3, 4})); @@ -692,8 +701,8 @@ TEST(type_prop, conv_3d_deduce_strided_window_dilated_small) TEST(type_prop, conv_3d_deduce_strided_window_dilated_data_dilated_small) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 7, 8, 10}); - auto param1 = 
make_shared(element::f32, Shape{128, 3, 2, 3, 2}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 7, 8, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 2, 3, 2}); auto move_strides = Strides{2, 3, 4}; auto dilate_strides = Strides{3, 2, 2}; auto padding_below = CoordinateDiff{0, 0, 0}; @@ -706,7 +715,7 @@ TEST(type_prop, conv_3d_deduce_strided_window_dilated_data_dilated_small) padding_below, padding_above, data_dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 5, 6, 5})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3, 4})); @@ -720,8 +729,8 @@ TEST(type_prop, conv_3d_deduce_strided_window_dilated_data_dilated_small) TEST(type_prop, conv_invalid_element_type_mismatch) { // Deduce type - auto param0 = make_shared(element::f32, Shape{3, 3, 3, 3}); - auto param1 = make_shared(element::i32, Shape{3, 3, 2, 2}); + auto param0 = make_shared(element::Type_t::f32, Shape{3, 3, 3, 3}); + auto param1 = make_shared(element::Type_t::i32, Shape{3, 3, 2, 2}); try { auto conv = make_shared(param0, param1); @@ -743,8 +752,8 @@ TEST(type_prop, conv_invalid_element_type_mismatch) TEST(type_prop, conv_invalid_0d_input) { // Deduce type - auto param0 = make_shared(element::f32, Shape{}); - auto param1 = make_shared(element::f32, Shape{}); + auto param0 = make_shared(element::Type_t::f32, Shape{}); + auto param1 = make_shared(element::Type_t::f32, Shape{}); try { auto conv = make_shared(param0, param1); @@ -768,8 +777,8 @@ TEST(type_prop, conv_invalid_0d_input) TEST(type_prop, conv_invalid_1d_input) { // Deduce type - auto param0 = make_shared(element::f32, Shape{2}); - auto param1 = make_shared(element::f32, Shape{2}); + auto param0 = make_shared(element::Type_t::f32, Shape{2}); + auto param1 = make_shared(element::Type_t::f32, Shape{2}); try { auto conv = make_shared(param0, param1); @@ -793,8 +802,8 @@ TEST(type_prop, conv_invalid_1d_input) TEST(type_prop, conv_invalid_2d_input) { // Deduce type - auto param0 = make_shared(element::f32, Shape{2, 6}); - auto param1 = make_shared(element::f32, Shape{2, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 6}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 6}); try { auto conv = make_shared(param0, param1); @@ -818,8 +827,8 @@ TEST(type_prop, conv_invalid_2d_input) TEST(type_prop, conv_invalid_0_batch_size) { // Deduce type - auto param0 = make_shared(element::f32, Shape{0, 6, 1}); - auto param1 = make_shared(element::f32, Shape{0, 6, 1}); + auto param0 = make_shared(element::Type_t::f32, Shape{0, 6, 1}); + auto param1 = make_shared(element::Type_t::f32, Shape{0, 6, 1}); try { auto conv = make_shared(param0, param1); @@ -840,8 +849,8 @@ TEST(type_prop, conv_invalid_0_batch_size) TEST(type_prop, conv_invalid_0_input_channels) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 0, 1}); - auto param1 = make_shared(element::f32, Shape{5, 0, 1}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 0, 1}); + auto param1 = make_shared(element::Type_t::f32, Shape{5, 0, 1}); try { auto conv = make_shared(param0, param1); @@ -864,8 +873,8 @@ TEST(type_prop, conv_invalid_0_input_channels) TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_many) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{5, 2, 3, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, 
Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{5, 2, 3, 3, 3}); try { auto conv = make_shared(param0, param1); @@ -886,8 +895,8 @@ TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_many) TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_few) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{5, 2, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{5, 2, 3}); try { auto conv = make_shared(param0, param1); @@ -908,8 +917,8 @@ TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_few) TEST(type_prop, conv_invalid_0_output_channels) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{0, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{0, 2, 3, 3}); try { auto conv = make_shared(param0, param1); @@ -930,8 +939,8 @@ TEST(type_prop, conv_invalid_0_output_channels) TEST(type_prop, conv_invalid_input_channel_mismatch) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 3, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 3, 3, 3}); try { auto conv = make_shared(param0, param1); @@ -955,8 +964,8 @@ TEST(type_prop, conv_invalid_input_channel_mismatch) TEST(type_prop, conv_invalid_movement_stride_rank) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, param1, Strides{2, 3, 8}); @@ -984,8 +993,8 @@ TEST(type_prop, conv_invalid_movement_stride_rank) TEST(type_prop, conv_invalid_window_dilation_stride_rank) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = @@ -1014,8 +1023,8 @@ TEST(type_prop, conv_invalid_window_dilation_stride_rank) TEST(type_prop, conv_invalid_data_dilation_stride_rank) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, @@ -1049,8 +1058,8 @@ TEST(type_prop, conv_invalid_data_dilation_stride_rank) TEST(type_prop, conv_invalid_padding_below_rank) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, @@ -1083,8 +1092,8 @@ TEST(type_prop, conv_invalid_padding_below_rank) TEST(type_prop, conv_invalid_padding_above_rank) { // Deduce type - auto param0 = make_shared(element::f32, 
Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, @@ -1117,8 +1126,8 @@ TEST(type_prop, conv_invalid_padding_above_rank) TEST(type_prop, conv_invalid_input_spatial_size_negative_after_padding) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, @@ -1146,8 +1155,8 @@ TEST(type_prop, conv_invalid_input_spatial_size_negative_after_padding) TEST(type_prop, conv_invalid_input_spatial_size_zero_after_padding) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, @@ -1175,8 +1184,8 @@ TEST(type_prop, conv_invalid_input_spatial_size_zero_after_padding) TEST(type_prop, conv_invalid_input_spatial_size_0) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 0, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 0, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, param1); @@ -1199,8 +1208,8 @@ TEST(type_prop, conv_invalid_input_spatial_size_0) TEST(type_prop, conv_invalid_window_size_0) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 0}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 0}); try { auto conv = make_shared(param0, param1); @@ -1223,8 +1232,8 @@ TEST(type_prop, conv_invalid_window_size_0) TEST(type_prop, conv_invalid_window_dilation_stride_0) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, param1, Strides{2, 3}, Strides{2, 0}); @@ -1247,8 +1256,8 @@ TEST(type_prop, conv_invalid_window_dilation_stride_0) TEST(type_prop, conv_invalid_data_dilation_stride_0) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, @@ -1277,8 +1286,8 @@ TEST(type_prop, conv_invalid_data_dilation_stride_0) TEST(type_prop, conv_invalid_dilated_window_too_large) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 8, 8}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 8, 8}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, param1, Strides{1, 1}, Strides{4, 
4}); @@ -1301,8 +1310,8 @@ TEST(type_prop, conv_invalid_dilated_window_too_large) TEST(type_prop, conv_invalid_movement_stride_0) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, param1, Strides{0, 1}); @@ -1332,8 +1341,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_ok) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -1343,7 +1352,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_ok) padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } @@ -1357,8 +1366,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_strides_rank_wrong CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1398,8 +1407,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_strides_dim_zero) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1435,8 +1444,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_dilation_rank_wron CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1476,8 +1485,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_dilation_dim_zero) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1513,8 +1522,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_padding_below_rank_wrong) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1554,8 +1563,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_padding_above_rank_wrong) 
CoordinateDiff padding_above{0, 0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1595,8 +1604,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_data_dilation_rank_wrong) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1636,8 +1645,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_data_dilation_dim_zero) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 0}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1673,8 +1682,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_ok) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -1684,7 +1693,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_ok) padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } @@ -1698,8 +1707,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_data_batch_rank_wr CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1741,8 +1750,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_batch_size_known_o CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -1752,7 +1761,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_batch_size_known_o padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()})); } @@ -1768,8 +1777,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_batch_size_known_z CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, 
data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1804,8 +1813,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_input_channel_coun CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -1815,7 +1824,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_input_channel_coun padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } @@ -1830,8 +1839,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_input_channel_coun CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1868,8 +1877,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_output_channel_cou CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -1879,7 +1888,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_output_channel_cou padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{Dimension::dynamic(), 32, Dimension::dynamic(), Dimension::dynamic()})); } @@ -1894,8 +1903,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_output_channel_cou CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1929,8 +1938,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_input_channel_coun CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -1940,7 +1949,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_input_channel_coun padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); 
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } @@ -1954,8 +1963,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_input_channel_coun CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1991,8 +2000,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_ok) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2002,7 +2011,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_ok) padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } @@ -2016,8 +2025,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_arg_ranks_m CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2054,8 +2063,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_input_chann CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2065,7 +2074,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_input_chann padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } @@ -2081,8 +2090,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_input_chann CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2119,8 +2128,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspat CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2130,7 +2139,7 @@ TEST(type_prop, 
conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspat padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, 100, Dimension::dynamic(), Dimension::dynamic()})); } @@ -2146,8 +2155,8 @@ TEST(type_prop, CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2157,7 +2166,7 @@ TEST(type_prop, padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, 100, 196, Dimension::dynamic()})); } @@ -2174,8 +2183,8 @@ TEST( CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2213,8 +2222,8 @@ TEST( CoordinateDiff padding_above{-1, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2224,7 +2233,7 @@ TEST( padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, 100, 1, Dimension::dynamic()})); } @@ -2241,8 +2250,8 @@ TEST( CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{2, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2252,7 +2261,7 @@ TEST( padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, 100, 199, Dimension::dynamic()})); } @@ -2269,8 +2278,8 @@ TEST( CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{2, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2280,7 +2289,7 @@ TEST( padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, 100, 67, 
Dimension::dynamic()})); } @@ -2297,8 +2306,8 @@ TEST( CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2336,8 +2345,8 @@ TEST( CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2375,8 +2384,8 @@ TEST( CoordinateDiff padding_above{0, -1}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2386,7 +2395,7 @@ TEST( padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, 100, 196, Dimension::dynamic()})); } @@ -2403,8 +2412,8 @@ TEST( CoordinateDiff padding_above{0, -20}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2442,8 +2451,8 @@ TEST( CoordinateDiff padding_above{0, -20}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2481,8 +2490,8 @@ TEST(type_prop, conv_partial_dynamic_et) CoordinateDiff padding_above{-1, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::dynamic, data_batch_shape); - auto param1 = make_shared(element::dynamic, filters_shape); + auto param0 = make_shared(element::Type_t::dynamic, data_batch_shape); + auto param1 = make_shared(element::Type_t::dynamic, filters_shape); auto conv = make_shared(param0, param1, @@ -2500,11 +2509,11 @@ TEST(type_prop, conv_partial_dynamic_et) TEST(type_prop, conv_bprop_data_v1_output_partial_shape_dynamic) { Shape shape_filter{6, 3, 3, 3}; - auto filters = make_shared(element::f32, shape_filter); + auto filters = make_shared(element::Type_t::f32, shape_filter); Shape shape_delta{2, 6, 3, 3}; - auto deltas = make_shared(element::f32, shape_delta); + auto deltas = make_shared(element::Type_t::f32, shape_delta); Shape shape_data_batch_shape{2, 3, 5, 5}; - auto data_batch_shape = make_shared(element::i64, Shape{2, 3, 5, 5}); + auto data_batch_shape = make_shared(element::Type_t::i64, Shape{2, 3, 5, 5}); auto strides = Strides{1, 1}; auto dilations = Strides{1, 1}; auto padding_begin = CoordinateDiff{0, 0}; @@ -2519,9 +2528,9 @@ TEST(type_prop, conv_bprop_data_v1_output_partial_shape_dynamic) TEST(type_prop, conv_bprop_data_v1_output_partial_shape_dynamic_static_rank) { PartialShape 
shape_filter{20, 10, 3, 3}; - auto filters = make_shared(element::f32, shape_filter); + auto filters = make_shared(element::Type_t::f32, shape_filter); PartialShape shape_delta{Dimension(), 20, 224, 224}; - auto deltas = make_shared(element::f32, shape_delta); + auto deltas = make_shared(element::Type_t::f32, shape_delta); auto strides = Strides{2, 2}; auto dilations = Strides{1, 1}; auto padding_begin = CoordinateDiff{1, 1}; @@ -2546,8 +2555,8 @@ TEST(type_prop, conv_v1_partial_rank) CoordinateDiff padding_below{0, 0}; CoordinateDiff padding_above{0, 0}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2569,8 +2578,8 @@ TEST(type_prop, conv_v1_partial_auto_padding_same) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -2590,8 +2599,8 @@ TEST(type_prop, conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_lower) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -2611,8 +2620,8 @@ TEST(type_prop, conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_upper) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_UPPER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -2632,8 +2641,8 @@ TEST(type_prop, conv_v1_partial_auto_padding_same_spatial_dims_dynamic) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -2654,8 +2663,8 @@ TEST(type_prop, conv_v1_partial_data_shape_dynamic) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -2676,10 +2685,10 @@ TEST(type_prop, conv_bprop_v1_partial_auto_padding_upper) 
Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_UPPER; - auto in1 = make_shared(element::f32, shape1); - auto in2 = make_shared(element::f32, shape2); + auto in1 = make_shared(element::Type_t::f32, shape1); + auto in2 = make_shared(element::Type_t::f32, shape2); std::vector data = {1, 74}; - element::Type type = element::i64; + element::Type type = element::Type_t::i64; auto in3 = make_shared(type, shape3, data); auto conv = make_shared( @@ -2701,10 +2710,10 @@ TEST(type_prop, conv_bprop_v1_partial_auto_padding_lower) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto in1 = make_shared(element::f32, shape1); - auto in2 = make_shared(element::f32, shape2); + auto in1 = make_shared(element::Type_t::f32, shape1); + auto in2 = make_shared(element::Type_t::f32, shape2); std::vector data = {1, 74}; - element::Type type = element::i64; + element::Type type = element::Type_t::i64; auto in3 = make_shared(type, shape3, data); auto conv = make_shared( @@ -2721,9 +2730,9 @@ TEST(type_prop, deformable_conv_incorrect_group) const PartialShape deformable_values_shape{1, 50, 5, 5}; const PartialShape filters_shape{4, 3, 5, 5}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, deformable_values_shape); - auto param2 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, deformable_values_shape); + auto param2 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2770,9 +2779,9 @@ TEST(type_prop, deformable_conv_incorrect_deformable_group) const PartialShape deformable_values_shape{1, 50, 5, 5}; const PartialShape filters_shape{3, 3, 5, 5}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, deformable_values_shape); - auto param2 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, deformable_values_shape); + auto param2 = make_shared(element::Type_t::f32, filters_shape); try { diff --git a/ngraph/test/type_prop/ctc_greedy_decoder.cpp b/ngraph/test/type_prop/ctc_greedy_decoder.cpp index b02593244de026..119c5ceb3ece17 100644 --- a/ngraph/test/type_prop/ctc_greedy_decoder.cpp +++ b/ngraph/test/type_prop/ctc_greedy_decoder.cpp @@ -26,10 +26,10 @@ TEST(type_prop, ctc_greedy_decoder_static_shapes) PartialShape logits_shape{100, 3, 1200}; PartialShape seq_mask_shape{100, 3}; Shape out_shape{3, 100, 1, 1}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); auto G = make_shared(P, I, false); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_EQ(G->get_shape(), out_shape); } @@ -38,10 +38,10 @@ TEST(type_prop, ctc_greedy_decoder_output_static_shape1) PartialShape logits_shape{Dimension::dynamic(), 3, 1200}; PartialShape seq_mask_shape{100, 3}; Shape out_shape{3, 100, 1, 1}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); auto G = make_shared(P, I, false); - ASSERT_EQ(G->get_element_type(), element::f32); + 
ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_EQ(G->get_shape(), out_shape); } @@ -50,10 +50,10 @@ TEST(type_prop, ctc_greedy_decoder_output_static_shape2) PartialShape logits_shape{Dimension::dynamic(), 3, 1200}; PartialShape seq_mask_shape{100, Dimension::dynamic()}; Shape out_shape{3, 100, 1, 1}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); auto G = make_shared(P, I, false); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_EQ(G->get_shape(), out_shape); } @@ -62,10 +62,10 @@ TEST(type_prop, ctc_greedy_decoder_dynamic_shapes) PartialShape logits_shape{Dimension::dynamic(), Dimension::dynamic(), 1200}; PartialShape seq_mask_shape{Dimension::dynamic(), Dimension::dynamic()}; PartialShape out_shape{Dimension::dynamic(), Dimension::dynamic(), 1, 1}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); auto G = make_shared(P, I, false); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_TRUE(G->get_output_partial_shape(0).same_scheme(out_shape)); } @@ -74,10 +74,10 @@ TEST(type_prop, ctc_greedy_decoder_dynamic_ranks1) PartialShape logits_shape = PartialShape::dynamic(); PartialShape seq_mask_shape{100, Dimension::dynamic()}; PartialShape out_shape{Dimension::dynamic(), 100, 1, 1}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); auto G = make_shared(P, I, false); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_TRUE(G->get_output_partial_shape(0).same_scheme(out_shape)); } @@ -86,10 +86,10 @@ TEST(type_prop, ctc_greedy_decoder_dynamic_ranks2) PartialShape logits_shape = PartialShape::dynamic(); PartialShape seq_mask_shape = PartialShape::dynamic(); PartialShape out_shape{Dimension::dynamic(), Dimension::dynamic(), 1, 1}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); auto G = make_shared(P, I, false); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_TRUE(G->get_output_partial_shape(0).same_scheme(out_shape)); } @@ -97,8 +97,8 @@ TEST(type_prop, ctc_greedy_decoder_incorrect_rank) { PartialShape logits_shape{Dimension::dynamic(), 3, 1200, 5}; PartialShape seq_mask_shape{100, 3}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); try { @@ -121,8 +121,8 @@ TEST(type_prop, ctc_greedy_decoder_incorrect_rank2) { PartialShape logits_shape{Dimension::dynamic(), 3, 1200}; PartialShape seq_mask_shape{100, 3, 2}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = 
make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); try { @@ -145,8 +145,8 @@ TEST(type_prop, ctc_greedy_decoder_mismatched_dim1) { PartialShape logits_shape{100, 4, 1200}; PartialShape seq_mask_shape{100, 3}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); try { @@ -169,8 +169,8 @@ TEST(type_prop, ctc_greedy_decoder_mismatched_dim2) { PartialShape logits_shape{101, 3, 1200}; PartialShape seq_mask_shape{100, 3}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); try { diff --git a/ngraph/test/type_prop/ctc_loss.cpp b/ngraph/test/type_prop/ctc_loss.cpp index 2b2cc6f1847d79..4933c1c24c6e21 100644 --- a/ngraph/test/type_prop/ctc_loss.cpp +++ b/ngraph/test/type_prop/ctc_loss.cpp @@ -24,91 +24,92 @@ using namespace ngraph; TEST(type_prop, ctc_loss) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); // create CTCLoss node auto ctc_loss = make_shared(logits, logit_length, labels, label_length, blank_index); // check type and shape infer - EXPECT_EQ(ctc_loss->get_element_type(), element::f32); + EXPECT_EQ(ctc_loss->get_element_type(), element::Type_t::f32); EXPECT_TRUE(ctc_loss->get_output_partial_shape(0).same_scheme(PartialShape{10})); } TEST(type_prop, ctc_loss_no_blank_index) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); // create CTCLoss node auto ctc_loss = make_shared(logits, logit_length, labels, label_length); // check type and shape infer - EXPECT_EQ(ctc_loss->get_element_type(), element::f32); + EXPECT_EQ(ctc_loss->get_element_type(), element::Type_t::f32); EXPECT_TRUE(ctc_loss->get_output_partial_shape(0).same_scheme(PartialShape{10})); } TEST(type_prop, ctc_loss_output_type) { // create inputs - auto logits = make_shared(element::f64, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f64, Shape{10, 120, 28}); + auto logit_length = 
make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); // create CTCLoss node auto ctc_loss = make_shared(logits, logit_length, labels, label_length, blank_index); // check type and shape infer - EXPECT_EQ(ctc_loss->get_element_type(), element::f64); + EXPECT_EQ(ctc_loss->get_element_type(), element::Type_t::f64); EXPECT_TRUE(ctc_loss->get_output_partial_shape(0).same_scheme(PartialShape{10})); } TEST(type_prop, ctc_loss_non_default_parameters) { // create inputs - auto logits = make_shared(element::f64, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f64, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); // create CTCLoss node auto ctc_loss = make_shared( logits, logit_length, labels, label_length, blank_index, true, false, false); // check type and shape infer - EXPECT_EQ(ctc_loss->get_element_type(), element::f64); + EXPECT_EQ(ctc_loss->get_element_type(), element::Type_t::f64); EXPECT_TRUE(ctc_loss->get_output_partial_shape(0).same_scheme(PartialShape{10})); } TEST(type_prop, ctc_loss_dynamic_input) { // create inputs - auto logits = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 120, 28}); + auto logits = make_shared(element::Type_t::f32, + PartialShape{Dimension::dynamic(), 120, 28}); auto logit_length = - make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto labels = make_shared(element::i32, PartialShape{Dimension::dynamic(), 120}); + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto labels = + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic(), 120}); auto label_length = - make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto blank_index = make_shared(element::i32, Shape{}); + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); // create CTCLoss node auto ctc_loss = make_shared(logits, logit_length, labels, label_length, blank_index); // check type and shape infer - EXPECT_EQ(ctc_loss->get_element_type(), element::f32); + EXPECT_EQ(ctc_loss->get_element_type(), element::Type_t::f32); EXPECT_TRUE( ctc_loss->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic()})); } @@ -116,31 +117,32 @@ TEST(type_prop, ctc_loss_dynamic_input) TEST(type_prop, ctc_loss_partly_dynamic_input) { // create inputs - auto logits = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 120, 28}); - auto logit_length = make_shared(element::i32, PartialShape{10}); - auto labels = make_shared(element::i32, PartialShape{Dimension::dynamic(), 120}); + auto logits = make_shared(element::Type_t::f32, + PartialShape{Dimension::dynamic(), 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, PartialShape{10}); + auto labels = + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic(), 120}); auto label_length = - 
make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto blank_index = make_shared(element::i32, Shape{}); + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); // create CTCLoss node auto ctc_loss = make_shared(logits, logit_length, labels, label_length, blank_index); // check type and shape infer - EXPECT_EQ(ctc_loss->get_element_type(), element::f32); + EXPECT_EQ(ctc_loss->get_element_type(), element::Type_t::f32); EXPECT_TRUE(ctc_loss->get_output_partial_shape(0).same_scheme(PartialShape{10})); } TEST(type_prop, ctc_loss_fail_inputs_dim) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 40, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 40, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); try { @@ -164,11 +166,11 @@ TEST(type_prop, ctc_loss_fail_inputs_dim) TEST(type_prop, ctc_loss_fail_logit_length_dim) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10, 20}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10, 20}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); try { @@ -192,11 +194,11 @@ TEST(type_prop, ctc_loss_fail_logit_length_dim) TEST(type_prop, ctc_loss_fail_labels_dim) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); try { @@ -220,11 +222,11 @@ TEST(type_prop, ctc_loss_fail_labels_dim) TEST(type_prop, ctc_loss_fail_label_length_dim) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10, 40}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = 
make_shared(element::Type_t::i32, Shape{10, 40}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); try { @@ -248,11 +250,11 @@ TEST(type_prop, ctc_loss_fail_label_length_dim) TEST(type_prop, ctc_loss_fail_blank_index_dim) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{4}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{4}); try { @@ -276,11 +278,11 @@ TEST(type_prop, ctc_loss_fail_blank_index_dim) TEST(type_prop, ctc_loss_fail_batch_dim_mismatch) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{40}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{40}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); try { @@ -307,11 +309,11 @@ TEST(type_prop, ctc_loss_fail_batch_dim_mismatch) TEST(type_prop, ctc_loss_fail_time_dim_mismatch) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 130}); - auto label_length = make_shared(element::i32, Shape{40}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 130}); + auto label_length = make_shared(element::Type_t::i32, Shape{40}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); try { diff --git a/ngraph/test/type_prop/deformable_convolution.cpp b/ngraph/test/type_prop/deformable_convolution.cpp index 508ce147176c91..83b97c12e9dc39 100644 --- a/ngraph/test/type_prop/deformable_convolution.cpp +++ b/ngraph/test/type_prop/deformable_convolution.cpp @@ -34,9 +34,9 @@ TEST(type_prop, deformable_conv_v1_partial_auto_padding_same) const int64_t group = 4; const int64_t deformable_group = 2; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto deformable_values = make_shared(element::f32, deformable_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto deformable_values = make_shared(element::Type_t::f32, deformable_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto deformable_conv = make_shared(data_batch, deformable_values, @@ -67,9 +67,9 @@ TEST(type_prop, deformable_conv_v1_partial_auto_padding_same_nc_dims_dynamic_sam const int64_t group = 4; const int64_t deformable_group = 2; - auto data_batch = 
make_shared(element::f32, data_batch_shape); - auto deformable_values = make_shared(element::f32, deformable_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto deformable_values = make_shared(element::Type_t::f32, deformable_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto deformable_conv = make_shared(data_batch, deformable_values, @@ -101,9 +101,9 @@ TEST(type_prop, deformable_conv_v1_partial_auto_padding_same_nc_dims_dynamic_sam const int64_t group = 4; const int64_t deformable_group = 2; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto deformable_values = make_shared(element::f32, deformable_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto deformable_values = make_shared(element::Type_t::f32, deformable_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto deformable_conv = make_shared(data_batch, deformable_values, @@ -135,9 +135,9 @@ TEST(type_prop, deformable_conv_v1_partial_auto_padding_same_spatial_dims_dynami const int64_t group = 4; const int64_t deformable_group = 2; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto deformable_values = make_shared(element::f32, deformable_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto deformable_values = make_shared(element::Type_t::f32, deformable_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto deformable_conv = make_shared(data_batch, deformable_values, diff --git a/ngraph/test/type_prop/deformable_psroi_pooling.cpp b/ngraph/test/type_prop/deformable_psroi_pooling.cpp index d4b204763df654..7d71de721a4c5e 100644 --- a/ngraph/test/type_prop/deformable_psroi_pooling.cpp +++ b/ngraph/test/type_prop/deformable_psroi_pooling.cpp @@ -23,9 +23,9 @@ using namespace ngraph; TEST(type_prop, deformable_psroi_pooling_output_shape) { - auto input = make_shared(element::f32, Shape{1, 1024, 63, 38}); - auto coords = make_shared(element::f32, Shape{300, 5}); - auto offsets = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto input = make_shared(element::Type_t::f32, Shape{1, 1024, 63, 38}); + auto coords = make_shared(element::Type_t::f32, Shape{300, 5}); + auto offsets = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); const int64_t output_dim = 882; const float spatial_scale = 0.0625; const int64_t group_size = 3; @@ -38,9 +38,9 @@ TEST(type_prop, deformable_psroi_pooling_output_shape) TEST(type_prop, deformable_psroi_pooling_output_shape_2) { - auto input = make_shared(element::f32, Shape{1, 7938, 38, 38}); - auto coords = make_shared(element::f32, Shape{300, 5}); - auto offsets = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto input = make_shared(element::Type_t::f32, Shape{1, 7938, 38, 38}); + auto coords = make_shared(element::Type_t::f32, Shape{300, 5}); + auto offsets = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); const int64_t output_dim = 162; const float spatial_scale = 0.0625; const int64_t group_size = 7; @@ -53,9 +53,9 @@ TEST(type_prop, deformable_psroi_pooling_output_shape_2) TEST(type_prop, deformable_psroi_pooling_invalid_input_rank) { - auto input = make_shared(element::f32, Shape{1, 2, 3}); - auto coords = make_shared(element::f32, Shape{1, 2}); - auto offsets = make_shared(element::f32, Shape{1, 2, 
3, 4}); + auto input = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto coords = make_shared(element::Type_t::f32, Shape{1, 2}); + auto offsets = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); const int64_t output_dim = 4; const float spatial_scale = 0.9; const int64_t group_size = 7; @@ -79,9 +79,9 @@ TEST(type_prop, deformable_psroi_pooling_invalid_input_rank) TEST(type_prop, deformable_psroi_pooling_invalid_box_coordinates_rank) { - auto input = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto coords = make_shared(element::f32, Shape{1, 2, 3}); - auto offsets = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto input = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto coords = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto offsets = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); const int64_t output_dim = 4; const float spatial_scale = 0.9; const int64_t group_size = 7; @@ -106,9 +106,9 @@ TEST(type_prop, deformable_psroi_pooling_invalid_box_coordinates_rank) TEST(type_prop, deformable_psroi_pooling_invalid_offstes_rank) { - auto input = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto coords = make_shared(element::f32, Shape{1, 2}); - auto offsets = make_shared(element::f32, Shape{1, 2, 3, 4, 5}); + auto input = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto coords = make_shared(element::Type_t::f32, Shape{1, 2}); + auto offsets = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4, 5}); const int64_t output_dim = 4; const float spatial_scale = 0.9; const int64_t group_size = 7; diff --git a/ngraph/test/type_prop/depth_to_space.cpp b/ngraph/test/type_prop/depth_to_space.cpp index 4375b9ab8184ff..779ddd13d923f1 100644 --- a/ngraph/test/type_prop/depth_to_space.cpp +++ b/ngraph/test/type_prop/depth_to_space.cpp @@ -23,57 +23,57 @@ using namespace ngraph; TEST(type_prop, depth_to_space_output_shape_block_first_4D) { - auto A = make_shared(element::f32, Shape{1, 128, 8, 8}); + auto A = make_shared(element::Type_t::f32, Shape{1, 128, 8, 8}); auto space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 8); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 2, 64, 64})); } TEST(type_prop, depth_to_space_output_shape_block_first_4D_2) { - auto A = make_shared(element::f32, Shape{1, 12, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 12, 1080, 1616}); auto space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 3, 2 * 1080, 2 * 1616})); } TEST(type_prop, depth_to_space_output_shape_block_first_5D) { - auto A = make_shared(element::f32, Shape{1, 16, 3, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 16, 3, 1080, 1616}); auto space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 2, 2 * 3, 2 * 1080, 2 * 1616})); } TEST(type_prop, depth_to_space_output_shape_depth_first_4D) { - auto A = make_shared(element::f32, Shape{1, 12, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 12, 1080, 1616}); auto 
space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 3, 2 * 1080, 2 * 1616})); } TEST(type_prop, depth_to_space_output_shape_depth_first_5D) { - auto A = make_shared(element::f32, Shape{1, 16, 3, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 16, 3, 1080, 1616}); auto space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 2, 2 * 3, 2 * 1080, 2 * 1616})); } TEST(type_prop, depth_to_space_input_rank_not_supported) { - auto A = make_shared(element::f32, Shape{1, 8}); + auto A = make_shared(element::Type_t::f32, Shape{1, 8}); try { auto space_to_depth = @@ -94,7 +94,7 @@ TEST(type_prop, depth_to_space_input_rank_not_supported) TEST(type_prop, depth_to_space_blocksize_not_matched) { - auto A = make_shared(element::f32, Shape{1, 7, 4, 4}); + auto A = make_shared(element::Type_t::f32, Shape{1, 7, 4, 4}); try { auto space_to_depth = diff --git a/ngraph/test/type_prop/dyn_reshape.cpp b/ngraph/test/type_prop/dyn_reshape.cpp index 760ccf9917fc5b..a8b571ffac234b 100644 --- a/ngraph/test/type_prop/dyn_reshape.cpp +++ b/ngraph/test/type_prop/dyn_reshape.cpp @@ -23,20 +23,22 @@ using namespace ngraph; TEST(type_prop, reshape_v1_arg_rank_static_pattern_zero) { - auto arg = make_shared(element::f32, Shape{2, 0, 2, 8}); - auto pattern = op::Constant::create(element::i64, Shape{4}, {1, 2, 0, 32}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 0, 2, 8}); + auto pattern = op::Constant::create(element::Type_t::i64, Shape{4}, {1, 2, 0, 32}); auto reshape_v1_static = make_shared(arg, pattern, true); EXPECT_EQ(reshape_v1_static->get_output_shape(0), Shape({1, 2, 2, 32})); - auto dynamic_arg = make_shared(element::f32, PartialShape::dynamic()); + auto dynamic_arg = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto reshape_v1_dynamic = make_shared(dynamic_arg, pattern, true); EXPECT_TRUE(reshape_v1_dynamic->get_output_partial_shape(0).same_scheme( PartialShape{1, 2, Dimension::dynamic(), 32})); try { - auto static_shape_parameter = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto reshape_output_pattern = op::Constant::create(element::i64, Shape{4}, {2, 2, 3, 4}); + auto static_shape_parameter = + make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto reshape_output_pattern = + op::Constant::create(element::Type_t::i64, Shape{4}, {2, 2, 3, 4}); auto reshape = make_shared(static_shape_parameter, reshape_output_pattern, true); FAIL() << "Expected failure on reshape construction"; diff --git a/ngraph/test/type_prop/elu.cpp b/ngraph/test/type_prop/elu.cpp index 3d2bf279594808..82e29aeda751f9 100644 --- a/ngraph/test/type_prop/elu.cpp +++ b/ngraph/test/type_prop/elu.cpp @@ -24,8 +24,8 @@ using namespace ngraph; TEST(type_prop, elu) { Shape data_shape{2, 4}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto elu = make_shared(data, 1); - ASSERT_EQ(elu->get_element_type(), element::f32); + ASSERT_EQ(elu->get_element_type(), element::Type_t::f32); ASSERT_EQ(elu->get_shape(), data_shape); } diff --git a/ngraph/test/type_prop/embedding_segments_sum.cpp 
b/ngraph/test/type_prop/embedding_segments_sum.cpp index 58f28d3a0d0cd8..dc118c78058811 100644 --- a/ngraph/test/type_prop/embedding_segments_sum.cpp +++ b/ngraph/test/type_prop/embedding_segments_sum.cpp @@ -25,19 +25,19 @@ using namespace ngraph; TEST(type_prop, ess) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ess = make_shared( emb_table, indices, segment_ids, num_segments, default_index, per_sample_weights); EXPECT_TRUE( ess->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 2})); EXPECT_TRUE(indices->get_partial_shape().same_scheme(per_sample_weights->get_partial_shape())); - EXPECT_EQ(ess->get_output_element_type(0), element::f32); + EXPECT_EQ(ess->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 1); EXPECT_EQ(segment_ids->get_partial_shape().rank().get_length(), 1); } @@ -45,12 +45,12 @@ TEST(type_prop, ess) TEST(type_prop, ess_dynamic_emb_table_number_segment) { auto emb_table = - make_shared(element::f32, PartialShape{5, Dimension::dynamic()}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + make_shared(element::Type_t::f32, PartialShape{5, Dimension::dynamic()}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ess = make_shared( emb_table, indices, segment_ids, num_segments, default_index, per_sample_weights); @@ -61,12 +61,12 @@ TEST(type_prop, ess_dynamic_emb_table_number_segment) TEST(type_prop, ess_fail_indices_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::f32, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::f32, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -86,12 +86,12 @@ TEST(type_prop, ess_fail_indices_element_type) TEST(type_prop, 
ess_fail_segment_ids_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::f32, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::f32, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -111,12 +111,12 @@ TEST(type_prop, ess_fail_segment_ids_element_type) TEST(type_prop, ess_fail_number_segments_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::f32, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::f32, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -136,12 +136,12 @@ TEST(type_prop, ess_fail_number_segments_element_type) TEST(type_prop, ess_fail_default_index_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::f32, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::f32, Shape{}); try { @@ -161,12 +161,12 @@ TEST(type_prop, ess_fail_default_index_element_type) TEST(type_prop, ess_fail_mismatch_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i32, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i32, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -188,12 +188,12 @@ TEST(type_prop, ess_fail_mismatch_element_type) TEST(type_prop, 
ess_fail_mismatch_element_type_1) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i32, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i32, Shape{}); try { @@ -215,12 +215,12 @@ TEST(type_prop, ess_fail_mismatch_element_type_1) TEST(type_prop, ess_fail_mismatch_element_type_2) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::i64, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::i64, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -242,12 +242,12 @@ TEST(type_prop, ess_fail_mismatch_element_type_2) TEST(type_prop, ess_fail_mismatch_element_type_3) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i32, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i32, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -270,12 +270,12 @@ TEST(type_prop, ess_fail_mismatch_element_type_3) TEST(type_prop, ess_fail_mismatch_shape) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{3}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{3}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -296,12 +296,12 @@ TEST(type_prop, ess_fail_mismatch_shape) TEST(type_prop, ess_fail_num_segments_scalar) { - 
auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{2}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{2}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -321,12 +321,12 @@ TEST(type_prop, ess_fail_num_segments_scalar) TEST(type_prop, ess_fail_default_index_scalar) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{2}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{2}); try { @@ -346,12 +346,12 @@ TEST(type_prop, ess_fail_default_index_scalar) TEST(type_prop, ess_fail_indices_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4, 2}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4, 2}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -371,12 +371,12 @@ TEST(type_prop, ess_fail_indices_1d) TEST(type_prop, ess_fail_segment_ids_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{3, 2}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{3, 2}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -396,12 +396,12 @@ TEST(type_prop, ess_fail_segment_ids_1d) TEST(type_prop, ess_fail_per_sample_weights_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - 
auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4, 2}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4, 2}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -421,26 +421,26 @@ TEST(type_prop, ess_fail_per_sample_weights_1d) TEST(type_prop, ess_4_args_api) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); auto ess = make_shared(emb_table, indices, segment_ids, num_segments); EXPECT_TRUE( ess->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 2})); - EXPECT_EQ(ess->get_output_element_type(0), element::f32); + EXPECT_EQ(ess->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 1); EXPECT_EQ(segment_ids->get_partial_shape().rank().get_length(), 1); } TEST(type_prop, ess_fail_indices_element_type_4_args_api) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::f32, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::f32, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); try { @@ -460,15 +460,15 @@ TEST(type_prop, ess_fail_indices_element_type_4_args_api) TEST(type_prop, ess_num_segment_const) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = opset3::Constant::create(element::i64, Shape{}, {3}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = opset3::Constant::create(element::Type_t::i64, Shape{}, {3}); auto ess = make_shared(emb_table, indices, segment_ids, num_segments); EXPECT_TRUE(ess->get_output_partial_shape(0).same_scheme(PartialShape{3, 2})); - EXPECT_EQ(ess->get_output_element_type(0), element::f32); + EXPECT_EQ(ess->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 1); EXPECT_EQ(segment_ids->get_partial_shape().rank().get_length(), 1); -} \ No newline at end of file +} diff --git a/ngraph/test/type_prop/embeddingbag_offsetssum.cpp b/ngraph/test/type_prop/embeddingbag_offsetssum.cpp index 5b74d18d4c75c9..6d4a71b1f4d3d9 100644 --- 
a/ngraph/test/type_prop/embeddingbag_offsetssum.cpp +++ b/ngraph/test/type_prop/embeddingbag_offsetssum.cpp @@ -23,17 +23,17 @@ using namespace ngraph; TEST(type_prop, ebos) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ebos = make_shared( emb_table, indices, offsets, default_index, per_sample_weights); EXPECT_TRUE(ebos->get_output_partial_shape(0).same_scheme(PartialShape{3, 2})); EXPECT_TRUE(indices->get_partial_shape().same_scheme(per_sample_weights->get_partial_shape())); - EXPECT_EQ(ebos->get_output_element_type(0), element::f32); + EXPECT_EQ(ebos->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 1); EXPECT_EQ(offsets->get_partial_shape().rank().get_length(), 1); } @@ -41,11 +41,11 @@ TEST(type_prop, ebos) TEST(type_prop, ebos_dynamic_emb_table) { auto emb_table = - make_shared(element::f32, PartialShape{5, Dimension::dynamic()}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + make_shared(element::Type_t::f32, PartialShape{5, Dimension::dynamic()}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ebos = make_shared( emb_table, indices, offsets, default_index, per_sample_weights); @@ -56,11 +56,12 @@ TEST(type_prop, ebos_dynamic_emb_table) TEST(type_prop, ebos_dynamic_offsets) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, PartialShape{Dimension::dynamic()}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic()}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ebos = make_shared( emb_table, indices, offsets, default_index, per_sample_weights); @@ -72,11 +73,12 @@ TEST(type_prop, ebos_dynamic_offsets) TEST(type_prop, ebos_dynamic_emb_table_offsets) { auto emb_table = - make_shared(element::f32, PartialShape{5, Dimension::dynamic()}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, PartialShape{Dimension::dynamic()}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + make_shared(element::Type_t::f32, PartialShape{5, Dimension::dynamic()}); + auto 
indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic()}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ebos = make_shared( emb_table, indices, offsets, default_index, per_sample_weights); @@ -87,11 +89,11 @@ TEST(type_prop, ebos_dynamic_emb_table_offsets) TEST(type_prop, ebos_fail_indices_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::f32, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::f32, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -111,11 +113,11 @@ TEST(type_prop, ebos_fail_indices_element_type) TEST(type_prop, ebos_fail_offsets_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::f32, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::f32, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -135,11 +137,11 @@ TEST(type_prop, ebos_fail_offsets_element_type) TEST(type_prop, ebos_fail_default_index_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::f32, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::f32, Shape{}); try { @@ -159,11 +161,11 @@ TEST(type_prop, ebos_fail_default_index_element_type) TEST(type_prop, ebos_fail_mismatch_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i32, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i32, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -185,11 +187,11 @@ TEST(type_prop, ebos_fail_mismatch_element_type) TEST(type_prop, ebos_fail_mismatch_element_type_1) { - auto emb_table = make_shared(element::f32, 
Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i32, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i32, Shape{}); try { @@ -211,11 +213,11 @@ TEST(type_prop, ebos_fail_mismatch_element_type_1) TEST(type_prop, ebos_fail_mismatch_element_type_2) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::i64, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::i64, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -237,11 +239,11 @@ TEST(type_prop, ebos_fail_mismatch_element_type_2) TEST(type_prop, ebos_fail_mismatch_shape) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{3}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{3}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -262,11 +264,11 @@ TEST(type_prop, ebos_fail_mismatch_shape) TEST(type_prop, ebos_fail_default_index_scalar) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{2}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{2}); try { @@ -286,11 +288,11 @@ TEST(type_prop, ebos_fail_default_index_scalar) TEST(type_prop, ebos_fail_indices_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4, 2}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4, 2}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, 
Shape{}); try { @@ -310,11 +312,11 @@ TEST(type_prop, ebos_fail_indices_1d) TEST(type_prop, ebos_fail_offsets_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3, 2}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3, 2}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -334,11 +336,11 @@ TEST(type_prop, ebos_fail_offsets_1d) TEST(type_prop, ebos_fail_per_sample_weights_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4, 2}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4, 2}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -358,22 +360,22 @@ TEST(type_prop, ebos_fail_per_sample_weights_1d) TEST(type_prop, ebos_3_args_api) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); auto ebos = make_shared(emb_table, indices, offsets); EXPECT_TRUE(ebos->get_output_partial_shape(0).same_scheme(PartialShape{3, 2})); - EXPECT_EQ(ebos->get_output_element_type(0), element::f32); + EXPECT_EQ(ebos->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 1); EXPECT_EQ(offsets->get_partial_shape().rank().get_length(), 1); } TEST(type_prop, ebos_fail_indices_element_type_3_args_api) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::f32, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::f32, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); try { diff --git a/ngraph/test/type_prop/embeddingbag_packedsum.cpp b/ngraph/test/type_prop/embeddingbag_packedsum.cpp index 2ff631b51b3132..6cb353399629aa 100644 --- a/ngraph/test/type_prop/embeddingbag_packedsum.cpp +++ b/ngraph/test/type_prop/embeddingbag_packedsum.cpp @@ -23,24 +23,24 @@ using namespace ngraph; TEST(type_prop, ebps) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{3, 4}); - auto per_sample_weights = make_shared(element::f32, Shape{3, 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{3, 4}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{3, 4}); auto ebps = make_shared(emb_table, indices, 
per_sample_weights); EXPECT_TRUE(ebps->get_output_partial_shape(0).same_scheme(PartialShape{3, 2})); EXPECT_TRUE(indices->get_partial_shape().same_scheme(per_sample_weights->get_partial_shape())); - EXPECT_EQ(ebps->get_output_element_type(0), element::f32); + EXPECT_EQ(ebps->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 2); } TEST(type_prop, ebps_dynamic_emb_table) { auto emb_table = - make_shared(element::f32, PartialShape{5, Dimension::dynamic()}); - auto indices = make_shared(element::i64, Shape{3, 4}); - auto per_sample_weights = make_shared(element::f32, Shape{3, 4}); - auto default_index = make_shared(element::i64, Shape{}); + make_shared(element::Type_t::f32, PartialShape{5, Dimension::dynamic()}); + auto indices = make_shared(element::Type_t::i64, Shape{3, 4}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{3, 4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ebps = make_shared(emb_table, indices, per_sample_weights); @@ -50,10 +50,11 @@ TEST(type_prop, ebps_dynamic_emb_table) TEST(type_prop, ebps_dynamic_indices) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, PartialShape{Dimension::dynamic(), 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic(), 4}); auto per_sample_weights = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 4}); + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 4}); auto ebps = make_shared(emb_table, indices, per_sample_weights); @@ -64,10 +65,11 @@ TEST(type_prop, ebps_dynamic_indices) TEST(type_prop, ebps_dynamic_emb_table_indices) { auto emb_table = - make_shared(element::f32, PartialShape{5, Dimension::dynamic()}); - auto indices = make_shared(element::i64, PartialShape{Dimension::dynamic(), 4}); + make_shared(element::Type_t::f32, PartialShape{5, Dimension::dynamic()}); + auto indices = + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic(), 4}); auto per_sample_weights = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 4}); + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 4}); auto ebps = make_shared(emb_table, indices, per_sample_weights); @@ -77,9 +79,9 @@ TEST(type_prop, ebps_dynamic_emb_table_indices) TEST(type_prop, ebps_fail_indices_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::f32, Shape{3, 4}); - auto per_sample_weights = make_shared(element::f32, Shape{3, 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::f32, Shape{3, 4}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{3, 4}); try { @@ -99,9 +101,9 @@ TEST(type_prop, ebps_fail_indices_element_type) TEST(type_prop, ebps_fail_mismatch_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{3, 4}); - auto per_sample_weights = make_shared(element::i64, Shape{3, 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{3, 4}); + auto per_sample_weights = make_shared(element::Type_t::i64, Shape{3, 4}); try { @@ -123,9 +125,9 @@ TEST(type_prop, ebps_fail_mismatch_element_type) TEST(type_prop, ebps_fail_mismatch_shape) { - auto emb_table = 
make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{3, 4}); - auto per_sample_weights = make_shared(element::f32, Shape{4, 3}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{3, 4}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4, 3}); try { @@ -146,9 +148,9 @@ TEST(type_prop, ebps_fail_mismatch_shape) TEST(type_prop, ebps_fail_indices_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto per_sample_weights = make_shared(element::f32, Shape{3, 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{3, 4}); try { @@ -168,9 +170,9 @@ TEST(type_prop, ebps_fail_indices_1d) TEST(type_prop, ebps_fail_per_sample_weights_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{3, 4}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{3, 4}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); try { @@ -190,19 +192,19 @@ TEST(type_prop, ebps_fail_per_sample_weights_1d) TEST(type_prop, ebps_2_args_api) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{3, 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{3, 4}); auto ebps = make_shared(emb_table, indices); EXPECT_TRUE(ebps->get_output_partial_shape(0).same_scheme(PartialShape{3, 2})); - EXPECT_EQ(ebps->get_output_element_type(0), element::f32); + EXPECT_EQ(ebps->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 2); } TEST(type_prop, ebps_fail_indices_element_type_2_args_api) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::f32, Shape{3, 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::f32, Shape{3, 4}); try { @@ -217,4 +219,4 @@ TEST(type_prop, ebps_fail_indices_element_type_2_args_api) { FAIL() << "INDICES type check failed for unexpected reason"; } -} \ No newline at end of file +} diff --git a/ngraph/test/type_prop/extractimagepatches.cpp b/ngraph/test/type_prop/extractimagepatches.cpp index de01b066f8322a..1427dc74f1cb35 100644 --- a/ngraph/test/type_prop/extractimagepatches.cpp +++ b/ngraph/test/type_prop/extractimagepatches.cpp @@ -23,7 +23,7 @@ using namespace ngraph; TEST(type_prop, extractimagepatches_i32) { - auto data = make_shared(element::i32, Shape{64, 3, 10, 10}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -31,13 +31,13 @@ TEST(type_prop, extractimagepatches_i32) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 2, 2})); } TEST(type_prop, extractimagepatches_i64) { - auto data = 
make_shared(element::i64, Shape{64, 3, 10, 10}); + auto data = make_shared(element::Type_t::i64, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -45,13 +45,13 @@ TEST(type_prop, extractimagepatches_i64) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i64); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i64); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 2, 2})); } TEST(type_prop, extractimagepatches_rates_change) { - auto data = make_shared(element::i32, Shape{64, 3, 10, 10}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{2, 2}; @@ -59,13 +59,13 @@ TEST(type_prop, extractimagepatches_rates_change) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 2, 2})); } TEST(type_prop, extractimagepatches_input_shape_change) { - auto data = make_shared(element::i32, Shape{64, 3, 9, 9}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 9, 9}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{2, 2}; @@ -73,13 +73,13 @@ TEST(type_prop, extractimagepatches_input_shape_change) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 1, 1})); } TEST(type_prop, extractimagepatches_dynamic_shape) { - auto data = make_shared(element::i32, PartialShape::dynamic(4)); + auto data = make_shared(element::Type_t::i32, PartialShape::dynamic(4)); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{2, 2}; @@ -87,15 +87,15 @@ TEST(type_prop, extractimagepatches_dynamic_shape) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_TRUE( extractimagepatches->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, extractimagepatches_dynamic_batch_shape) { - auto data = - make_shared(element::i32, PartialShape{Dimension::dynamic(), 3, 10, 10}); + auto data = make_shared(element::Type_t::i32, + PartialShape{Dimension::dynamic(), 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -103,14 +103,14 @@ TEST(type_prop, extractimagepatches_dynamic_batch_shape) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_TRUE(extractimagepatches->get_output_partial_shape(0).same_scheme( PartialShape{Dimension::dynamic(), 27, 2, 2})); } TEST(type_prop, extractimagepatches_padding_same_lower1) { - auto data = make_shared(element::i32, Shape{64, 3, 10, 10}); + auto data = 
make_shared(element::Type_t::i32, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -118,13 +118,13 @@ TEST(type_prop, extractimagepatches_padding_same_lower1) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 2, 2})); } TEST(type_prop, extractimagepatches_padding_same_lower2) { - auto data = make_shared(element::i32, Shape{64, 3, 9, 9}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 9, 9}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -132,12 +132,12 @@ TEST(type_prop, extractimagepatches_padding_same_lower2) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 2, 2})); } TEST(type_prop, extractimagepatches_padding_same_upper) { - auto data = make_shared(element::i32, Shape{64, 3, 11, 11}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 11, 11}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -145,13 +145,13 @@ TEST(type_prop, extractimagepatches_padding_same_upper) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 3, 3})); } TEST(type_prop, extractimagepatches_padding_same_upper2) { - auto data = make_shared(element::i32, Shape{64, 3, 6, 11}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 6, 11}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -159,13 +159,13 @@ TEST(type_prop, extractimagepatches_padding_same_upper2) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 2, 3})); } TEST(type_prop, extractimagepatches_zero_dim_inputs) { - auto data = make_shared(element::i32, Shape{64, 0, 0, 0}); + auto data = make_shared(element::Type_t::i32, Shape{64, 0, 0, 0}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -173,13 +173,13 @@ TEST(type_prop, extractimagepatches_zero_dim_inputs) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 0, 0, 0})); } TEST(type_prop, extractimagepatches_large_stride_valid_padding) { - auto data = make_shared(element::i32, Shape{64, 3, 10, 10}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{15, 15}; auto rates = Shape{1, 1}; @@ -187,13 +187,13 @@ TEST(type_prop, 
extractimagepatches_large_stride_valid_padding) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 1, 1})); } TEST(type_prop, extractimagepatches_large_stride_same_padding) { - auto data = make_shared(element::i32, Shape{64, 3, 10, 10}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{15, 15}; auto rates = Shape{1, 1}; @@ -201,6 +201,6 @@ TEST(type_prop, extractimagepatches_large_stride_same_padding) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 1, 1})); } diff --git a/ngraph/test/type_prop/fake_quantize.cpp b/ngraph/test/type_prop/fake_quantize.cpp index 9af0464d2df03f..6fba5c2b5c7eb9 100644 --- a/ngraph/test/type_prop/fake_quantize.cpp +++ b/ngraph/test/type_prop/fake_quantize.cpp @@ -23,41 +23,41 @@ using namespace ngraph; TEST(type_prop, fake_quantize) { - const auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto input_low = make_shared(element::f32, Shape{}); - const auto input_high = make_shared(element::f32, Shape{}); - const auto output_low = make_shared(element::f32, Shape{}); - const auto output_high = make_shared(element::f32, Shape{}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto input_low = make_shared(element::Type_t::f32, Shape{}); + const auto input_high = make_shared(element::Type_t::f32, Shape{}); + const auto output_low = make_shared(element::Type_t::f32, Shape{}); + const auto output_high = make_shared(element::Type_t::f32, Shape{}); const int levels = 5; const auto fake_quantize = make_shared(data, input_low, input_high, output_low, output_high, levels); - EXPECT_EQ(fake_quantize->get_element_type(), element::f32); + EXPECT_EQ(fake_quantize->get_element_type(), element::Type_t::f32); EXPECT_EQ(fake_quantize->get_shape(), (Shape{1, 2, 3, 4})); } TEST(type_prop, fake_quantize_autob) { - const auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto input_low = make_shared(element::f32, Shape{3, 1}); - const auto input_high = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto output_low = make_shared(element::f32, Shape{4}); - const auto output_high = make_shared(element::f32, Shape{}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto input_low = make_shared(element::Type_t::f32, Shape{3, 1}); + const auto input_high = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto output_low = make_shared(element::Type_t::f32, Shape{4}); + const auto output_high = make_shared(element::Type_t::f32, Shape{}); const int levels = 5; const auto fake_quantize = make_shared(data, input_low, input_high, output_low, output_high, levels); - EXPECT_EQ(fake_quantize->get_element_type(), element::f32); + EXPECT_EQ(fake_quantize->get_element_type(), element::Type_t::f32); EXPECT_EQ(fake_quantize->get_shape(), (Shape{1, 2, 3, 4})); } TEST(type_prop, fake_quantize_invalid_autob) { - const auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto input_low = 
make_shared(element::f32, Shape{3}); - auto input_high = make_shared(element::f32, Shape{}); - auto output_low = make_shared(element::f32, Shape{}); - auto output_high = make_shared(element::f32, Shape{}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto input_low = make_shared(element::Type_t::f32, Shape{3}); + auto input_high = make_shared(element::Type_t::f32, Shape{}); + auto output_low = make_shared(element::Type_t::f32, Shape{}); + auto output_high = make_shared(element::Type_t::f32, Shape{}); const int levels = 5; try diff --git a/ngraph/test/type_prop/gather.cpp b/ngraph/test/type_prop/gather.cpp index 7d74cdf196e449..48e28a10645575 100644 --- a/ngraph/test/type_prop/gather.cpp +++ b/ngraph/test/type_prop/gather.cpp @@ -28,11 +28,11 @@ TEST(type_prop, gather_axis_0) Shape params_shape{3, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_EQ(G->get_shape(), out_shape); ASSERT_EQ(G->get_axis(), 0); } @@ -42,20 +42,20 @@ TEST(type_prop, gather_axis_1) Shape params_shape{3, 3}; Shape indices_shape{1, 2}; Shape out_shape{3, 1, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {1}); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto G = make_shared(P, I, A); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_EQ(G->get_shape(), out_shape); ASSERT_EQ(G->get_axis(), 1); } TEST(type_prop, gather_v1_incorrect_axis_shape) { - auto params = make_shared(element::f32, Shape{5, 6}); - auto indices = make_shared(element::i64, Shape{4}); - auto axis = make_shared(element::i64, Shape{2}); + auto params = make_shared(element::Type_t::f32, Shape{5, 6}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto axis = make_shared(element::Type_t::i64, Shape{2}); try { auto G = make_shared(params, indices, axis); @@ -75,9 +75,9 @@ TEST(type_prop, gather_v1_incorrect_axis_shape) TEST(type_prop, gather_v1_axis_out_of_input_rank) { - auto params = make_shared(element::f32, Shape{5, 6}); - auto indices = make_shared(element::i64, Shape{4}); - auto axis = make_shared(element::i64, Shape{1}, vector{2}); + auto params = make_shared(element::Type_t::f32, Shape{5, 6}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto axis = make_shared(element::Type_t::i64, Shape{1}, vector{2}); try { auto G = make_shared(params, indices, axis); @@ -97,10 +97,11 @@ TEST(type_prop, gather_v1_axis_out_of_input_rank) TEST(type_prop, gather_v1_negative_axis) { - auto params = make_shared(element::f32, Shape{5, 6, 7}); - auto indices = make_shared(element::i64, Shape{4}); + auto params = make_shared(element::Type_t::f32, Shape{5, 6, 7}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); int64_t axis = -2; - auto axis_node = 
make_shared(element::i64, Shape{1}, vector{axis}); + auto axis_node = + make_shared(element::Type_t::i64, Shape{1}, vector{axis}); auto gather_v1 = make_shared(params, indices, axis_node); ASSERT_EQ(gather_v1->get_axis(), 1); } diff --git a/ngraph/test/type_prop/gather_nd.cpp b/ngraph/test/type_prop/gather_nd.cpp index ea4c6d2307071a..6501d7f0e4ec17 100644 --- a/ngraph/test/type_prop/gather_nd.cpp +++ b/ngraph/test/type_prop/gather_nd.cpp @@ -28,10 +28,10 @@ TEST(type_prop, gather_nd_slices_from_4d_batch_dims0) Shape params_shape{2, 3, 11, 12}; Shape indices_shape{2, 3, 2}; Shape out_shape{2, 3, 11, 12}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 0); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -40,10 +40,10 @@ TEST(type_prop, gather_nd_scalars_from_4d_batch_dims2) Shape params_shape{2, 3, 11, 12}; Shape indices_shape{2, 3, 2}; Shape out_shape{6}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 2); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -52,10 +52,10 @@ TEST(type_prop, gather_nd_slices_from_5d_batch_dims2) Shape params_shape{7, 5, 11, 12, 32}; Shape indices_shape{7, 5, 3, 1}; Shape out_shape{35, 3, 12, 32}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 2); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -64,10 +64,10 @@ TEST(type_prop, gather_nd_batch_dim2_with_dyn_dim) PartialShape params_shape{7, Dimension::dynamic(), 11, 12, 32}; Shape indices_shape{7, 5, 3, 1}; Shape out_shape{35, 3, 12, 32}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 2); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -76,10 +76,10 @@ TEST(type_prop, gather_nd_batch_dim2_with_dyn_dim2) PartialShape params_shape{7, Dimension::dynamic(), Dimension::dynamic(), 12, 32}; Shape indices_shape{7, 5, 3, 1}; Shape out_shape{35, 3, 12, 32}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 2); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -89,10 +89,10 @@ TEST(type_prop, gather_nd_batch_dim2_with_dyn_dim3) 7, Dimension::dynamic(), Dimension::dynamic(), 12, 
Dimension::dynamic()}; Shape indices_shape{7, 5, 3, 1}; PartialShape out_shape{35, 3, 12, Dimension::dynamic()}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 2); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_TRUE(G5->get_output_partial_shape(0).same_scheme(out_shape)); } @@ -101,10 +101,10 @@ TEST(type_prop, gather_nd_batch_dim0_with_dyn_ind_dim) PartialShape params_shape{ 7, Dimension::dynamic(), Dimension::dynamic(), 12, Dimension::dynamic()}; PartialShape indices_shape{7, 5, 3, Dimension::dynamic()}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 0); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_TRUE(G5->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } @@ -112,8 +112,8 @@ TEST(type_prop, gather_nd_fail_batch_dims_greater_indices_rank) { Shape params_shape{2, 3, 4, 5}; Shape indices_shape{2, 1}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); try { @@ -137,8 +137,8 @@ TEST(type_prop, gather_nd_fail_unequal_batch_dims) { Shape params_shape{2, 3, 4, 5}; Shape indices_shape{2, 1, 4}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); try { @@ -161,8 +161,8 @@ TEST(type_prop, gather_nd_fail_indices_tuple_greater_data_rank_batch_dims2) { Shape params_shape{2, 1, 4, 5}; Shape indices_shape{2, 1, 5, 3}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); try { @@ -189,11 +189,11 @@ TEST(type_prop, gather_nd_scalar_from_2d) Shape params_shape{2, 2}; Shape indices_shape{2, 2}; Shape out_shape{2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -202,11 +202,11 @@ TEST(type_prop, gather_nd_1d_from_2d) Shape params_shape{2, 2}; Shape indices_shape{2, 1}; Shape out_shape{2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -215,11 +215,11 @@ TEST(type_prop, gather_nd_scalar_from_3d) Shape params_shape{2, 2, 
2}; Shape indices_shape{2, 3}; Shape out_shape{2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -228,11 +228,11 @@ TEST(type_prop, gather_nd_1d_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -241,11 +241,11 @@ TEST(type_prop, gather_nd_2d_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{1, 1}; Shape out_shape{1, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -254,11 +254,11 @@ TEST(type_prop, gather_nd_batch_scalar_from_2d) Shape params_shape{2, 2}; Shape indices_shape{2, 1, 2}; Shape out_shape{2, 1}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -267,11 +267,11 @@ TEST(type_prop, gather_nd_batch_1d_from_2d) Shape params_shape{2, 2}; Shape indices_shape{2, 1, 1}; Shape out_shape{2, 1, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -280,11 +280,11 @@ TEST(type_prop, gather_nd_batch_scalar_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 2, 3}; Shape out_shape{2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -293,11 +293,11 @@ TEST(type_prop, gather_nd_batch_1d_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = 
make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -306,11 +306,11 @@ TEST(type_prop, gather_nd_batch_2d_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 1, 1}; Shape out_shape{2, 1, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -319,8 +319,8 @@ TEST(type_prop, gather_nd_fail_params_rank) Shape params_shape{}; Shape indices_shape{2, 1, 1}; Shape out_shape{2, 1, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); try { @@ -343,8 +343,8 @@ TEST(type_prop, gather_nd_fail_indices_rank) Shape params_shape{2, 2, 2}; Shape indices_shape{}; Shape out_shape{2, 1, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); try { @@ -367,8 +367,8 @@ TEST(type_prop, gather_nd_fail_indices_element_type) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 1, 1}; Shape out_shape{2, 1, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::f32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::f32, indices_shape); try { diff --git a/ngraph/test/type_prop/gather_tree.cpp b/ngraph/test/type_prop/gather_tree.cpp index 7cca7206a94ba0..eb3e71200e465b 100644 --- a/ngraph/test/type_prop/gather_tree.cpp +++ b/ngraph/test/type_prop/gather_tree.cpp @@ -23,24 +23,24 @@ using namespace ngraph; TEST(type_prop, gather_tree_output_shape) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3}); - auto max_seq_len = make_shared(element::i64, Shape{1}); - auto end_token = make_shared(element::i64, Shape{}); + auto step_ids = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto parent_idx = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto max_seq_len = make_shared(element::Type_t::i64, Shape{1}); + auto end_token = make_shared(element::Type_t::i64, Shape{}); auto gather_tree = make_shared(step_ids, parent_idx, max_seq_len, end_token); ASSERT_EQ(gather_tree->get_output_shape(0), (Shape{1, 2, 3})); - ASSERT_EQ(gather_tree->get_output_element_type(0), element::i64); + ASSERT_EQ(gather_tree->get_output_element_type(0), element::Type_t::i64); } TEST(type_prop, gather_tree_pooling_step_ids_invalid_rank) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3, 4}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3}); - auto max_seq_len = make_shared(element::i64, Shape{1}); - auto end_token = make_shared(element::i64, Shape{}); + auto step_ids = make_shared(element::Type_t::i64, Shape{1, 2, 3, 4}); + auto parent_idx = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto max_seq_len = make_shared(element::Type_t::i64, Shape{1}); + 
auto end_token = make_shared(element::Type_t::i64, Shape{}); try { auto gather_tree = @@ -61,10 +61,10 @@ TEST(type_prop, gather_tree_pooling_step_ids_invalid_rank) TEST(type_prop, gather_tree_parent_idx_invalid_rank) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3, 4}); - auto max_seq_len = make_shared(element::i64, Shape{1}); - auto end_token = make_shared(element::i64, Shape{}); + auto step_ids = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto parent_idx = make_shared(element::Type_t::i64, Shape{1, 2, 3, 4}); + auto max_seq_len = make_shared(element::Type_t::i64, Shape{1}); + auto end_token = make_shared(element::Type_t::i64, Shape{}); try { auto gather_tree = @@ -86,10 +86,10 @@ TEST(type_prop, gather_tree_parent_idx_invalid_rank) TEST(type_prop, gather_tree_max_seq_len_invalid_rank) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3}); - auto max_seq_len = make_shared(element::i64, Shape{1, 2}); - auto end_token = make_shared(element::i64, Shape{}); + auto step_ids = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto parent_idx = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto max_seq_len = make_shared(element::Type_t::i64, Shape{1, 2}); + auto end_token = make_shared(element::Type_t::i64, Shape{}); try { auto gather_tree = @@ -111,10 +111,10 @@ TEST(type_prop, gather_tree_max_seq_len_invalid_rank) TEST(type_prop, gather_tree_end_token_invalid_rank) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3}); - auto max_seq_len = make_shared(element::i64, Shape{1}); - auto end_token = make_shared(element::i64, Shape{1}); + auto step_ids = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto parent_idx = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto max_seq_len = make_shared(element::Type_t::i64, Shape{1}); + auto end_token = make_shared(element::Type_t::i64, Shape{1}); try { auto gather_tree = diff --git a/ngraph/test/type_prop/grn.cpp b/ngraph/test/type_prop/grn.cpp index ba91245c45126d..4a5441c479545f 100644 --- a/ngraph/test/type_prop/grn.cpp +++ b/ngraph/test/type_prop/grn.cpp @@ -25,17 +25,17 @@ TEST(type_prop, grn) { float bias = 1.25f; Shape data_shape{2, 3, 4, 5}; - auto A = make_shared(element::f32, data_shape); + auto A = make_shared(element::Type_t::f32, data_shape); auto grn = make_shared(A, bias); - ASSERT_EQ(grn->get_element_type(), element::f32); + ASSERT_EQ(grn->get_element_type(), element::Type_t::f32); ASSERT_EQ(grn->get_shape(), data_shape); } TEST(type_prop, grn_invalid_data_rank) { float bias = 1.25f; - auto A = make_shared(element::f32, Shape{4}); + auto A = make_shared(element::Type_t::f32, Shape{4}); try { @@ -53,7 +53,7 @@ TEST(type_prop, grn_invalid_data_rank) FAIL() << "Deduced type check failed for unexpected reason"; } - A = make_shared(element::f32, Shape{1, 2, 3, 4, 5}); + A = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4, 5}); try { diff --git a/ngraph/test/type_prop/group_convolution.cpp b/ngraph/test/type_prop/group_convolution.cpp index 054c1864a2da8e..62ec12d83fd0a9 100644 --- a/ngraph/test/type_prop/group_convolution.cpp +++ b/ngraph/test/type_prop/group_convolution.cpp @@ -31,8 +31,8 @@ TEST(type_prop, group_conv_v1_partial_auto_padding_same_lower) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto 
filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -52,8 +52,8 @@ TEST(type_prop, group_conv_v1_partial_auto_padding_same_upper) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_UPPER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -73,8 +73,8 @@ TEST(type_prop, group_conv_v1_partial_auto_padding_same_lower_nc_dims_dynamic) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -94,8 +94,8 @@ TEST(type_prop, group_conv_v1_partial_auto_padding_same_upper_nc_dims_dynamic) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_UPPER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -115,8 +115,8 @@ TEST(type_prop, group_conv_v1_partial_auto_padding_same_spatial_dims_dynamic) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); diff --git a/ngraph/test/type_prop/group_convolution_backprop_data.cpp b/ngraph/test/type_prop/group_convolution_backprop_data.cpp index dc5422bc155ba2..b792a489350955 100644 --- a/ngraph/test/type_prop/group_convolution_backprop_data.cpp +++ b/ngraph/test/type_prop/group_convolution_backprop_data.cpp @@ -24,12 +24,12 @@ using namespace ngraph; TEST(type_prop, group_conv_backprop_data) { // GROUPS x C_IN x C_OUT x kH x kW - const auto weights = make_shared(element::f32, Shape{2, 8, 2, 3, 3}); + const auto weights = make_shared(element::Type_t::f32, Shape{2, 8, 2, 3, 3}); // N x C_IN * GROUPS x H x W - const auto data = make_shared(element::f32, Shape{1, 16, 6, 6}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 16, 6, 6}); const auto gcbd = make_shared( data, weights, Strides{}, CoordinateDiff{}, CoordinateDiff{}, Strides{}); - EXPECT_EQ(gcbd->get_element_type(), element::f32); + EXPECT_EQ(gcbd->get_element_type(), element::Type_t::f32); EXPECT_EQ(gcbd->get_output_shape(0), (Shape{1, 4, 8, 8})); EXPECT_EQ(gcbd->get_strides(), (Strides{1, 1})); EXPECT_EQ(gcbd->get_dilations(), (Strides{1, 1})); @@ -42,14 +42,14 @@ 
TEST(type_prop, group_conv_backprop_data) TEST(type_prop, group_conv_backprop_data_output_shape) { // N x C_IN * GROUPS x H x W - const auto data = make_shared(element::f32, Shape{1, 16, 5, 5}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 16, 5, 5}); // GROUPS x C_IN x C_OUT x kH x kW - const auto weights = make_shared(element::f32, Shape{1, 16, 2, 3, 3}); - const auto output_shape = op::Constant::create(element::i64, Shape{2}, {3, 3}); + const auto weights = make_shared(element::Type_t::f32, Shape{1, 16, 2, 3, 3}); + const auto output_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {3, 3}); const auto gcbd = make_shared( data, weights, output_shape, Strides{}, Strides{}, op::PadType::SAME_UPPER); - EXPECT_EQ(gcbd->get_element_type(), element::f32); + EXPECT_EQ(gcbd->get_element_type(), element::Type_t::f32); EXPECT_EQ(gcbd->get_output_shape(0), (Shape{1, 2, 3, 3})); EXPECT_EQ(gcbd->get_strides(), (Strides{1, 1})); EXPECT_EQ(gcbd->get_dilations(), (Strides{1, 1})); @@ -62,9 +62,9 @@ TEST(type_prop, group_conv_backprop_data_output_shape) TEST(type_prop, group_conv_bprop_data_v1_output_partial_shape_dynamic_static_rank) { PartialShape shape_filter{4, 5, 2, 3, 3}; - auto filters = make_shared(element::f32, shape_filter); + auto filters = make_shared(element::Type_t::f32, shape_filter); PartialShape shape_data{Dimension(), 20, 224, 224}; - auto data = make_shared(element::f32, shape_data); + auto data = make_shared(element::Type_t::f32, shape_data); auto strides = Strides{2, 2}; auto dilations = Strides{1, 1}; auto padding_begin = CoordinateDiff{1, 1}; @@ -83,9 +83,9 @@ TEST(type_prop, group_conv_bprop_data_v1_output_partial_shape_dynamic_static_ran TEST(type_prop, group_conv_backprop_data_invalid_params) { // GROUPS x C_IN x C_OUT x kH x kW - auto weights = make_shared(element::f32, Shape{21, 16, 20, 3, 3}); + auto weights = make_shared(element::Type_t::f32, Shape{21, 16, 20, 3, 3}); // N x C_IN * GROUPS x H x W - const auto data = make_shared(element::f32, Shape{1, 16, 5, 5}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 16, 5, 5}); try { @@ -105,7 +105,7 @@ TEST(type_prop, group_conv_backprop_data_invalid_params) } // GROUPS x C_IN x C_OUT x kH x kW - weights = make_shared(element::f32, Shape{4, 16, 20, 3, 3}); + weights = make_shared(element::Type_t::f32, Shape{4, 16, 20, 3, 3}); try { @@ -126,7 +126,7 @@ TEST(type_prop, group_conv_backprop_data_invalid_params) } // GROUPS x C_IN x C_OUT x kH x kW - weights = make_shared(element::f32, Shape{4, 4, 20, 3, 3}); + weights = make_shared(element::Type_t::f32, Shape{4, 4, 20, 3, 3}); try { diff --git a/ngraph/test/type_prop/gru_cell.cpp b/ngraph/test/type_prop/gru_cell.cpp index a7b3558b908791..ef2969e3146a91 100644 --- a/ngraph/test/type_prop/gru_cell.cpp +++ b/ngraph/test/type_prop/gru_cell.cpp @@ -29,15 +29,16 @@ TEST(type_prop, gru_cell) const size_t hidden_size = 3; const size_t gates_count = 3; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, 
hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); - EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(gru_cell->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(gru_cell->get_output_shape(0), (Shape{batch_size, hidden_size})); } @@ -48,13 +49,13 @@ TEST(type_prop, gru_cell_invalid_input) const size_t hidden_size = 3; const size_t gates_count = 3; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + auto H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); // Invalid W tensor shape. - auto W = make_shared(element::f32, Shape{hidden_size, input_size}); + auto W = make_shared(element::Type_t::f32, Shape{hidden_size, input_size}); try { const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); @@ -67,8 +68,9 @@ TEST(type_prop, gru_cell_invalid_input) } // Invalid R tensor shape. - W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - R = make_shared(element::f32, Shape{hidden_size, 1}); + W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + R = make_shared(element::Type_t::f32, Shape{hidden_size, 1}); try { const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); @@ -83,8 +85,9 @@ TEST(type_prop, gru_cell_invalid_input) } // Invalid H_t tensor shape. - R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - H_t = make_shared(element::f32, Shape{4, hidden_size}); + R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + H_t = make_shared(element::Type_t::f32, Shape{4, hidden_size}); try { const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); @@ -98,8 +101,8 @@ TEST(type_prop, gru_cell_invalid_input) } // Invalid B tensor shape. 
- H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - auto B = make_shared(element::f32, Shape{hidden_size}); + H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + auto B = make_shared(element::Type_t::f32, Shape{hidden_size}); try { const auto gru_cell = make_shared(X, H_t, W, R, B, hidden_size); @@ -119,16 +122,17 @@ TEST(type_prop, gru_cell_dynamic_batch_size) const size_t hidden_size = 3; const size_t gates_count = 3; - const auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - const auto W = make_shared(element::f32, + const auto X = + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - const auto R = make_shared(element::f32, + const auto R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); - EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(gru_cell->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(gru_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); } @@ -139,16 +143,17 @@ TEST(type_prop, gru_cell_dynamic_hidden_size) const auto hidden_size = Dimension::dynamic(); const size_t gates_count = 3; - const auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - const auto W = make_shared(element::f32, + const auto X = + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, PartialShape{hidden_size * gates_count, input_size}); - const auto R = make_shared(element::f32, + const auto R = make_shared(element::Type_t::f32, PartialShape{hidden_size * gates_count, hidden_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto gru_cell = make_shared(X, H_t, W, R, 3); - EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(gru_cell->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(gru_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); } @@ -158,16 +163,19 @@ TEST(type_prop, gru_cell_dynamic_inputs) const auto input_size = Dimension::dynamic(); const auto hidden_size = Dimension::dynamic(); - const auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - const auto W = make_shared(element::f32, PartialShape{hidden_size, input_size}); - const auto R = make_shared(element::f32, PartialShape{hidden_size, hidden_size}); + const auto X = + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + const auto W = + make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); + const auto R = + make_shared(element::Type_t::f32, PartialShape{hidden_size, hidden_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto gru_cell = make_shared(X, H_t, W, R, 2); EXPECT_EQ(gru_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); - EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(gru_cell->get_output_element_type(0), 
element::Type_t::f32); } TEST(type_prop, gru_cell_invalid_input_rank0) @@ -177,43 +185,44 @@ TEST(type_prop, gru_cell_invalid_input_rank0) const size_t hidden_size = 3; const size_t gates_count = 3; - auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - auto R = make_shared(element::f32, + auto X = make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + auto H_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); // Invalid rank0 for W tensor. - auto W = make_shared(element::f32, PartialShape{}); + auto W = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "GRUCell node was created with invalid data."; // Invalid rank0 for X tensor. - W = make_shared(element::f32, + W = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - X = make_shared(element::f32, PartialShape{}); + X = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "GRUCell node was created with invalid data."; // Invalid rank0 for H_t tensor. - X = make_shared(element::f32, PartialShape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape{}); + X = make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + H_t = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "GRUCell node was created with invalid data."; // Invalid rank0 for R tensor. - H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape{}); + H_t = make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + R = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "GRUCell node was created with invalid data."; // Invalid rank0 for B tensor. - R = make_shared(element::f32, + R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - auto B = make_shared(element::f32, PartialShape{}); + auto B = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, B, hidden_size), ngraph::NodeValidationFailure) << "GRUCell node was created with invalid data."; @@ -226,10 +235,11 @@ TEST(type_prop, gru_cell_invalid_input_dynamic_rank) const size_t hidden_size = 3; const size_t gates_count = 3; - auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - auto R = make_shared(element::f32, + auto X = make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + auto H_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); auto check_dynamic_gru = [](const shared_ptr& gru) -> bool { return gru->output(0).get_partial_shape() == PartialShape::dynamic() && @@ -237,32 +247,34 @@ TEST(type_prop, gru_cell_invalid_input_dynamic_rank) }; // Invalid dynamic rank for W tensor. 
- auto W = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto W = + make_shared(element::Type_t::f32, PartialShape::dynamic(Rank::dynamic())); auto gru_w = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_w), true); // Invalid dynamic rank for X tensor. - W = make_shared(element::f32, PartialShape{hidden_size, input_size}); - X = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + W = make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); + X = make_shared(element::Type_t::f32, PartialShape::dynamic(Rank::dynamic())); auto gru_x = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_x), true); // Invalid dynamic rank for H_t tensor. - X = make_shared(element::f32, PartialShape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + X = make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + H_t = make_shared(element::Type_t::f32, PartialShape::dynamic(Rank::dynamic())); auto gru_h = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_h), true); // Invalid dynamic rank for R tensor. - H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + H_t = make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + R = make_shared(element::Type_t::f32, PartialShape::dynamic(Rank::dynamic())); auto gru_r = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_r), true); // Invalid dynamic rank for B tensor. - R = make_shared(element::f32, + R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto B = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto B = + make_shared(element::Type_t::f32, PartialShape::dynamic(Rank::dynamic())); auto gru_b = make_shared(X, H_t, W, R, B, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_b), true); } diff --git a/ngraph/test/type_prop/gru_sequence.cpp b/ngraph/test/type_prop/gru_sequence.cpp index 47cc47fa89ab00..d8c6665cccd03f 100644 --- a/ngraph/test/type_prop/gru_sequence.cpp +++ b/ngraph/test/type_prop/gru_sequence.cpp @@ -30,17 +30,18 @@ TEST(type_prop, gru_sequence_forward) const size_t input_size = 4; const size_t hidden_size = 128; - const auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto X = make_shared(element::Type_t::f32, + Shape{batch_size, seq_length, input_size}); const auto initial_hidden_state = make_shared( - element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = + make_shared(element::Type_t::i32, Shape{batch_size}); const auto W = make_shared( - element::f32, Shape{num_directions, 3 * hidden_size, input_size}); + element::Type_t::f32, Shape{num_directions, 3 * hidden_size, input_size}); const auto R = make_shared( - element::f32, Shape{num_directions, 3 * hidden_size, hidden_size}); - const auto B = - make_shared(element::f32, Shape{num_directions, 3 * hidden_size}); + element::Type_t::f32, Shape{num_directions, 3 * hidden_size, hidden_size}); + const auto B = make_shared(element::Type_t::f32, + Shape{num_directions, 3 * hidden_size}); const auto direction = op::RecurrentSequenceDirection::FORWARD; @@ -55,10 +56,10 @@ TEST(type_prop, 
gru_sequence_forward) EXPECT_EQ(sequence->get_activations()[1], "tanh"); EXPECT_EQ(sequence->get_clip(), 0.f); EXPECT_EQ(sequence->get_linear_before_reset(), false); - EXPECT_EQ(sequence->get_output_element_type(0), element::f32); + EXPECT_EQ(sequence->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(sequence->outputs().size(), 2); EXPECT_EQ(sequence->get_output_shape(0), (Shape{batch_size, num_directions, seq_length, hidden_size})); - EXPECT_EQ(sequence->get_output_element_type(1), element::f32); + EXPECT_EQ(sequence->get_output_element_type(1), element::Type_t::f32); EXPECT_EQ(sequence->get_output_shape(1), (Shape{batch_size, num_directions, hidden_size})); } diff --git a/ngraph/test/type_prop/hard_sigmoid.cpp b/ngraph/test/type_prop/hard_sigmoid.cpp index b213e1f0bfe5b9..dc59fc97e299da 100644 --- a/ngraph/test/type_prop/hard_sigmoid.cpp +++ b/ngraph/test/type_prop/hard_sigmoid.cpp @@ -25,10 +25,10 @@ TEST(type_prop, hardsigmoid) { const Shape data_shape{3, 5}; - const auto P = make_shared(element::f32, data_shape); + const auto P = make_shared(element::Type_t::f32, data_shape); const auto alpha = op::Constant::create(P->get_element_type(), Shape{}, {0.1f}); const auto beta = op::Constant::create(P->get_element_type(), Shape{}, {1.2f}); const auto H = make_shared(P, alpha, beta); - ASSERT_EQ(H->get_element_type(), element::f32); + ASSERT_EQ(H->get_element_type(), element::Type_t::f32); ASSERT_EQ(H->get_shape(), data_shape); } diff --git a/ngraph/test/type_prop/hsigmoid.cpp b/ngraph/test/type_prop/hsigmoid.cpp index 9ef8e4833a7da2..15e116f6aff8da 100644 --- a/ngraph/test/type_prop/hsigmoid.cpp +++ b/ngraph/test/type_prop/hsigmoid.cpp @@ -23,31 +23,33 @@ using namespace ngraph; TEST(type_prop, hsigmoid) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto hsigmoid_func = make_shared(data); - EXPECT_EQ(hsigmoid_func->get_element_type(), element::f32); + EXPECT_EQ(hsigmoid_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(hsigmoid_func->get_shape(), data->get_output_shape(0)); } TEST(type_prop, hsigmoid_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto hsigmoid_func = make_shared(data); - EXPECT_EQ(hsigmoid_func->get_element_type(), element::f32); + EXPECT_EQ(hsigmoid_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE( hsigmoid_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); // rank unknown auto hsigmoid_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic())); + make_shared(element::Type_t::f32, PartialShape::dynamic())); ASSERT_TRUE(hsigmoid_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, hsigmoid_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto hsigmoid_func = make_shared(data); - EXPECT_EQ(hsigmoid_func->get_element_type(), element::f32); + EXPECT_EQ(hsigmoid_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE( hsigmoid_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); ASSERT_TRUE(hsigmoid_func->get_output_partial_shape(0).rank().is_static()); diff --git a/ngraph/test/type_prop/hswish.cpp b/ngraph/test/type_prop/hswish.cpp index 
9df6d19b86a317..053ca1609e201b 100644 --- a/ngraph/test/type_prop/hswish.cpp +++ b/ngraph/test/type_prop/hswish.cpp @@ -23,31 +23,33 @@ using namespace ngraph; TEST(type_prop, hswish) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto hswish_func = make_shared(data); - EXPECT_EQ(hswish_func->get_element_type(), element::f32); + EXPECT_EQ(hswish_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(hswish_func->get_shape(), data->get_output_shape(0)); } TEST(type_prop, hswish_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto hswish_func = make_shared(data); - EXPECT_EQ(hswish_func->get_element_type(), element::f32); + EXPECT_EQ(hswish_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE( hswish_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); // rank unknown auto hswish_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic())); + make_shared(element::Type_t::f32, PartialShape::dynamic())); ASSERT_TRUE(hswish_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, hswish_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto hswish_func = make_shared(data); - EXPECT_EQ(hswish_func->get_element_type(), element::f32); + EXPECT_EQ(hswish_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE( hswish_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); ASSERT_TRUE(hswish_func->get_output_partial_shape(0).rank().is_static()); diff --git a/ngraph/test/type_prop/interpolate.cpp b/ngraph/test/type_prop/interpolate.cpp index 22c795bc56351c..e0050f999ce549 100644 --- a/ngraph/test/type_prop/interpolate.cpp +++ b/ngraph/test/type_prop/interpolate.cpp @@ -28,10 +28,10 @@ using ShapeCalcMode = op::v4::Interpolate::ShapeCalcMode; TEST(type_prop, interpolate_v4) { - auto image = std::make_shared(element::f32, Shape{2, 2, 30, 60}); - auto target_shape = std::make_shared(element::f32, Shape{2, 2, 15, 30}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::Type_t::f32, Shape{2, 2, 30, 60}); + auto target_shape = std::make_shared(element::Type_t::f32, Shape{2, 2, 15, 30}); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::nearest; @@ -44,7 +44,7 @@ TEST(type_prop, interpolate_v4) attrs.cube_coeff = -0.75; auto interp = std::make_shared(image, target_shape, scales, axes, attrs); - EXPECT_EQ(interp->get_element_type(), element::f32); + EXPECT_EQ(interp->get_element_type(), element::Type_t::f32); EXPECT_EQ(interp->get_shape(), (Shape{2, 2, 15, 30})); } @@ -52,10 +52,10 @@ TEST(type_prop, interpolate_v4_partial) { auto partial_shape = PartialShape{2, 2, Dimension::dynamic(), Dimension::dynamic()}; - auto image = std::make_shared(element::f32, partial_shape); - auto target_shape = std::make_shared(element::f32, Shape{2, 2, 15, 30}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = 
op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::Type_t::f32, partial_shape); + auto target_shape = std::make_shared(element::Type_t::f32, Shape{2, 2, 15, 30}); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::nearest; @@ -68,11 +68,12 @@ TEST(type_prop, interpolate_v4_partial) attrs.cube_coeff = -0.75; auto interp = std::make_shared(image, target_shape, scales, axes, attrs); - EXPECT_EQ(interp->get_element_type(), element::f32); + EXPECT_EQ(interp->get_element_type(), element::Type_t::f32); ASSERT_TRUE(interp->get_output_partial_shape(0).same_scheme(partial_shape)); // rank unknown - auto partial_param = std::make_shared(element::f32, PartialShape::dynamic()); + auto partial_param = + std::make_shared(element::Type_t::f32, PartialShape::dynamic()); auto interp_part = std::make_shared(partial_param, target_shape, scales, axes, attrs); ASSERT_TRUE(interp_part->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); @@ -82,10 +83,10 @@ TEST(type_prop, interpolate_v4_partial_static_rank) { auto partial_shape = PartialShape{2, 2, Dimension::dynamic(), Dimension::dynamic()}; - auto image = std::make_shared(element::f32, partial_shape); - auto target_shape = std::make_shared(element::f32, Shape{2, 2, 15, 30}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::Type_t::f32, partial_shape); + auto target_shape = std::make_shared(element::Type_t::f32, Shape{2, 2, 15, 30}); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::nearest; @@ -98,7 +99,7 @@ TEST(type_prop, interpolate_v4_partial_static_rank) attrs.cube_coeff = -0.75; auto interp = std::make_shared(image, target_shape, scales, axes, attrs); - EXPECT_EQ(interp->get_element_type(), element::f32); + EXPECT_EQ(interp->get_element_type(), element::Type_t::f32); ASSERT_TRUE(interp->get_output_partial_shape(0).same_scheme(partial_shape)); ASSERT_TRUE(interp->get_output_partial_shape(0).rank().is_static()); } @@ -108,10 +109,10 @@ TEST(type_prop, interpolate_v4_partial_static_rank2) auto partial_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), 10, 20}; auto out_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), 5, 10}; - auto image = std::make_shared(element::f32, partial_shape); - auto target_shape = std::make_shared(element::f32, Shape{2, 2, 15, 30}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::Type_t::f32, partial_shape); + auto target_shape = std::make_shared(element::Type_t::f32, Shape{2, 2, 15, 30}); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::nearest; @@ -124,7 +125,7 @@ TEST(type_prop, interpolate_v4_partial_static_rank2) attrs.cube_coeff = -0.75; auto interp = std::make_shared(image, target_shape, scales, axes, attrs); - EXPECT_EQ(interp->get_element_type(), element::f32); + 
EXPECT_EQ(interp->get_element_type(), element::Type_t::f32); ASSERT_TRUE(interp->get_output_partial_shape(0).same_scheme(out_shape)); ASSERT_TRUE(interp->get_output_partial_shape(0).rank().is_static()); } @@ -134,10 +135,11 @@ TEST(type_prop, interpolate_v4_partial_static_rank3) auto partial_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, 3}; auto out_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), 1, 1}; - auto image = std::make_shared(element::f32, partial_shape); - auto target_shape = std::make_shared(element::f32, Shape{2, 2, 1, 1}); - auto scales = op::Constant::create(element::f32, Shape{2}, {1.0f / 3.0f, 1.0f / 3.0f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::Type_t::f32, partial_shape); + auto target_shape = std::make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); + auto scales = + op::Constant::create(element::Type_t::f32, Shape{2}, {1.0f / 3.0f, 1.0f / 3.0f}); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::nearest; @@ -150,7 +152,7 @@ TEST(type_prop, interpolate_v4_partial_static_rank3) attrs.cube_coeff = -0.75; auto interp = std::make_shared(image, target_shape, scales, axes, attrs); - EXPECT_EQ(interp->get_element_type(), element::f32); + EXPECT_EQ(interp->get_element_type(), element::Type_t::f32); ASSERT_TRUE(interp->get_output_partial_shape(0).same_scheme(out_shape)); ASSERT_TRUE(interp->get_output_partial_shape(0).rank().is_static()); } diff --git a/ngraph/test/type_prop/log_softmax.cpp b/ngraph/test/type_prop/log_softmax.cpp index 5fe7caad4caed9..82fb61d87eeda7 100644 --- a/ngraph/test/type_prop/log_softmax.cpp +++ b/ngraph/test/type_prop/log_softmax.cpp @@ -23,15 +23,15 @@ using namespace ngraph; TEST(type_prop, log_softmax) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto log_softmax_func = make_shared(data, 1); - EXPECT_EQ(log_softmax_func->get_element_type(), element::f32); + EXPECT_EQ(log_softmax_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(log_softmax_func->get_shape(), (Shape{1, 3, 6})); } TEST(type_prop, log_softmax_incorrect_axis) { - const auto data = make_shared(element::f32, Shape{1, 3, 6}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); try { @@ -48,24 +48,26 @@ TEST(type_prop, log_softmax_incorrect_axis) TEST(type_prop, log_softmax_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto log_softmax_func = make_shared(data, 1); - EXPECT_EQ(log_softmax_func->get_element_type(), element::f32); + EXPECT_EQ(log_softmax_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(log_softmax_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); // rank unknown auto log_softmax_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic())); + make_shared(element::Type_t::f32, PartialShape::dynamic())); ASSERT_TRUE( log_softmax_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, log_softmax_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto log_softmax_func = make_shared(data, 1); - 
EXPECT_EQ(log_softmax_func->get_element_type(), element::f32); + EXPECT_EQ(log_softmax_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(log_softmax_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); ASSERT_TRUE(log_softmax_func->get_output_partial_shape(0).rank().is_static()); diff --git a/ngraph/test/type_prop/loop.cpp b/ngraph/test/type_prop/loop.cpp index f4dfe846d88ab0..55356511a9635f 100644 --- a/ngraph/test/type_prop/loop.cpp +++ b/ngraph/test/type_prop/loop.cpp @@ -29,23 +29,23 @@ using namespace ngraph; TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto current_iteration = make_shared(element::Type_t::i64, Shape{1}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto body_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{1}, 10); auto exec_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -134,23 +134,23 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes) TEST(type_prop, loop_operation_dowhile_mode_1_iter_static_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto current_iteration = make_shared(element::Type_t::i64, Shape{1}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto body_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, false); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, false); - auto 
trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{1}, 10); auto exec_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -239,24 +239,24 @@ TEST(type_prop, loop_operation_dowhile_mode_1_iter_static_shapes) TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_static_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{1}); - auto Y = make_shared(element::f32, Shape{1}); - auto M = make_shared(element::f32, Shape{1}); + auto X = make_shared(element::Type_t::f32, Shape{1}); + auto Y = make_shared(element::Type_t::f32, Shape{1}); + auto M = make_shared(element::Type_t::f32, Shape{1}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto condition_const = - std::make_shared(ngraph::element::f32, ngraph::Shape{1}, 10); + auto current_iteration = make_shared(element::Type_t::i64, Shape{1}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto condition_const = std::make_shared( + ngraph::element::Type_t::f32, ngraph::Shape{1}, 10); auto body_condition = std::make_shared(M_body, condition_const); - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{1}, 10); auto exec_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -338,24 +338,24 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_static_shapes TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_dynamic_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{1}); - auto Y = make_shared(element::f32, Shape{1}); - auto M = make_shared(element::f32, Shape{1}); + auto X = make_shared(element::Type_t::f32, Shape{1}); + auto Y = make_shared(element::Type_t::f32, Shape{1}); + auto M = make_shared(element::Type_t::f32, Shape{1}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto condition_const = - std::make_shared(ngraph::element::f32, ngraph::Shape{1}, 10); + auto current_iteration = make_shared(element::Type_t::i64, Shape{1}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto condition_const = std::make_shared( + ngraph::element::Type_t::f32, ngraph::Shape{1}, 10); auto 
body_condition = std::make_shared(M_body, condition_const); - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{1}, 10); auto exec_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -442,23 +442,23 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_dynamic_shape TEST(type_prop, loop_operation_infinite_loop_mode_dynamic_iter_dynamic_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto current_iteration = make_shared(element::Type_t::i64, Shape{1}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto body_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{1}, -1); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{1}, -1); auto exec_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -548,23 +548,23 @@ TEST(type_prop, loop_operation_infinite_loop_mode_dynamic_iter_dynamic_shapes) TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto current_iteration = make_shared(element::Type_t::i64, Shape{1}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto body_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + 
ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{1}, 10); auto exec_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -654,23 +654,23 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports_scalars) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto body_condition = - std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); - - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{}, 10); - auto exec_condition = - std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + auto current_iteration = make_shared(element::Type_t::i64, Shape{}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared( + ngraph::element::Type_t::boolean, ngraph::Shape{}, true); + + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{}, 10); + auto exec_condition = std::make_shared( + ngraph::element::Type_t::boolean, ngraph::Shape{}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -760,23 +760,23 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports TEST(type_prop, loop_operation_10_iter_static_shapes_sliced_inputs) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 10, 1}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 10, 1}); + auto M = make_shared(element::Type_t::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto body_condition = - std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); - - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{}, 10); - auto exec_condition = - std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + auto current_iteration = make_shared(element::Type_t::i64, 
Shape{}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared( + ngraph::element::Type_t::boolean, ngraph::Shape{}, true); + + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{}, 10); + auto exec_condition = std::make_shared( + ngraph::element::Type_t::boolean, ngraph::Shape{}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); diff --git a/ngraph/test/type_prop/lrn.cpp b/ngraph/test/type_prop/lrn.cpp index d4f5b8f162aa52..354506e0f7ac50 100644 --- a/ngraph/test/type_prop/lrn.cpp +++ b/ngraph/test/type_prop/lrn.cpp @@ -23,8 +23,8 @@ using namespace ngraph; TEST(type_prop, lrn_invalid_axes_rank) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto axes = make_shared(element::f32, Shape{1, 2}); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto axes = make_shared(element::Type_t::f32, Shape{1, 2}); double alpha = 0.1, beta = 0.2, bias = 0.3; size_t size = 3; try @@ -42,7 +42,7 @@ TEST(type_prop, lrn_invalid_axes_rank) FAIL() << "Deduced type check failed for unexpected reason"; } - axes = make_shared(element::f32, Shape{5}); + axes = make_shared(element::Type_t::f32, Shape{5}); try { auto lrn = make_shared(data, axes, alpha, beta, bias, size); @@ -63,8 +63,8 @@ TEST(type_prop, lrn_invalid_axes_rank) TEST(type_prop, lrn_incorrect_axes_value) { - auto data = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{3, 4}); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{3, 4}); double alpha = 0.1, beta = 0.2, bias = 0.3; size_t size = 3; try diff --git a/ngraph/test/type_prop/lstm_cell.cpp b/ngraph/test/type_prop/lstm_cell.cpp index e8275d8973f87a..f56d31e7bb17c8 100644 --- a/ngraph/test/type_prop/lstm_cell.cpp +++ b/ngraph/test/type_prop/lstm_cell.cpp @@ -29,13 +29,16 @@ TEST(type_prop, lstm_cell) const size_t hidden_size = 3; const size_t gates_count = 4; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + const auto X = + make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto C_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); const auto lstm_cell = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(lstm_cell->get_hidden_size(), hidden_size); @@ -45,9 +48,9 @@ TEST(type_prop, lstm_cell) EXPECT_EQ(lstm_cell->get_activations()[0], "sigmoid"); EXPECT_EQ(lstm_cell->get_activations()[1], "tanh"); EXPECT_EQ(lstm_cell->get_activations()[2], "tanh"); - EXPECT_EQ(lstm_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(0), 
element::Type_t::f32); EXPECT_EQ(lstm_cell->get_output_shape(0), (Shape{batch_size, hidden_size})); - EXPECT_EQ(lstm_cell->get_output_element_type(1), element::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(1), element::Type_t::f32); EXPECT_EQ(lstm_cell->get_output_shape(1), (Shape{batch_size, hidden_size})); } @@ -58,14 +61,15 @@ TEST(type_prop, lstm_cell_invalid_input) const size_t hidden_size = 3; const size_t gates_count = 4; - auto X = make_shared(element::f32, Shape{batch_size, input_size}); - auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - auto C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + auto H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + auto C_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); // Invalid W tensor shape. - auto W = make_shared(element::f32, Shape{1 * hidden_size, input_size}); + auto W = + make_shared(element::Type_t::f32, Shape{1 * hidden_size, input_size}); try { const auto lstm_cell = make_shared(X, H_t, C_t, W, R, hidden_size); @@ -78,8 +82,9 @@ TEST(type_prop, lstm_cell_invalid_input) } // Invalid R tensor shape. - W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - R = make_shared(element::f32, Shape{gates_count * hidden_size, 1}); + W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + R = make_shared(element::Type_t::f32, Shape{gates_count * hidden_size, 1}); try { const auto lstm_cell = make_shared(X, H_t, C_t, W, R, hidden_size); @@ -93,8 +98,9 @@ TEST(type_prop, lstm_cell_invalid_input) } // Invalid H_t tensor shape. - R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - H_t = make_shared(element::f32, Shape{4, hidden_size}); + R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + H_t = make_shared(element::Type_t::f32, Shape{4, hidden_size}); try { const auto lstm_cell = make_shared(X, H_t, C_t, W, R, hidden_size); @@ -108,8 +114,8 @@ TEST(type_prop, lstm_cell_invalid_input) } // Invalid C_t tensor shape. - H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - C_t = make_shared(element::f32, Shape{4, hidden_size}); + H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + C_t = make_shared(element::Type_t::f32, Shape{4, hidden_size}); try { const auto lstm_cell = make_shared(X, H_t, C_t, W, R, hidden_size); @@ -123,9 +129,10 @@ TEST(type_prop, lstm_cell_invalid_input) } // Invalid B tensor shape. 
- C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - auto B = make_shared(element::f32, Shape{2 * gates_count * hidden_size}); - auto P = make_shared(element::f32, Shape{3 * hidden_size}); + C_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + auto B = + make_shared(element::Type_t::f32, Shape{2 * gates_count * hidden_size}); + auto P = make_shared(element::Type_t::f32, Shape{3 * hidden_size}); try { const auto lstm_cell = make_shared(X, H_t, C_t, W, R, B, hidden_size); @@ -146,22 +153,22 @@ TEST(type_prop, lstm_cell_dynamic_batch_size) const size_t gates_count = 4; const auto X = - make_shared(element::f32, PartialShape{batch_size, input_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); const auto W = make_shared( - element::f32, PartialShape{gates_count * hidden_size, input_size}); + element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); const auto R = make_shared( - element::f32, PartialShape{gates_count * hidden_size, hidden_size}); + element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto C_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto lstm_cell = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(lstm_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); EXPECT_EQ(lstm_cell->get_output_partial_shape(1), (PartialShape{batch_size, hidden_size})); - EXPECT_EQ(lstm_cell->get_output_element_type(0), element::f32); - EXPECT_EQ(lstm_cell->get_output_element_type(1), element::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(0), element::Type_t::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(1), element::Type_t::f32); } TEST(type_prop, lstm_cell_dynamic_hidden_size) @@ -172,22 +179,22 @@ TEST(type_prop, lstm_cell_dynamic_hidden_size) const size_t gates_count = 4; const auto X = - make_shared(element::f32, PartialShape{batch_size, input_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); const auto W = make_shared( - element::f32, PartialShape{hidden_size * gates_count, input_size}); + element::Type_t::f32, PartialShape{hidden_size * gates_count, input_size}); const auto R = make_shared( - element::f32, PartialShape{hidden_size * gates_count, hidden_size}); + element::Type_t::f32, PartialShape{hidden_size * gates_count, hidden_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto C_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto lstm_cell = make_shared(X, H_t, C_t, W, R, 3); EXPECT_EQ(lstm_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); EXPECT_EQ(lstm_cell->get_output_partial_shape(1), (PartialShape{batch_size, hidden_size})); - EXPECT_EQ(lstm_cell->get_output_element_type(0), element::f32); - EXPECT_EQ(lstm_cell->get_output_element_type(1), element::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(0), element::Type_t::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(1), element::Type_t::f32); } TEST(type_prop, lstm_cell_dynamic_inputs) @@ -198,22 +205,22 @@ 
TEST(type_prop, lstm_cell_dynamic_inputs) const size_t gates_count = 4; const auto X = - make_shared(element::f32, PartialShape{batch_size, input_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); const auto W = make_shared( - element::f32, PartialShape{hidden_size * gates_count, input_size}); + element::Type_t::f32, PartialShape{hidden_size * gates_count, input_size}); const auto R = make_shared( - element::f32, PartialShape{hidden_size * gates_count, hidden_size}); + element::Type_t::f32, PartialShape{hidden_size * gates_count, hidden_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto C_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto lstm_cell = make_shared(X, H_t, C_t, W, R, 3); EXPECT_EQ(lstm_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); EXPECT_EQ(lstm_cell->get_output_partial_shape(1), (PartialShape{batch_size, hidden_size})); - EXPECT_EQ(lstm_cell->get_output_element_type(0), element::f32); - EXPECT_EQ(lstm_cell->get_output_element_type(1), element::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(0), element::Type_t::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(1), element::Type_t::f32); } TEST(type_prop, lstm_cell_invalid_input_rank0) @@ -223,53 +230,58 @@ TEST(type_prop, lstm_cell_invalid_input_rank0) const size_t hidden_size = 3; const size_t gates_count = 4; - auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - auto W = make_shared(element::f32, + auto X = + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + auto W = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - auto R = make_shared(element::f32, + auto R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - auto C_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + auto H_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + auto C_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); // Invalid rank0 for W tensor. - W = make_shared(element::f32, PartialShape{}); + W = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, C_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "LSTMCell node was created with invalid data."; // Invalid rank0 for X tensor. - W = make_shared(element::f32, + W = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - X = make_shared(element::f32, PartialShape{}); + X = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, C_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "LSTMCell node was created with invalid data."; // Invalid rank0 for H_t tensor. - X = make_shared(element::f32, PartialShape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape{}); + X = make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + H_t = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, C_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "LSTMCell node was created with invalid data."; // Invalid rank0 for C_t tensor. 
- H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - C_t = make_shared(element::f32, PartialShape{}); + H_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + C_t = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, C_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "LSTMCell node was created with invalid data."; // Invalid rank0 for R tensor. - C_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape{}); + C_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + R = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, C_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "LSTMCell node was created with invalid data."; // Invalid rank0 for B tensor. - R = make_shared(element::f32, + R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto B = make_shared(element::f32, PartialShape{}); + auto B = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, C_t, W, R, B, hidden_size), ngraph::NodeValidationFailure) << "LSTMCell node was created with invalid data."; @@ -282,13 +294,16 @@ TEST(type_prop, lstm_cell_invalid_input_dynamic_rank) const size_t hidden_size = 3; const size_t gates_count = 4; - auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - auto W = make_shared(element::f32, + auto X = + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + auto W = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - auto R = make_shared(element::f32, + auto R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - auto C_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + auto H_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + auto C_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); auto check_dynamic_lstm = [](const shared_ptr& lstm) -> bool { return lstm->output(0).get_partial_shape() == PartialShape::dynamic() && @@ -297,39 +312,47 @@ TEST(type_prop, lstm_cell_invalid_input_dynamic_rank) }; // Invalid dynamic rank for W tensor. - W = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + W = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); auto lstm = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_lstm(lstm), true); // Invalid dynamic rank for X tensor. - W = make_shared(element::f32, + W = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - X = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + X = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); lstm = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_lstm(lstm), true); // Invalid dynamic rank for H_t tensor. 
- X = make_shared(element::f32, PartialShape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + X = make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + H_t = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); lstm = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_lstm(lstm), true); // Invalid dynamic rank for C_t tensor. - H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - C_t = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + H_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + C_t = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); lstm = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_lstm(lstm), true); // Invalid dynamic rank for R tensor. - C_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + C_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + R = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); lstm = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_lstm(lstm), true); // Invalid dynamic rank for B tensor. - R = make_shared(element::f32, + R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto B = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto B = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); lstm = make_shared(X, H_t, C_t, W, R, B, hidden_size); EXPECT_EQ(check_dynamic_lstm(lstm), true); } diff --git a/ngraph/test/type_prop/lstm_sequence.cpp b/ngraph/test/type_prop/lstm_sequence.cpp index 756e7d9c90b4ca..48631b366e328b 100644 --- a/ngraph/test/type_prop/lstm_sequence.cpp +++ b/ngraph/test/type_prop/lstm_sequence.cpp @@ -35,7 +35,7 @@ struct recurrent_sequence_parameters Dimension seq_length = 12; Dimension input_size = 8; Dimension hidden_size = 256; - ngraph::element::Type et = element::f32; + ngraph::element::Type et = element::Type_t::f32; }; // @@ -86,19 +86,20 @@ TEST(type_prop, lstm_sequence_forward) const size_t input_size = 4; const size_t hidden_size = 128; - const auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto X = make_shared(element::Type_t::f32, + Shape{batch_size, seq_length, input_size}); const auto initial_hidden_state = make_shared( - element::f32, Shape{batch_size, num_directions, hidden_size}); + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); const auto initial_cell_state = make_shared( - element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = + make_shared(element::Type_t::i32, Shape{batch_size}); const auto W = make_shared( - element::f32, Shape{num_directions, 4 * hidden_size, input_size}); + element::Type_t::f32, Shape{num_directions, 4 * hidden_size, input_size}); const auto R = make_shared( - element::f32, Shape{num_directions, 4 * hidden_size, hidden_size}); - const auto B = - make_shared(element::f32, Shape{num_directions, 4 * hidden_size}); + element::Type_t::f32, Shape{num_directions, 4 * hidden_size, hidden_size}); + const auto B = make_shared(element::Type_t::f32, + Shape{num_directions, 
4 * hidden_size}); const auto lstm_direction = op::RecurrentSequenceDirection::FORWARD; @@ -120,13 +121,13 @@ TEST(type_prop, lstm_sequence_forward) EXPECT_EQ(lstm_sequence->get_activations()[1], "tanh"); EXPECT_EQ(lstm_sequence->get_activations()[2], "tanh"); EXPECT_EQ(lstm_sequence->get_clip(), 0.f); - EXPECT_EQ(lstm_sequence->get_output_element_type(0), element::f32); + EXPECT_EQ(lstm_sequence->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(lstm_sequence->outputs().size(), 3); EXPECT_EQ(lstm_sequence->get_output_shape(0), (Shape{batch_size, num_directions, seq_length, hidden_size})); - EXPECT_EQ(lstm_sequence->get_output_element_type(1), element::f32); + EXPECT_EQ(lstm_sequence->get_output_element_type(1), element::Type_t::f32); EXPECT_EQ(lstm_sequence->get_output_shape(1), (Shape{batch_size, num_directions, hidden_size})); - EXPECT_EQ(lstm_sequence->get_output_element_type(2), element::f32); + EXPECT_EQ(lstm_sequence->get_output_element_type(2), element::Type_t::f32); EXPECT_EQ(lstm_sequence->get_output_shape(2), (Shape{batch_size, num_directions, hidden_size})); } @@ -138,19 +139,20 @@ TEST(type_prop, lstm_sequence_bidirectional) const size_t input_size = 8; const size_t hidden_size = 256; - const auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto X = make_shared(element::Type_t::f32, + Shape{batch_size, seq_length, input_size}); const auto initial_hidden_state = make_shared( - element::f32, Shape{batch_size, num_directions, hidden_size}); + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); const auto initial_cell_state = make_shared( - element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = + make_shared(element::Type_t::i32, Shape{batch_size}); const auto W = make_shared( - element::f32, Shape{num_directions, 4 * hidden_size, input_size}); + element::Type_t::f32, Shape{num_directions, 4 * hidden_size, input_size}); const auto R = make_shared( - element::f32, Shape{num_directions, 4 * hidden_size, hidden_size}); - const auto B = - make_shared(element::f32, Shape{num_directions, 4 * hidden_size}); + element::Type_t::f32, Shape{num_directions, 4 * hidden_size, hidden_size}); + const auto B = make_shared(element::Type_t::f32, + Shape{num_directions, 4 * hidden_size}); const auto lstm_direction = opset5::LSTMSequence::direction::BIDIRECTIONAL; const std::vector activations_alpha = {2.7, 7.0, 32.367}; @@ -177,12 +179,12 @@ TEST(type_prop, lstm_sequence_bidirectional) EXPECT_EQ(lstm_sequence->get_activations()[1], "sigmoid"); EXPECT_EQ(lstm_sequence->get_activations()[2], "sigmoid"); EXPECT_EQ(lstm_sequence->get_clip(), 0.f); - EXPECT_EQ(lstm_sequence->get_output_element_type(0), element::f32); + EXPECT_EQ(lstm_sequence->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(lstm_sequence->get_output_shape(0), (Shape{batch_size, num_directions, seq_length, hidden_size})); - EXPECT_EQ(lstm_sequence->get_output_element_type(1), element::f32); + EXPECT_EQ(lstm_sequence->get_output_element_type(1), element::Type_t::f32); EXPECT_EQ(lstm_sequence->get_output_shape(1), (Shape{batch_size, num_directions, hidden_size})); - EXPECT_EQ(lstm_sequence->get_output_element_type(2), element::f32); + EXPECT_EQ(lstm_sequence->get_output_element_type(2), element::Type_t::f32); EXPECT_EQ(lstm_sequence->get_output_shape(2), (Shape{batch_size, 
num_directions, hidden_size})); } @@ -195,7 +197,7 @@ TEST(type_prop, lstm_sequence_dynamic_batch_size) param.seq_length = 12; param.input_size = 8; param.hidden_size = 256; - param.et = element::f32; + param.et = element::Type_t::f32; auto lstm_sequence = lstm_seq_tensor_initialization(param); lstm_sequence->validate_and_infer_types(); @@ -221,7 +223,7 @@ TEST(type_prop, lstm_sequence_dynamic_num_directions) param.seq_length = 12; param.input_size = 8; param.hidden_size = 256; - param.et = element::f32; + param.et = element::Type_t::f32; auto lstm_sequence = lstm_seq_tensor_initialization(param); lstm_sequence->validate_and_infer_types(); @@ -247,7 +249,7 @@ TEST(type_prop, lstm_sequence_dynamic_seq_length) param.seq_length = Dimension::dynamic(); param.input_size = 8; param.hidden_size = 256; - param.et = element::f32; + param.et = element::Type_t::f32; auto lstm_sequence = lstm_seq_tensor_initialization(param); lstm_sequence->validate_and_infer_types(); @@ -273,7 +275,7 @@ TEST(type_prop, lstm_sequence_dynamic_hidden_size) param.seq_length = 12; param.input_size = 8; param.hidden_size = Dimension::dynamic(); - param.et = element::f32; + param.et = element::Type_t::f32; auto lstm_sequence = lstm_seq_tensor_initialization(param); lstm_sequence->validate_and_infer_types(); @@ -299,7 +301,7 @@ TEST(type_prop, lstm_sequence_dynamic_inputs) param.hidden_size = Dimension::dynamic(); param.num_directions = Dimension::dynamic(); param.seq_length = Dimension::dynamic(); - param.et = element::f32; + param.et = element::Type_t::f32; auto lstm_sequence = lstm_seq_tensor_initialization(param); lstm_sequence->validate_and_infer_types(); @@ -325,7 +327,7 @@ TEST(type_prop, lstm_sequence_invalid_input_dimension) param.seq_length = 12; param.input_size = 8; param.hidden_size = 256; - param.et = element::f32; + param.et = element::Type_t::f32; auto lstm_sequence = lstm_seq_tensor_initialization(param); auto invalid_rank0_tensor = make_shared(param.et, PartialShape{}); @@ -350,7 +352,7 @@ TEST(type_prop, lstm_sequence_invalid_input_dynamic_rank) param.seq_length = 12; param.input_size = 8; param.hidden_size = 256; - param.et = element::f32; + param.et = element::Type_t::f32; auto check_dynamic_lstm = [](const shared_ptr& lstm) -> bool { return lstm->output(0).get_partial_shape() == PartialShape::dynamic() && diff --git a/ngraph/test/type_prop/matmul.cpp b/ngraph/test/type_prop/matmul.cpp index eb452d7794975f..f5ec44b3e109e6 100644 --- a/ngraph/test/type_prop/matmul.cpp +++ b/ngraph/test/type_prop/matmul.cpp @@ -23,113 +23,114 @@ using namespace ngraph; TEST(type_prop, matmul_2D_same) { - auto A = make_shared(element::f32, Shape{2, 2}); - auto B = make_shared(element::f32, Shape{2, 2}); + auto A = make_shared(element::Type_t::f32, Shape{2, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2, 2}); auto matmul = make_shared(A, B); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2})); } TEST(type_prop, matmul_4D_same) { - auto A = make_shared(element::f32, Shape{2, 2, 3, 3}); - auto B = make_shared(element::f32, Shape{2, 2, 3, 3}); + auto A = make_shared(element::Type_t::f32, Shape{2, 2, 3, 3}); + auto B = make_shared(element::Type_t::f32, Shape{2, 2, 3, 3}); auto matmul = make_shared(A, B); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2, 3, 3})); } TEST(type_prop, matmul_2D) { - 
auto A = make_shared(element::f32, Shape{3, 6}); - auto B = make_shared(element::f32, Shape{6, 4}); + auto A = make_shared(element::Type_t::f32, Shape{3, 6}); + auto B = make_shared(element::Type_t::f32, Shape{6, 4}); auto matmul = make_shared(A, B); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{3, 4})); } TEST(type_prop, matmul_4D) { - auto A = make_shared(element::f32, Shape{2, 2, 3, 6}); - auto B = make_shared(element::f32, Shape{2, 2, 6, 4}); + auto A = make_shared(element::Type_t::f32, Shape{2, 2, 3, 6}); + auto B = make_shared(element::Type_t::f32, Shape{2, 2, 6, 4}); auto matmul = make_shared(A, B); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2, 3, 4})); } TEST(type_prop, matmul_5D_x_3D_transpose_a_transpose_b) { - auto A = make_shared(element::f32, Shape{2, 1, 6, 3}); - auto B = make_shared(element::f32, Shape{7, 1, 5, 4, 6}); + auto A = make_shared(element::Type_t::f32, Shape{2, 1, 6, 3}); + auto B = make_shared(element::Type_t::f32, Shape{7, 1, 5, 4, 6}); auto matmul = make_shared(A, B, true, true); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{7, 2, 5, 3, 4})); } TEST(type_prop, matmul_2D_transpose_a) { - auto A = make_shared(element::f32, Shape{6, 3}); - auto B = make_shared(element::f32, Shape{6, 4}); + auto A = make_shared(element::Type_t::f32, Shape{6, 3}); + auto B = make_shared(element::Type_t::f32, Shape{6, 4}); auto matmul = make_shared(A, B, 1); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{3, 4})); } TEST(type_prop, matmul_4D_transpose_a) { - auto A = make_shared(element::f32, Shape{2, 2, 6, 3}); - auto B = make_shared(element::f32, Shape{2, 2, 6, 4}); + auto A = make_shared(element::Type_t::f32, Shape{2, 2, 6, 3}); + auto B = make_shared(element::Type_t::f32, Shape{2, 2, 6, 4}); auto matmul = make_shared(A, B, 1); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2, 3, 4})); } TEST(type_prop, matmul_2D_transpose_b) { - auto A = make_shared(element::f32, Shape{3, 6}); - auto B = make_shared(element::f32, Shape{4, 6}); + auto A = make_shared(element::Type_t::f32, Shape{3, 6}); + auto B = make_shared(element::Type_t::f32, Shape{4, 6}); auto matmul = make_shared(A, B, 0, 1); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{3, 4})); } TEST(type_prop, matmul_4D_transpose_b) { - auto A = make_shared(element::f32, Shape{2, 2, 3, 6}); - auto B = make_shared(element::f32, Shape{2, 2, 4, 6}); + auto A = make_shared(element::Type_t::f32, Shape{2, 2, 3, 6}); + auto B = make_shared(element::Type_t::f32, Shape{2, 2, 4, 6}); auto matmul = make_shared(A, B, 0, 1); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2, 3, 4})); } TEST(type_prop, matmul_dynamic_5D_transpose_b) { Dimension dynamic = Dimension::dynamic(); - auto A = - make_shared(element::f32, PartialShape{dynamic, 4, dynamic, dynamic, 6}); - auto B = 
make_shared(element::f32, PartialShape{1, dynamic, dynamic, 4, 6}); + auto A = make_shared(element::Type_t::f32, + PartialShape{dynamic, 4, dynamic, dynamic, 6}); + auto B = + make_shared(element::Type_t::f32, PartialShape{1, dynamic, dynamic, 4, 6}); auto matmul = make_shared(A, B, 0, 1); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), (PartialShape{Dimension(1, -1), 4, dynamic, dynamic, 4})); } @@ -137,24 +138,24 @@ TEST(type_prop, matmul_dynamic_5D_transpose_b) TEST(type_prop, matmul_dynamic_2D_transpose_a) { Dimension dynamic = Dimension::dynamic(); - auto A = make_shared(element::f32, PartialShape{dynamic, 3}); - auto B = make_shared(element::f32, PartialShape{4, dynamic}); + auto A = make_shared(element::Type_t::f32, PartialShape{dynamic, 3}); + auto B = make_shared(element::Type_t::f32, PartialShape{4, dynamic}); auto matmul = make_shared(A, B, 1, 0); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), (PartialShape{3, dynamic})); } TEST(type_prop, matmul_dynamic_1D_3D) { Dimension dynamic = Dimension::dynamic(); - auto A = make_shared(element::f32, PartialShape{dynamic}); - auto B = make_shared(element::f32, PartialShape{2, 4, dynamic}); + auto A = make_shared(element::Type_t::f32, PartialShape{dynamic}); + auto B = make_shared(element::Type_t::f32, PartialShape{2, 4, dynamic}); auto matmul = make_shared(A, B); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), (PartialShape{2, dynamic})); } @@ -162,52 +163,52 @@ TEST(type_prop, matmul_dynamic_1D_3D) // 1D x 1D TEST(type_prop, matmul_1D_x_1D_false_false) { - auto A = make_shared(element::f32, Shape{1}); - auto B = make_shared(element::f32, Shape{1}); + auto A = make_shared(element::Type_t::f32, Shape{1}); + auto B = make_shared(element::Type_t::f32, Shape{1}); auto matmul = make_shared(A, B, false, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{})); } TEST(type_prop, matmul_1D_x_1D_false_true) { - auto A = make_shared(element::f32, Shape{1}); - auto B = make_shared(element::f32, Shape{1}); + auto A = make_shared(element::Type_t::f32, Shape{1}); + auto B = make_shared(element::Type_t::f32, Shape{1}); auto matmul = make_shared(A, B, false, true); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{})); } TEST(type_prop, matmul_1D_x_1D_true_false) { - auto A = make_shared(element::f32, Shape{1}); - auto B = make_shared(element::f32, Shape{1}); + auto A = make_shared(element::Type_t::f32, Shape{1}); + auto B = make_shared(element::Type_t::f32, Shape{1}); auto matmul = make_shared(A, B, true, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{})); } TEST(type_prop, matmul_1D_x_1D_true_true) { - auto A = make_shared(element::f32, Shape{1}); - auto B = make_shared(element::f32, Shape{1}); + auto A = make_shared(element::Type_t::f32, Shape{1}); + auto B = make_shared(element::Type_t::f32, Shape{1}); auto matmul = make_shared(A, B, true, true); - 
ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{})); } TEST(type_prop, matmul_1D_x_1D_incompatible) { - auto A = make_shared(element::f32, Shape{3}); - auto B = make_shared(element::f32, Shape{4}); + auto A = make_shared(element::Type_t::f32, Shape{3}); + auto B = make_shared(element::Type_t::f32, Shape{4}); try { @@ -228,30 +229,30 @@ TEST(type_prop, matmul_1D_x_1D_incompatible) // 2D x 1D TEST(type_prop, matmul_2D_x_1D_false_false) { - auto A = make_shared(element::f32, Shape{1, 2}); - auto B = make_shared(element::f32, Shape{2}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2}); auto matmul = make_shared(A, B, false, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1})); } TEST(type_prop, matmul_2D_x_1D_false_true) { - auto A = make_shared(element::f32, Shape{1, 2}); - auto B = make_shared(element::f32, Shape{2}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2}); auto matmul = make_shared(A, B, false, true); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1})); } TEST(type_prop, matmul_2D_x_1D_true_false) { - auto A = make_shared(element::f32, Shape{1, 2}); - auto B = make_shared(element::f32, Shape{2}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2}); try { @@ -271,8 +272,8 @@ TEST(type_prop, matmul_2D_x_1D_true_false) TEST(type_prop, matmul_2D_x_1D_true_true) { - auto A = make_shared(element::f32, Shape{1, 2}); - auto B = make_shared(element::f32, Shape{2}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2}); try { @@ -293,19 +294,19 @@ TEST(type_prop, matmul_2D_x_1D_true_true) // 1D x 2D TEST(type_prop, matmul_1D_x_2D_false_false) { - auto A = make_shared(element::f32, Shape{2}); - auto B = make_shared(element::f32, Shape{2, 1}); + auto A = make_shared(element::Type_t::f32, Shape{2}); + auto B = make_shared(element::Type_t::f32, Shape{2, 1}); auto matmul = make_shared(A, B, false, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1})); } TEST(type_prop, matmul_1D_x_2D_false_true) { - auto A = make_shared(element::f32, Shape{2}); - auto B = make_shared(element::f32, Shape{2, 1}); + auto A = make_shared(element::Type_t::f32, Shape{2}); + auto B = make_shared(element::Type_t::f32, Shape{2, 1}); try { @@ -325,18 +326,18 @@ TEST(type_prop, matmul_1D_x_2D_false_true) TEST(type_prop, matmul_1D_x_2D_true_false) { - auto A = make_shared(element::f32, Shape{2}); - auto B = make_shared(element::f32, Shape{2, 1}); + auto A = make_shared(element::Type_t::f32, Shape{2}); + auto B = make_shared(element::Type_t::f32, Shape{2, 1}); auto matmul = make_shared(A, B, true, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1})); } TEST(type_prop, matmul_1D_x_2D_true_true) { - auto A = make_shared(element::f32, Shape{2}); - auto B = make_shared(element::f32, Shape{2, 1}); + auto A = 
make_shared(element::Type_t::f32, Shape{2}); + auto B = make_shared(element::Type_t::f32, Shape{2, 1}); try { @@ -357,65 +358,65 @@ TEST(type_prop, matmul_1D_x_2D_true_true) // 1D x 4D TEST(type_prop, matmul_1D_x_4D_false_false) { - auto A = make_shared(element::f32, Shape{3}); - auto B = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto A = make_shared(element::Type_t::f32, Shape{3}); + auto B = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto matmul = make_shared(A, B, false, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1, 2, 4})); } // 4D x 1D TEST(type_prop, matmul_4D_x_1D_false_false) { - auto A = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto B = make_shared(element::f32, Shape{4}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto B = make_shared(element::Type_t::f32, Shape{4}); auto matmul = make_shared(A, B, false, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1, 2, 3})); } // Batch broadcast TEST(type_prop, matmul_batch_broadcast) { - auto A = make_shared(element::f32, Shape{5, 1, 1, 4, 3}); - auto B = make_shared(element::f32, Shape{1, 1, 6, 3, 2}); + auto A = make_shared(element::Type_t::f32, Shape{5, 1, 1, 4, 3}); + auto B = make_shared(element::Type_t::f32, Shape{1, 1, 6, 3, 2}); auto matmul = make_shared(A, B, false, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{5, 1, 6, 4, 2})); } TEST(type_prop, matmul_batch_broadcast_expand_to_A) { - auto A = make_shared(element::f32, Shape{1, 4, 3}); - auto B = make_shared(element::f32, Shape{7, 8, 5, 3, 2}); + auto A = make_shared(element::Type_t::f32, Shape{1, 4, 3}); + auto B = make_shared(element::Type_t::f32, Shape{7, 8, 5, 3, 2}); auto matmul = make_shared(A, B, false, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{7, 8, 5, 4, 2})); } TEST(type_prop, matmul_batch_broadcast_expand_to_B) { - auto A = make_shared(element::f32, Shape{8, 7, 6, 1, 4, 3}); - auto B = make_shared(element::f32, Shape{1, 5, 3, 2}); + auto A = make_shared(element::Type_t::f32, Shape{8, 7, 6, 1, 4, 3}); + auto B = make_shared(element::Type_t::f32, Shape{1, 5, 3, 2}); auto matmul = make_shared(A, B, false, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{8, 7, 6, 5, 4, 2})); } TEST(type_prop, matmul_incompatible_batch_dims) { - auto A = make_shared(element::f32, Shape{7, 4, 3}); - auto B = make_shared(element::f32, Shape{6, 3, 2}); + auto A = make_shared(element::Type_t::f32, Shape{7, 4, 3}); + auto B = make_shared(element::Type_t::f32, Shape{6, 3, 2}); try { @@ -435,14 +436,14 @@ TEST(type_prop, matmul_incompatible_batch_dims) TEST(type_prop, matmul_matrix_dynamic_bounds) { - auto A = - make_shared(element::f32, PartialShape{Dimension(2, 5), Dimension(6, 10)}); - auto B = - make_shared(element::f32, PartialShape{Dimension(7, 8), Dimension(15, 20)}); + auto A = make_shared(element::Type_t::f32, + PartialShape{Dimension(2, 5), Dimension(6, 10)}); + auto B = make_shared(element::Type_t::f32, + PartialShape{Dimension(7, 8), Dimension(15, 20)}); 
     auto matmul = make_shared(A, B, false, false);
-    ASSERT_EQ(matmul->get_element_type(), element::f32);
+    ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(matmul->get_output_partial_shape(0),
               (PartialShape{Dimension(2, 5), Dimension(15, 20)}));
 }
@@ -517,35 +518,35 @@ TEST(type_prop, matmul_batch_dynamic_bounds)
                5,  // 18
                4}; // 19
-    auto A = make_shared(element::f32, A_shape);
-    auto B = make_shared(element::f32, B_shape);
+    auto A = make_shared(element::Type_t::f32, A_shape);
+    auto B = make_shared(element::Type_t::f32, B_shape);
     auto matmul = make_shared(A, B);
-    ASSERT_EQ(matmul->get_element_type(), element::f32);
+    ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(matmul->get_output_partial_shape(0), expected_output_shape);
 }
 TEST(type_prop, matmul_incompatible_matrix_dim_bounds)
 {
-    auto A =
-        make_shared(element::f32, PartialShape{Dimension(2, 5), Dimension(3, 4)});
-    auto B =
-        make_shared(element::f32, PartialShape{Dimension(1, 2), Dimension(15, 20)});
+    auto A = make_shared(element::Type_t::f32,
+                         PartialShape{Dimension(2, 5), Dimension(3, 4)});
+    auto B = make_shared(element::Type_t::f32,
+                         PartialShape{Dimension(1, 2), Dimension(15, 20)});
     auto expected_output_shape = PartialShape{Dimension(2, 5), Dimension(15, 20)};
     // No error for backward compatibility
     auto matmul = make_shared(A, B, false, false);
-    ASSERT_EQ(matmul->get_element_type(), element::f32);
+    ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(matmul->get_output_partial_shape(0), expected_output_shape);
 }
 TEST(type_prop, matmul_incompatible_batch_dim_bounds)
 {
-    auto A = make_shared(element::f32, PartialShape{Dimension(2, 5), 4, 3});
-    auto B = make_shared(element::f32, PartialShape{Dimension(6, 10), 3, 2});
+    auto A = make_shared(element::Type_t::f32, PartialShape{Dimension(2, 5), 4, 3});
+    auto B = make_shared(element::Type_t::f32, PartialShape{Dimension(6, 10), 3, 2});
     Dimension dynamic = Dimension::dynamic();
     auto expected_output_shape = PartialShape{dynamic, 4, 2};
@@ -553,6 +554,6 @@ TEST(type_prop, matmul_incompatible_batch_dim_bounds)
     // No error for backward compatibility
     auto matmul = make_shared(A, B, false, false);
-    ASSERT_EQ(matmul->get_element_type(), element::f32);
+    ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(matmul->get_output_partial_shape(0), expected_output_shape);
 }
diff --git a/ngraph/test/type_prop/max_pool.cpp b/ngraph/test/type_prop/max_pool.cpp
index 0fb8bd7fc79fa9..fb9c59403f3c5b 100644
--- a/ngraph/test/type_prop/max_pool.cpp
+++ b/ngraph/test/type_prop/max_pool.cpp
@@ -31,7 +31,7 @@ TEST(type_prop, max_pool_auto_padding)
     const auto rounding_mode = op::RoundingType::FLOOR;
     const auto auto_pad = op::PadType::SAME_LOWER;
-    auto arg = make_shared(element::f32, arg_shape);
+    auto arg = make_shared(element::Type_t::f32, arg_shape);
     auto mp = make_shared(
         arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
@@ -50,7 +50,7 @@ TEST(type_prop, max_pool_auto_padding_nc_dims_dynamic_same_lower)
     const auto rounding_mode = op::RoundingType::FLOOR;
     const auto auto_pad = op::PadType::SAME_LOWER;
-    auto arg = make_shared(element::f32, arg_shape);
+    auto arg = make_shared(element::Type_t::f32, arg_shape);
     auto mp = make_shared(
         arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
@@ -70,7 +70,7 @@ TEST(type_prop, max_pool_auto_padding_nc_dims_dynamic_same_upper)
     const auto rounding_mode = op::RoundingType::FLOOR;
     const auto auto_pad = op::PadType::SAME_UPPER;
-    auto arg = make_shared(element::f32, arg_shape);
+    auto arg = make_shared(element::Type_t::f32, arg_shape);
     auto mp = make_shared(
         arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
@@ -90,7 +90,7 @@ TEST(type_prop, max_pool_auto_padding_spatial_dims_dynamic)
     const auto rounding_mode = op::RoundingType::FLOOR;
     const auto auto_pad = op::PadType::SAME_LOWER;
-    auto arg = make_shared(element::f32, arg_shape);
+    auto arg = make_shared(element::Type_t::f32, arg_shape);
     auto mp = make_shared(
         arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
@@ -99,3 +99,18 @@ TEST(type_prop, max_pool_auto_padding_spatial_dims_dynamic)
     ASSERT_EQ(mp->get_pads_begin(), (Shape{}));
     ASSERT_EQ(mp->get_pads_end(), (Shape{}));
 }
+
+TEST(type_prop, max_pool_default_values)
+{
+    const PartialShape arg_shape{1, 3, 32, 32};
+    const Strides strides{1, 1};
+    const Shape pads_begin{0, 0};
+    const Shape pads_end{0, 0};
+    const Shape kernel_shape{2, 2};
+
+    auto arg = make_shared(element::f32, arg_shape);
+    auto mp = make_shared(arg, strides, pads_begin, pads_end, kernel_shape);
+
+    ASSERT_EQ(mp->get_rounding_type(), op::RoundingType::FLOOR);
+    ASSERT_EQ(mp->get_auto_pad(), op::PadType::EXPLICIT);
+}
diff --git a/ngraph/test/type_prop/mish.cpp b/ngraph/test/type_prop/mish.cpp
index 68ec076374fa98..c28a9faceafdff 100644
--- a/ngraph/test/type_prop/mish.cpp
+++ b/ngraph/test/type_prop/mish.cpp
@@ -23,31 +23,33 @@ using namespace ngraph;
 TEST(type_prop, mish)
 {
-    auto data = make_shared(element::f32, Shape{1, 3, 6});
+    auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6});
     auto mish_func = make_shared(data);
-    EXPECT_EQ(mish_func->get_element_type(), element::f32);
+    EXPECT_EQ(mish_func->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(mish_func->get_shape(), (Shape{1, 3, 6}));
 }
 TEST(type_prop, mish_partial)
 {
-    auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6});
+    auto data =
+        make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6});
     auto mish_func = make_shared(data);
-    EXPECT_EQ(mish_func->get_element_type(), element::f32);
+    EXPECT_EQ(mish_func->get_element_type(), element::Type_t::f32);
     ASSERT_TRUE(mish_func->get_output_partial_shape(0).same_scheme(
         (PartialShape{1, Dimension::dynamic(), 6})));
     // rank unknown
     auto mish_partial = make_shared(
-        make_shared(element::f32, PartialShape::dynamic()));
+        make_shared(element::Type_t::f32, PartialShape::dynamic()));
     ASSERT_TRUE(mish_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
 }
 TEST(type_prop, mish_partial_static_rank)
 {
-    auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6});
+    auto data =
+        make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6});
     auto mish_func = make_shared(data);
-    EXPECT_EQ(mish_func->get_element_type(), element::f32);
+    EXPECT_EQ(mish_func->get_element_type(), element::Type_t::f32);
     ASSERT_TRUE(mish_func->get_output_partial_shape(0).same_scheme(
         (PartialShape{1, Dimension::dynamic(), 6})));
     ASSERT_TRUE(mish_func->get_output_partial_shape(0).rank().is_static());
diff --git a/ngraph/test/type_prop/mvn.cpp b/ngraph/test/type_prop/mvn.cpp
index 7b37b95a2682d6..87247422d4080c 100644
--- a/ngraph/test/type_prop/mvn.cpp
+++ b/ngraph/test/type_prop/mvn.cpp
@@ -23,17 +23,18 @@ using namespace ngraph;
 TEST(type_prop, mvn)
 {
-    auto data = make_shared(element::f32, Shape{1, 3, 6});
+    auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6});
     auto mvn_func = make_shared(data);
-
EXPECT_EQ(mvn_func->get_element_type(), element::f32); + EXPECT_EQ(mvn_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(mvn_func->get_shape(), (Shape{1, 3, 6})); } TEST(type_prop, mvn_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto mvn_func = make_shared(data); - EXPECT_EQ(mvn_func->get_element_type(), element::f32); + EXPECT_EQ(mvn_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(mvn_func->get_reduction_axes(), (AxisSet{1, 2})); ASSERT_TRUE(mvn_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); @@ -42,8 +43,8 @@ TEST(type_prop, mvn_partial) EXPECT_EQ(make_shared(data, false)->get_reduction_axes(), (AxisSet{2})); // rank unknown - auto mvn_partial = - make_shared(make_shared(element::f32, PartialShape::dynamic())); + auto mvn_partial = make_shared( + make_shared(element::Type_t::f32, PartialShape::dynamic())); EXPECT_EQ(mvn_partial->get_reduction_axes(), AxisSet{}); ASSERT_TRUE(mvn_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } diff --git a/ngraph/test/type_prop/non_max_suppression.cpp b/ngraph/test/type_prop/non_max_suppression.cpp index 8202486b25d12b..1c2d7572b07057 100644 --- a/ngraph/test/type_prop/non_max_suppression.cpp +++ b/ngraph/test/type_prop/non_max_suppression.cpp @@ -27,8 +27,8 @@ TEST(type_prop, nms_incorrect_boxes_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -42,8 +42,8 @@ TEST(type_prop, nms_incorrect_scores_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2}); make_shared(boxes, scores); } @@ -57,8 +57,8 @@ TEST(type_prop, nms_incorrect_scheme_num_batches) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{2, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 2, 3}); make_shared(boxes, scores); } @@ -73,8 +73,8 @@ TEST(type_prop, nms_incorrect_scheme_num_boxes) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -88,11 +88,11 @@ TEST(type_prop, nms_incorrect_scheme_num_boxes) TEST(type_prop, nms_scalar_inputs_check) { - const auto boxes = make_shared(element::f32, Shape{1, 2, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 2}); - const auto scalar = make_shared(element::f32, Shape{}); - const auto non_scalar = make_shared(element::f32, Shape{1}); + const auto scalar = make_shared(element::Type_t::f32, Shape{}); + const auto non_scalar = 
make_shared(element::Type_t::f32, Shape{1}); try { @@ -125,8 +125,8 @@ TEST(type_prop, nms_scalar_inputs_check) TEST(type_prop, nms_output_shape) { - const auto boxes = make_shared(element::f32, Shape{1, 2, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 2}); const auto nms = make_shared(boxes, scores); const auto nms_out_ps = nms->get_output_partial_shape(0); @@ -138,46 +138,49 @@ TEST(type_prop, nms_output_shape) TEST(type_prop, nms_output_shape_2) { - const auto boxes = make_shared(element::f32, Shape{1, 6, 4}); - const auto scores = make_shared(element::f32, Shape{1, 1, 6}); - const auto max_output_boxes_per_class = op::Constant::create(element::i32, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 6, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 6}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i32, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_EQ(nms->get_shape(), (Shape{3, 3})); } TEST(type_prop, nms_output_shape_3) { - const auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - const auto scores = make_shared(element::f32, Shape{1, 1, 1}); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 1}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_EQ(nms->get_shape(), (Shape{1, 3})); } TEST(type_prop, nms_dynamic_boxes_and_scores) { - const auto boxes = make_shared(element::f32, PartialShape::dynamic()); - const auto scores = make_shared(element::f32, PartialShape::dynamic()); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto scores = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, 
scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_TRUE( nms->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 3})); } @@ -188,8 +191,8 @@ TEST(type_prop, nms_v3_incorrect_boxes_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -203,8 +206,8 @@ TEST(type_prop, nms_v3_incorrect_scores_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2}); make_shared(boxes, scores); } @@ -218,8 +221,8 @@ TEST(type_prop, nms_v3_incorrect_scheme_num_batches) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{2, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 2, 3}); make_shared(boxes, scores); } @@ -234,8 +237,8 @@ TEST(type_prop, nms_v3_incorrect_scheme_num_boxes) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -249,11 +252,11 @@ TEST(type_prop, nms_v3_incorrect_scheme_num_boxes) TEST(type_prop, nms_v3_scalar_inputs_check) { - const auto boxes = make_shared(element::f32, Shape{1, 2, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 2}); - const auto scalar = make_shared(element::f32, Shape{}); - const auto non_scalar = make_shared(element::f32, Shape{1}); + const auto scalar = make_shared(element::Type_t::f32, Shape{}); + const auto non_scalar = make_shared(element::Type_t::f32, Shape{1}); try { @@ -286,8 +289,8 @@ TEST(type_prop, nms_v3_scalar_inputs_check) TEST(type_prop, nms_v3_output_shape) { - const auto boxes = make_shared(element::f32, Shape{1, 2, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 2}); const auto nms = make_shared(boxes, scores); const auto nms_out_ps = nms->get_output_partial_shape(0); @@ -299,41 +302,44 @@ TEST(type_prop, nms_v3_output_shape) TEST(type_prop, nms_v3_output_shape_2) { - const auto boxes = make_shared(element::f32, Shape{1, 6, 4}); - const auto scores = make_shared(element::f32, Shape{1, 1, 6}); - const auto max_output_boxes_per_class = op::Constant::create(element::i32, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 6, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 6}); + const auto 
max_output_boxes_per_class = + op::Constant::create(element::Type_t::i32, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_EQ(nms->get_shape(), (Shape{3, 3})); } TEST(type_prop, nms_v3_output_shape_3) { - const auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - const auto scores = make_shared(element::f32, Shape{1, 1, 1}); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 1}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_EQ(nms->get_shape(), (Shape{1, 3})); } TEST(type_prop, nms_v3_output_shape_i32) { - const auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - const auto scores = make_shared(element::f32, Shape{1, 1, 1}); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 1}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared(boxes, @@ -343,24 +349,25 @@ TEST(type_prop, nms_v3_output_shape_i32) score_threshold, op::v3::NonMaxSuppression::BoxEncodingType::CORNER, true, - element::i32); + element::Type_t::i32); - ASSERT_EQ(nms->get_element_type(), element::i32); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i32); ASSERT_EQ(nms->get_shape(), (Shape{1, 3})); } TEST(type_prop, nms_v3_dynamic_boxes_and_scores) { - const auto boxes = make_shared(element::f32, PartialShape::dynamic()); - const auto scores = make_shared(element::f32, PartialShape::dynamic()); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto scores = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); 
const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_TRUE( nms->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 3})); } @@ -371,8 +378,8 @@ TEST(type_prop, nms_v4_incorrect_boxes_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -386,8 +393,8 @@ TEST(type_prop, nms_v4_incorrect_scores_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2}); make_shared(boxes, scores); } @@ -401,8 +408,8 @@ TEST(type_prop, nms_v4_incorrect_scheme_num_batches) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{2, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 2, 3}); make_shared(boxes, scores); } @@ -417,8 +424,8 @@ TEST(type_prop, nms_v4_incorrect_scheme_num_boxes) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -432,11 +439,11 @@ TEST(type_prop, nms_v4_incorrect_scheme_num_boxes) TEST(type_prop, nms_v4_scalar_inputs_check) { - const auto boxes = make_shared(element::f32, Shape{1, 2, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 2}); - const auto scalar = make_shared(element::f32, Shape{}); - const auto non_scalar = make_shared(element::f32, Shape{1}); + const auto scalar = make_shared(element::Type_t::f32, Shape{}); + const auto non_scalar = make_shared(element::Type_t::f32, Shape{1}); try { @@ -469,8 +476,8 @@ TEST(type_prop, nms_v4_scalar_inputs_check) TEST(type_prop, nms_v4_output_shape) { - const auto boxes = make_shared(element::f32, Shape{5, 2, 4}); - const auto scores = make_shared(element::f32, Shape{5, 3, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{5, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{5, 3, 2}); const auto nms = make_shared(boxes, scores); const auto nms_out_ps = nms->get_output_partial_shape(0); @@ -482,41 +489,44 @@ TEST(type_prop, nms_v4_output_shape) TEST(type_prop, nms_v4_output_shape_2) { - const auto boxes = make_shared(element::f32, Shape{2, 7, 4}); - const auto scores = make_shared(element::f32, Shape{2, 5, 7}); - const auto max_output_boxes_per_class = op::Constant::create(element::i32, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + const auto scores = 
make_shared(element::Type_t::f32, Shape{2, 5, 7}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i32, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_EQ(nms->get_shape(), (Shape{2 * 5 * 3, 3})); } TEST(type_prop, nms_v4_output_shape_3) { - const auto boxes = make_shared(element::f32, Shape{2, 7, 4}); - const auto scores = make_shared(element::f32, Shape{2, 5, 7}); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {1000}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 5, 7}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {1000}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_EQ(nms->get_shape(), (Shape{2 * 5 * 7, 3})); } TEST(type_prop, nms_v4_output_shape_i32) { - const auto boxes = make_shared(element::f32, Shape{2, 7, 4}); - const auto scores = make_shared(element::f32, Shape{2, 5, 7}); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 5, 7}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared(boxes, @@ -526,24 +536,25 @@ TEST(type_prop, nms_v4_output_shape_i32) score_threshold, op::v3::NonMaxSuppression::BoxEncodingType::CORNER, true, - element::i32); + element::Type_t::i32); - ASSERT_EQ(nms->get_element_type(), element::i32); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i32); ASSERT_EQ(nms->get_shape(), (Shape{30, 3})); } TEST(type_prop, nms_v4_dynamic_boxes_and_scores) { - const auto boxes = make_shared(element::f32, PartialShape::dynamic()); - const auto scores = make_shared(element::f32, PartialShape::dynamic()); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto scores = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = 
make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_TRUE( nms->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 3})); } @@ -554,8 +565,8 @@ TEST(type_prop, nms_v5_incorrect_boxes_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -569,8 +580,8 @@ TEST(type_prop, nms_v5_incorrect_scores_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2}); make_shared(boxes, scores); } @@ -584,8 +595,8 @@ TEST(type_prop, nms_v5_incorrect_scheme_num_batches) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{2, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 2, 3}); make_shared(boxes, scores); } @@ -600,8 +611,8 @@ TEST(type_prop, nms_v5_incorrect_scheme_num_boxes) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -615,11 +626,11 @@ TEST(type_prop, nms_v5_incorrect_scheme_num_boxes) TEST(type_prop, nms_v5_scalar_inputs_check) { - const auto boxes = make_shared(element::f32, Shape{1, 2, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 2}); - const auto scalar = make_shared(element::f32, Shape{}); - const auto non_0d_or_1d = make_shared(element::f32, Shape{2}); + const auto scalar = make_shared(element::Type_t::f32, Shape{}); + const auto non_0d_or_1d = make_shared(element::Type_t::f32, Shape{2}); try { @@ -664,8 +675,8 @@ TEST(type_prop, nms_v5_scalar_inputs_check) TEST(type_prop, nms_v5_output_shape) { - const auto boxes = make_shared(element::f32, Shape{5, 2, 4}); - const auto scores = make_shared(element::f32, Shape{5, 3, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{5, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{5, 3, 2}); const auto nms = make_shared(boxes, scores); @@ -679,18 +690,19 @@ TEST(type_prop, nms_v5_output_shape) TEST(type_prop, nms_v5_output_shape_2) { - const auto boxes = make_shared(element::f32, Shape{2, 7, 4}); - const auto scores = make_shared(element::f32, Shape{2, 5, 7}); - const auto max_output_boxes_per_class = op::Constant::create(element::i32, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, 
Shape{2, 7, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 5, 7}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i32, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_output_element_type(0), element::i64); - ASSERT_EQ(nms->get_output_element_type(1), element::f32); - ASSERT_EQ(nms->get_output_element_type(2), element::i64); + ASSERT_EQ(nms->get_output_element_type(0), element::Type_t::i64); + ASSERT_EQ(nms->get_output_element_type(1), element::Type_t::f32); + ASSERT_EQ(nms->get_output_element_type(2), element::Type_t::i64); EXPECT_EQ(nms->get_output_partial_shape(0), PartialShape({Dimension(0, 30), Dimension(3)})); EXPECT_EQ(nms->get_output_partial_shape(1), PartialShape({Dimension(0, 30), Dimension(3)})); @@ -699,18 +711,19 @@ TEST(type_prop, nms_v5_output_shape_2) TEST(type_prop, nms_v5_output_shape_3) { - const auto boxes = make_shared(element::f32, Shape{2, 7, 4}); - const auto scores = make_shared(element::f32, Shape{2, 5, 7}); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {1000}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 5, 7}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {1000}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_output_element_type(0), element::i64); - ASSERT_EQ(nms->get_output_element_type(1), element::f32); - ASSERT_EQ(nms->get_output_element_type(2), element::i64); + ASSERT_EQ(nms->get_output_element_type(0), element::Type_t::i64); + ASSERT_EQ(nms->get_output_element_type(1), element::Type_t::f32); + ASSERT_EQ(nms->get_output_element_type(2), element::Type_t::i64); EXPECT_EQ(nms->get_output_partial_shape(0), PartialShape({Dimension(0, 70), Dimension(3)})); EXPECT_EQ(nms->get_output_partial_shape(1), PartialShape({Dimension(0, 70), Dimension(3)})); EXPECT_EQ(nms->get_output_shape(2), (Shape{1})); @@ -718,11 +731,12 @@ TEST(type_prop, nms_v5_output_shape_3) TEST(type_prop, nms_v5_output_shape_i32) { - const auto boxes = make_shared(element::f32, Shape{2, 7, 4}); - const auto scores = make_shared(element::f32, Shape{2, 5, 7}); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 5, 7}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared(boxes, @@ -732,11 +746,11 @@ TEST(type_prop, nms_v5_output_shape_i32) 
score_threshold, op::v5::NonMaxSuppression::BoxEncodingType::CORNER, true, - element::i32); + element::Type_t::i32); - ASSERT_EQ(nms->get_output_element_type(0), element::i32); - ASSERT_EQ(nms->get_output_element_type(1), element::f32); - ASSERT_EQ(nms->get_output_element_type(2), element::i32); + ASSERT_EQ(nms->get_output_element_type(0), element::Type_t::i32); + ASSERT_EQ(nms->get_output_element_type(1), element::Type_t::f32); + ASSERT_EQ(nms->get_output_element_type(2), element::Type_t::i32); EXPECT_EQ(nms->get_output_partial_shape(0), PartialShape({Dimension(0, 30), Dimension(3)})); EXPECT_EQ(nms->get_output_partial_shape(1), PartialShape({Dimension(0, 30), Dimension(3)})); @@ -745,18 +759,19 @@ TEST(type_prop, nms_v5_output_shape_i32) TEST(type_prop, nms_v5_dynamic_boxes_and_scores) { - const auto boxes = make_shared(element::f32, PartialShape::dynamic()); - const auto scores = make_shared(element::f32, PartialShape::dynamic()); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto scores = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_output_element_type(0), element::i64); - ASSERT_EQ(nms->get_output_element_type(1), element::f32); - ASSERT_EQ(nms->get_output_element_type(2), element::i64); + ASSERT_EQ(nms->get_output_element_type(0), element::Type_t::i64); + ASSERT_EQ(nms->get_output_element_type(1), element::Type_t::f32); + ASSERT_EQ(nms->get_output_element_type(2), element::Type_t::i64); EXPECT_EQ(nms->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), 3})); EXPECT_EQ(nms->get_output_partial_shape(1), PartialShape({Dimension::dynamic(), 3})); EXPECT_EQ(nms->get_output_shape(2), (Shape{1})); diff --git a/ngraph/test/type_prop/non_zero.cpp b/ngraph/test/type_prop/non_zero.cpp index 03ad7397c821bb..1f22ec9fb19d89 100644 --- a/ngraph/test/type_prop/non_zero.cpp +++ b/ngraph/test/type_prop/non_zero.cpp @@ -23,38 +23,38 @@ using namespace ngraph; TEST(type_prop, non_zero) { - auto data = make_shared(element::f32, Shape{3, 3, 224, 224}); + auto data = make_shared(element::Type_t::f32, Shape{3, 3, 224, 224}); auto non_zero = make_shared(data); - EXPECT_EQ(non_zero->get_element_type(), element::i64); + EXPECT_EQ(non_zero->get_element_type(), element::Type_t::i64); EXPECT_TRUE( non_zero->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension::dynamic()})); } TEST(type_prop, non_zero_dynamic) { - auto data = make_shared(element::f32, PartialShape::dynamic()); + auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto non_zero = make_shared(data); - EXPECT_EQ(non_zero->get_element_type(), element::i64); + EXPECT_EQ(non_zero->get_element_type(), element::Type_t::i64); EXPECT_TRUE(non_zero->get_output_partial_shape(0).same_scheme( PartialShape{Dimension::dynamic(), Dimension::dynamic()})); } TEST(type_prop, non_zero_output_type) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto non_zero = 
make_shared(data, element::i32); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto non_zero = make_shared(data, element::Type_t::i32); - ASSERT_EQ(non_zero->get_output_element_type(0), element::i32); + ASSERT_EQ(non_zero->get_output_element_type(0), element::Type_t::i32); EXPECT_TRUE( non_zero->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension::dynamic()})); } TEST(type_prop, non_zero_string_output_type) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto non_zero = make_shared(data, "i32"); - ASSERT_EQ(non_zero->get_output_element_type(0), element::i32); + ASSERT_EQ(non_zero->get_output_element_type(0), element::Type_t::i32); EXPECT_TRUE( non_zero->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension::dynamic()})); } @@ -62,10 +62,10 @@ TEST(type_prop, non_zero_string_output_type) TEST(type_prop, non_zero_fail_index_element_type) { // Deduce type - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); try { - auto non_zero = make_shared(data, element::i16); + auto non_zero = make_shared(data, element::Type_t::i16); // Should have thrown, so fail if it didn't FAIL() << "Invalid output type not detected"; diff --git a/ngraph/test/type_prop/normalize.cpp b/ngraph/test/type_prop/normalize.cpp index 03f342e5ba8d72..9d0b9af0394c65 100644 --- a/ngraph/test/type_prop/normalize.cpp +++ b/ngraph/test/type_prop/normalize.cpp @@ -24,8 +24,8 @@ using namespace ngraph; TEST(type_prop, normalize_axes_input_not_constant) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - auto axes = make_shared(element::u64, Shape{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto axes = make_shared(element::Type_t::u64, Shape{1}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -48,8 +48,9 @@ TEST(type_prop, normalize_axes_input_not_constant) TEST(type_prop, normalize_invalid_axes_rank) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{1, 2}, vector{1, 2}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = + make_shared(element::Type_t::i64, Shape{1, 2}, vector{1, 2}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -73,8 +74,9 @@ TEST(type_prop, normalize_invalid_axes_rank) TEST(type_prop, normalize_axes_out_of_bounds) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{2}, vector{3, 4}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = + make_shared(element::Type_t::i64, Shape{2}, vector{3, 4}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; diff --git a/ngraph/test/type_prop/one_hot.cpp b/ngraph/test/type_prop/one_hot.cpp index 07a5f3bac16376..8bd13bfa0d7440 100644 --- a/ngraph/test/type_prop/one_hot.cpp +++ b/ngraph/test/type_prop/one_hot.cpp @@ -23,34 +23,34 @@ using namespace ngraph; TEST(type_prop, one_hot_v1_output_shape) { - auto indices = make_shared(element::i64, Shape{3}); - auto depth = op::Constant::create(element::i64, Shape{}, {2}); - auto on_value = op::Constant::create(element::u32, Shape{}, {5}); - auto off_value = op::Constant::create(element::u32, Shape{}, {10}); + auto indices = make_shared(element::Type_t::i64, Shape{3}); + auto depth = op::Constant::create(element::Type_t::i64, 
Shape{}, {2}); + auto on_value = op::Constant::create(element::Type_t::u32, Shape{}, {5}); + auto off_value = op::Constant::create(element::Type_t::u32, Shape{}, {10}); int64_t axis = -1; auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); - ASSERT_EQ(ont_hot->get_element_type(), element::u32); + ASSERT_EQ(ont_hot->get_element_type(), element::Type_t::u32); ASSERT_EQ(ont_hot->get_shape(), (Shape{3, 2})); } TEST(type_prop, one_hot_v1_output_shape_2) { - auto indices = make_shared(element::i64, Shape{1, 3, 2, 3}); - auto depth = op::Constant::create(element::i64, Shape{}, {4}); - auto on_value = op::Constant::create(element::f32, Shape{}, {1.0f}); - auto off_value = op::Constant::create(element::f32, Shape{}, {0.0f}); + auto indices = make_shared(element::Type_t::i64, Shape{1, 3, 2, 3}); + auto depth = op::Constant::create(element::Type_t::i64, Shape{}, {4}); + auto on_value = op::Constant::create(element::Type_t::f32, Shape{}, {1.0f}); + auto off_value = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); int64_t axis = 3; auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); - ASSERT_EQ(ont_hot->get_element_type(), element::f32); + ASSERT_EQ(ont_hot->get_element_type(), element::Type_t::f32); ASSERT_EQ(ont_hot->get_shape(), (Shape{1, 3, 2, 4, 3})); } TEST(type_prop, one_hot_v1_indices_elem_not_integral) { - auto indices = make_shared(element::f16, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{}); - auto on_value = make_shared(element::u32, Shape{}); - auto off_value = make_shared(element::u32, Shape{}); + auto indices = make_shared(element::Type_t::f16, Shape{2, 2}); + auto depth = make_shared(element::Type_t::i64, Shape{}); + auto on_value = make_shared(element::Type_t::u32, Shape{}); + auto off_value = make_shared(element::Type_t::u32, Shape{}); int64_t axis = -1; try { @@ -70,10 +70,10 @@ TEST(type_prop, one_hot_v1_indices_elem_not_integral) TEST(type_prop, one_hot_v1_depth_elem_not_integral) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::f16, Shape{}); - auto on_value = make_shared(element::u32, Shape{}); - auto off_value = make_shared(element::u32, Shape{}); + auto indices = make_shared(element::Type_t::i64, Shape{2, 2}); + auto depth = make_shared(element::Type_t::f16, Shape{}); + auto on_value = make_shared(element::Type_t::u32, Shape{}); + auto off_value = make_shared(element::Type_t::u32, Shape{}); int64_t axis = -1; try { @@ -93,10 +93,10 @@ TEST(type_prop, one_hot_v1_depth_elem_not_integral) TEST(type_prop, one_hot_v1_on_off_values_not_compatible) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{}); - auto on_value = make_shared(element::bf16, Shape{}); - auto off_value = make_shared(element::f16, Shape{}); + auto indices = make_shared(element::Type_t::i64, Shape{2, 2}); + auto depth = make_shared(element::Type_t::i64, Shape{}); + auto on_value = make_shared(element::Type_t::bf16, Shape{}); + auto off_value = make_shared(element::Type_t::f16, Shape{}); int64_t axis = -1; try { @@ -118,10 +118,10 @@ TEST(type_prop, one_hot_v1_on_off_values_not_compatible) TEST(type_prop, one_hot_v1_depth_not_scalar) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{1}); - auto on_value = make_shared(element::bf16, Shape{}); - auto off_value = make_shared(element::bf16, Shape{}); + auto indices = make_shared(element::Type_t::i64, Shape{2, 2}); + auto depth = 
make_shared(element::Type_t::i64, Shape{1}); + auto on_value = make_shared(element::Type_t::bf16, Shape{}); + auto off_value = make_shared(element::Type_t::bf16, Shape{}); int64_t axis = -1; try { @@ -141,10 +141,10 @@ TEST(type_prop, one_hot_v1_depth_not_scalar) TEST(type_prop, one_hot_v1_on_value_not_scalar) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{}); - auto on_value = make_shared(element::bf16, Shape{2}); - auto off_value = make_shared(element::bf16, Shape{}); + auto indices = make_shared(element::Type_t::i64, Shape{2, 2}); + auto depth = make_shared(element::Type_t::i64, Shape{}); + auto on_value = make_shared(element::Type_t::bf16, Shape{2}); + auto off_value = make_shared(element::Type_t::bf16, Shape{}); int64_t axis = -1; try { @@ -164,10 +164,10 @@ TEST(type_prop, one_hot_v1_on_value_not_scalar) TEST(type_prop, one_hot_v1_off_value_not_scalar) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{}); - auto on_value = make_shared(element::bf16, Shape{}); - auto off_value = make_shared(element::bf16, Shape{3}); + auto indices = make_shared(element::Type_t::i64, Shape{2, 2}); + auto depth = make_shared(element::Type_t::i64, Shape{}); + auto on_value = make_shared(element::Type_t::bf16, Shape{}); + auto off_value = make_shared(element::Type_t::bf16, Shape{3}); int64_t axis = -1; try { diff --git a/ngraph/test/type_prop/pad.cpp b/ngraph/test/type_prop/pad.cpp index c7a43737a17898..106b2e43dadf63 100644 --- a/ngraph/test/type_prop/pad.cpp +++ b/ngraph/test/type_prop/pad.cpp @@ -25,10 +25,10 @@ using namespace ngraph; TEST(type_prop, pad_v1_arg_pad_value_type_mismatch) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1}); - auto arg_pad_value = make_shared(element::f16, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1}); + auto pads_end = make_shared(element::Type_t::i64, Shape{1}); + auto arg_pad_value = make_shared(element::Type_t::f16, Shape{1}); try { @@ -52,10 +52,10 @@ TEST(type_prop, pad_v1_arg_pad_value_type_mismatch) TEST(type_prop, pad_v1_arg_pad_value_shape_not_compatible) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1}); - auto arg_pad_value = make_shared(element::f32, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1}); + auto pads_end = make_shared(element::Type_t::i64, Shape{1}); + auto arg_pad_value = make_shared(element::Type_t::f32, Shape{1}); try { @@ -78,9 +78,9 @@ TEST(type_prop, pad_v1_arg_pad_value_shape_not_compatible) TEST(type_prop, pad_v1_pads_begin_shape_not_1D) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1, 2}); - auto pads_end = make_shared(element::i64, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1, 2}); + auto pads_end = make_shared(element::Type_t::i64, Shape{1}); try { @@ -102,9 +102,9 @@ TEST(type_prop, pad_v1_pads_begin_shape_not_1D) TEST(type_prop, pad_v1_pads_end_shape_not_1D) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = 
make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1}); + auto pads_end = make_shared(element::Type_t::i64, Shape{1, 2}); try { @@ -125,9 +125,9 @@ TEST(type_prop, pad_v1_pads_end_shape_not_1D) TEST(type_prop, pad_v1_pads_begin_size_not_correct) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{4}); - auto pads_end = make_shared(element::i64, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{4}); + auto pads_end = make_shared(element::Type_t::i64, Shape{1}); try { @@ -150,10 +150,10 @@ TEST(type_prop, pad_v1_pads_begin_size_not_correct) TEST(type_prop, pad_v1_pads_end_size_not_correct) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{4}); - auto arg_pad_value = make_shared(element::f32, Shape{}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1}); + auto pads_end = make_shared(element::Type_t::i64, Shape{4}); + auto arg_pad_value = make_shared(element::Type_t::f32, Shape{}); try { @@ -178,9 +178,9 @@ TEST(type_prop, pad_v1_pads_end_size_not_correct) TEST(type_prop, pad_v1_arg_pads_begin_incompatible_type) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::f32, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::f32, Shape{1}); + auto pads_end = make_shared(element::Type_t::i64, Shape{1}); try { @@ -202,9 +202,9 @@ TEST(type_prop, pad_v1_arg_pads_begin_incompatible_type) TEST(type_prop, pad_v1_arg_pads_end_incompatible_type) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::f32, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1}); + auto pads_end = make_shared(element::Type_t::f32, Shape{1}); try { @@ -226,12 +226,12 @@ TEST(type_prop, pad_v1_arg_pads_end_incompatible_type) TEST(type_prop, pad_v1_deduce_too_small_for_edge) { - auto arg = make_shared(element::f32, Shape{1, 5, 0, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 5, 0, 2}); auto pads_begin = - make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); + make_shared(element::Type_t::i64, Shape{4}, std::vector{0, 1, 2, 3}); auto pads_end = - make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); - auto arg_pad_value = make_shared(element::f32, Shape{}); + make_shared(element::Type_t::i64, Shape{4}, std::vector{0, 1, 2, 3}); + auto arg_pad_value = make_shared(element::Type_t::f32, Shape{}); try { @@ -255,12 +255,12 @@ TEST(type_prop, pad_v1_deduce_too_small_for_edge) TEST(type_prop, pad_v1_deduce_too_small_for_reflect) { - auto arg = make_shared(element::f32, Shape{1, 5, 1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 5, 1, 2}); auto pads_begin = - make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); + make_shared(element::Type_t::i64, Shape{4}, std::vector{0, 1, 2, 3}); auto pads_end = - make_shared(element::i64, Shape{4}, 
std::vector{0, 1, 2, 3}); - auto arg_pad_value = make_shared(element::f32, Shape{}); + make_shared(element::Type_t::i64, Shape{4}, std::vector{0, 1, 2, 3}); + auto arg_pad_value = make_shared(element::Type_t::f32, Shape{}); try { diff --git a/ngraph/test/type_prop/parameter.cpp b/ngraph/test/type_prop/parameter.cpp index 6208dbb7f3605d..a78b2c49035c66 100644 --- a/ngraph/test/type_prop/parameter.cpp +++ b/ngraph/test/type_prop/parameter.cpp @@ -23,7 +23,7 @@ using namespace ngraph; TEST(type_prop, param_partial_rank_dynamic) { - auto a = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto& pshape = a->get_output_partial_shape(0); @@ -33,7 +33,8 @@ TEST(type_prop, param_partial_rank_dynamic) TEST(type_prop, param_partial_rank_static) { - auto a = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3, 4}); + auto a = make_shared(element::Type_t::f32, + PartialShape{2, Dimension::dynamic(), 3, 4}); auto& pshape = a->get_output_partial_shape(0); diff --git a/ngraph/test/type_prop/prelu.cpp b/ngraph/test/type_prop/prelu.cpp index d4b95cbb4d69ee..27fb45b64d8ff9 100644 --- a/ngraph/test/type_prop/prelu.cpp +++ b/ngraph/test/type_prop/prelu.cpp @@ -23,10 +23,10 @@ using namespace ngraph; TEST(type_prop, prelu) { - auto param = make_shared(element::f32, Shape{2, 4}); - auto slope = make_shared(element::f32, Shape{2}); + auto param = make_shared(element::Type_t::f32, Shape{2, 4}); + auto slope = make_shared(element::Type_t::f32, Shape{2}); Shape prelu_shape{2, 4}; auto prelu = make_shared(param, slope); - ASSERT_EQ(prelu->get_element_type(), element::f32); + ASSERT_EQ(prelu->get_element_type(), element::Type_t::f32); ASSERT_EQ(prelu->get_shape(), prelu_shape); } diff --git a/ngraph/test/type_prop/proposal.cpp b/ngraph/test/type_prop/proposal.cpp index 9b92b790bf6343..10bc01b4bf1ce7 100644 --- a/ngraph/test/type_prop/proposal.cpp +++ b/ngraph/test/type_prop/proposal.cpp @@ -27,9 +27,9 @@ using namespace ngraph; TEST(type_prop, proposal_v0_invalid_class_probs_rank) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::Type_t::f32, Shape{3}); try { @@ -52,9 +52,9 @@ TEST(type_prop, proposal_v0_invalid_class_probs_rank) TEST(type_prop, proposal_v0_invalid_class_bbox_deltas_rank) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3}); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto image_shape = make_shared(element::Type_t::f32, Shape{3}); try { @@ -78,9 +78,9 @@ TEST(type_prop, proposal_v0_invalid_class_bbox_deltas_rank) TEST(type_prop, proposal_v0_invalid_image_shape_rank) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{2, 1}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto 
class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::Type_t::f32, Shape{2, 1}); try { @@ -103,9 +103,9 @@ TEST(type_prop, proposal_v0_invalid_image_shape_rank) TEST(type_prop, proposal_v0_invalid_image_shape_size) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{5}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::Type_t::f32, Shape{5}); try { @@ -135,10 +135,11 @@ TEST(type_prop, proposal_v0_shape_infer) attrs.post_nms_topn = 200; const size_t batch_size = 7; - auto class_probs = make_shared(element::f32, Shape{batch_size, 12, 34, 62}); + auto class_probs = + make_shared(element::Type_t::f32, Shape{batch_size, 12, 34, 62}); auto class_bbox_deltas = - make_shared(element::f32, Shape{batch_size, 24, 34, 62}); - auto image_shape = make_shared(element::f32, Shape{3}); + make_shared(element::Type_t::f32, Shape{batch_size, 24, 34, 62}); + auto image_shape = make_shared(element::Type_t::f32, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); ASSERT_EQ(op->get_output_shape(0), (Shape{batch_size * attrs.post_nms_topn, 5})); } @@ -148,9 +149,9 @@ TEST(type_prop, proposal_v0_shape_infer) TEST(type_prop, proposal_v4_invalid_class_probs_rank) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::Type_t::f32, Shape{3}); try { @@ -173,9 +174,9 @@ TEST(type_prop, proposal_v4_invalid_class_probs_rank) TEST(type_prop, proposal_v4_invalid_class_bbox_deltas_rank) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3}); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto image_shape = make_shared(element::Type_t::f32, Shape{3}); try { @@ -199,9 +200,9 @@ TEST(type_prop, proposal_v4_invalid_class_bbox_deltas_rank) TEST(type_prop, proposal_v4_invalid_image_shape_rank) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{2, 1}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::Type_t::f32, Shape{2, 1}); try { @@ -224,9 +225,9 @@ TEST(type_prop, proposal_v4_invalid_image_shape_rank) TEST(type_prop, proposal_v4_invalid_image_shape_size) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{5}); + auto 
class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::Type_t::f32, Shape{5}); try { @@ -256,10 +257,11 @@ TEST(type_prop, proposal_v4_shape_infer) attrs.post_nms_topn = 200; const size_t batch_size = 7; - auto class_probs = make_shared(element::f32, Shape{batch_size, 12, 34, 62}); + auto class_probs = + make_shared(element::Type_t::f32, Shape{batch_size, 12, 34, 62}); auto class_bbox_deltas = - make_shared(element::f32, Shape{batch_size, 24, 34, 62}); - auto image_shape = make_shared(element::f32, Shape{3}); + make_shared(element::Type_t::f32, Shape{batch_size, 24, 34, 62}); + auto image_shape = make_shared(element::Type_t::f32, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); ASSERT_EQ(op->get_output_shape(0), (Shape{batch_size * attrs.post_nms_topn, 5})); ASSERT_EQ(op->get_output_shape(1), (Shape{batch_size * attrs.post_nms_topn})); diff --git a/ngraph/test/type_prop/quantize.cpp b/ngraph/test/type_prop/quantize.cpp index ee7cfbebd5d77e..4b8af66ce3f42e 100644 --- a/ngraph/test/type_prop/quantize.cpp +++ b/ngraph/test/type_prop/quantize.cpp @@ -28,8 +28,8 @@ TEST(type_prop, quantize_f32_to_i8_nchw_per_channel_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{3}; Shape zero_point_shape{3}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -51,8 +51,8 @@ TEST(type_prop, quantize_f32_to_i8_nchw_per_image_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{64}; Shape zero_point_shape{64}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -74,8 +74,8 @@ TEST(type_prop, quantize_f32_to_i8_nchw_per_row_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{480}; Shape zero_point_shape{480}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -97,8 +97,8 @@ TEST(type_prop, quantize_f32_to_i8_nchw_per_image_channel_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{64, 3}; Shape zero_point_shape{64, 3}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -120,8 +120,8 @@ TEST(type_prop, quantize_f32_to_i8_nchw_whole_batch_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = 
element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -143,8 +143,8 @@ TEST(type_prop, quantize_f64_to_i8_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f64; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f64; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -166,8 +166,8 @@ TEST(type_prop, quantize_f64_to_u8_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f64; - element::Type quantized_type = element::u8; + element::Type unquantized_type = element::Type_t::f64; + element::Type quantized_type = element::Type_t::u8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -189,8 +189,8 @@ TEST(type_prop, quantize_f64_to_dyn_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f64; - element::Type quantized_type = element::dynamic; + element::Type unquantized_type = element::Type_t::f64; + element::Type quantized_type = element::Type_t::dynamic; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -222,8 +222,8 @@ TEST(type_prop, quantize_i8_to_u8_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::i8; - element::Type quantized_type = element::u8; + element::Type unquantized_type = element::Type_t::i8; + element::Type quantized_type = element::Type_t::u8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -256,8 +256,8 @@ TEST(type_prop, quantize_f32_to_f32_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::f32; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::f32; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -289,10 +289,10 @@ TEST(type_prop, quantize_batch_scale_type_mismatch_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; - element::Type scale_type = element::f64; + element::Type scale_type = element::Type_t::f64; element::Type zero_point_type = quantized_type; AxisSet axes{}; auto round_mode = op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_INFINITY; @@ -323,11 +323,11 @@ TEST(type_prop, quantize_zero_point_type_mismatch_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = 
element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; - element::Type zero_point_type = element::u8; + element::Type zero_point_type = element::Type_t::u8; AxisSet axes{}; auto round_mode = op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_INFINITY; @@ -357,8 +357,8 @@ TEST(type_prop, quantize_oob_axis_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{320}; Shape zero_point_shape{320}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -391,8 +391,8 @@ TEST(type_prop, quantize_scale_shape_mismatch_same_rank_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{64, 4}; Shape zero_point_shape{64, 3}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -425,8 +425,8 @@ TEST(type_prop, quantize_scale_shape_mismatch_different_rank_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{64, 3, 2}; Shape zero_point_shape{64, 3}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -459,8 +459,8 @@ TEST(type_prop, quantize_zero_point_shape_mismatch_same_rank_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{64, 3}; Shape zero_point_shape{64, 4}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -493,8 +493,8 @@ TEST(type_prop, quantize_zero_point_shape_mismatch_different_rank_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{64, 3}; Shape zero_point_shape{64, 3, 2}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -527,8 +527,8 @@ TEST(type_prop, quantize_partial_all_rank_dynamic_ok) PartialShape batch_shape{PartialShape::dynamic()}; PartialShape scale_shape{PartialShape::dynamic()}; PartialShape zero_point_shape{PartialShape::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type 
zero_point_type = quantized_type; @@ -551,8 +551,8 @@ TEST(type_prop, PartialShape batch_shape{PartialShape::dynamic()}; PartialShape scale_shape{64, Dimension::dynamic(), 96}; PartialShape zero_point_shape{PartialShape::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -576,8 +576,8 @@ TEST( PartialShape batch_shape{PartialShape::dynamic()}; PartialShape scale_shape{64, Dimension::dynamic(), 96}; PartialShape zero_point_shape{PartialShape::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -613,8 +613,8 @@ TEST( PartialShape batch_shape{PartialShape::dynamic()}; PartialShape scale_shape{64, Dimension::dynamic(), 96, Dimension::dynamic()}; PartialShape zero_point_shape{64, 22, Dimension::dynamic(), Dimension::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -638,8 +638,8 @@ TEST( PartialShape batch_shape{PartialShape::dynamic()}; PartialShape scale_shape{64, Dimension::dynamic(), 96, Dimension::dynamic()}; PartialShape zero_point_shape{64, 22, Dimension::dynamic(), Dimension::dynamic(), 3}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -675,8 +675,8 @@ TEST( PartialShape batch_shape{PartialShape::dynamic()}; PartialShape scale_shape{64, Dimension::dynamic(), 96, Dimension::dynamic()}; PartialShape zero_point_shape{65, 22, Dimension::dynamic(), Dimension::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -712,8 +712,8 @@ TEST( PartialShape batch_shape{2, 4, 6, Dimension::dynamic(), 10, Dimension::dynamic()}; PartialShape scale_shape{4, Dimension::dynamic(), Dimension::dynamic()}; PartialShape zero_point_shape{Dimension::dynamic(), 8, Dimension::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -738,8 +738,8 @@ TEST( PartialShape batch_shape{2, 4, 6, Dimension::dynamic(), 10, 
Dimension::dynamic()}; PartialShape scale_shape{4, Dimension::dynamic(), Dimension::dynamic()}; PartialShape zero_point_shape{Dimension::dynamic(), 8, Dimension::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -774,8 +774,8 @@ TEST( PartialShape batch_shape{2, 5, 6, Dimension::dynamic(), 10, Dimension::dynamic()}; PartialShape scale_shape{4, Dimension::dynamic(), Dimension::dynamic()}; PartialShape zero_point_shape{Dimension::dynamic(), 8, Dimension::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; diff --git a/ngraph/test/type_prop/range.cpp b/ngraph/test/type_prop/range.cpp index 5fcfc8e33682da..ec3f9b08ffda68 100644 --- a/ngraph/test/type_prop/range.cpp +++ b/ngraph/test/type_prop/range.cpp @@ -23,57 +23,57 @@ using namespace ngraph; TEST(type_prop, range_nonconst_ok) { - auto start = make_shared(element::i32, Shape{}); - auto stop = make_shared(element::i32, Shape{}); - auto step = make_shared(element::i32, Shape{}); + auto start = make_shared(element::Type_t::i32, Shape{}); + auto stop = make_shared(element::Type_t::i32, Shape{}); + auto step = make_shared(element::Type_t::i32, Shape{}); auto range = make_shared(start, stop, step); - EXPECT_EQ(range->get_element_type(), element::i32); + EXPECT_EQ(range->get_element_type(), element::Type_t::i32); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_nonconst_some_dyn_et_ok) { - auto start = make_shared(element::i32, Shape{}); - auto stop = make_shared(element::dynamic, Shape{}); - auto step = make_shared(element::i32, Shape{}); + auto start = make_shared(element::Type_t::i32, Shape{}); + auto stop = make_shared(element::Type_t::dynamic, Shape{}); + auto step = make_shared(element::Type_t::i32, Shape{}); auto range = make_shared(start, stop, step); - EXPECT_EQ(range->get_element_type(), element::i32); + EXPECT_EQ(range->get_element_type(), element::Type_t::i32); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_nonconst_all_dyn_et_ok) { - auto start = make_shared(element::dynamic, Shape{}); - auto stop = make_shared(element::dynamic, Shape{}); - auto step = make_shared(element::dynamic, Shape{}); + auto start = make_shared(element::Type_t::dynamic, Shape{}); + auto stop = make_shared(element::Type_t::dynamic, Shape{}); + auto step = make_shared(element::Type_t::dynamic, Shape{}); auto range = make_shared(start, stop, step); - EXPECT_EQ(range->get_element_type(), element::dynamic); + EXPECT_EQ(range->get_element_type(), element::Type_t::dynamic); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_nonconst_f32_ok) { - auto start = make_shared(element::dynamic, Shape{}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::dynamic, Shape{}); + auto start = make_shared(element::Type_t::dynamic, Shape{}); + auto stop = 
make_shared(element::Type_t::f32, Shape{}); + auto step = make_shared(element::Type_t::dynamic, Shape{}); auto range = make_shared(start, stop, step); - EXPECT_EQ(range->get_element_type(), element::f32); + EXPECT_EQ(range->get_element_type(), element::Type_t::f32); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_nonconst_boolean_fails) { - auto start = make_shared(element::dynamic, Shape{}); - auto stop = make_shared(element::boolean, Shape{}); - auto step = make_shared(element::dynamic, Shape{}); + auto start = make_shared(element::Type_t::dynamic, Shape{}); + auto stop = make_shared(element::Type_t::boolean, Shape{}); + auto step = make_shared(element::Type_t::dynamic, Shape{}); try { @@ -93,21 +93,21 @@ TEST(type_prop, range_nonconst_boolean_fails) TEST(type_prop, range_some_const_ok) { - auto start = make_shared(element::i32, Shape{}, std::vector{3}); - auto stop = make_shared(element::i32, Shape{}); - auto step = make_shared(element::i32, Shape{}, std::vector{2}); + auto start = make_shared(element::Type_t::i32, Shape{}, std::vector{3}); + auto stop = make_shared(element::Type_t::i32, Shape{}); + auto step = make_shared(element::Type_t::i32, Shape{}, std::vector{2}); auto range = make_shared(start, stop, step); - EXPECT_EQ(range->get_element_type(), element::i32); + EXPECT_EQ(range->get_element_type(), element::Type_t::i32); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_some_const_zero_stride_fails) { - auto start = make_shared(element::i32, Shape{}, std::vector{3}); - auto stop = make_shared(element::i32, Shape{}); - auto step = make_shared(element::i32, Shape{}, std::vector{0}); + auto start = make_shared(element::Type_t::i32, Shape{}, std::vector{3}); + auto stop = make_shared(element::Type_t::i32, Shape{}); + auto step = make_shared(element::Type_t::i32, Shape{}, std::vector{0}); try { @@ -127,9 +127,9 @@ TEST(type_prop, range_some_const_zero_stride_fails) TEST(type_prop, range_some_const_plus_inf_start_fails) { auto start = make_shared( - element::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + element::Type_t::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); + auto stop = make_shared(element::Type_t::f32, Shape{}); + auto step = make_shared(element::Type_t::f32, Shape{}, std::vector{1}); try { @@ -149,9 +149,9 @@ TEST(type_prop, range_some_const_plus_inf_start_fails) TEST(type_prop, range_some_const_minus_inf_start_fails) { auto start = make_shared( - element::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + element::Type_t::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); + auto stop = make_shared(element::Type_t::f32, Shape{}); + auto step = make_shared(element::Type_t::f32, Shape{}, std::vector{1}); try { @@ -171,9 +171,9 @@ TEST(type_prop, range_some_const_minus_inf_start_fails) TEST(type_prop, range_some_const_nan_start_fails) { auto start = - make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + make_shared(element::Type_t::f32, Shape{}, std::vector{std::nanf("")}); + auto stop = make_shared(element::Type_t::f32, Shape{}); + auto step = 
make_shared(element::Type_t::f32, Shape{}, std::vector{1}); try { @@ -192,10 +192,10 @@ TEST(type_prop, range_some_const_nan_start_fails) TEST(type_prop, range_some_const_plus_inf_stop_fails) { - auto start = make_shared(element::f32, Shape{}); + auto start = make_shared(element::Type_t::f32, Shape{}); auto stop = make_shared( - element::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + element::Type_t::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); + auto step = make_shared(element::Type_t::f32, Shape{}, std::vector{1}); try { @@ -214,10 +214,10 @@ TEST(type_prop, range_some_const_plus_inf_stop_fails) TEST(type_prop, range_some_const_minus_inf_stop_fails) { - auto start = make_shared(element::f32, Shape{}); + auto start = make_shared(element::Type_t::f32, Shape{}); auto stop = make_shared( - element::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + element::Type_t::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); + auto step = make_shared(element::Type_t::f32, Shape{}, std::vector{1}); try { @@ -236,9 +236,10 @@ TEST(type_prop, range_some_const_minus_inf_stop_fails) TEST(type_prop, range_some_const_nan_stio_fails) { - auto start = make_shared(element::f32, Shape{}); - auto stop = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::Type_t::f32, Shape{}); + auto stop = + make_shared(element::Type_t::f32, Shape{}, std::vector{std::nanf("")}); + auto step = make_shared(element::Type_t::f32, Shape{}, std::vector{1}); try { @@ -257,10 +258,10 @@ TEST(type_prop, range_some_const_nan_stio_fails) TEST(type_prop, range_some_const_plus_inf_stride_fails) { - auto start = make_shared(element::f32, Shape{}, std::vector{3}); - auto stop = make_shared(element::f32, Shape{}); + auto start = make_shared(element::Type_t::f32, Shape{}, std::vector{3}); + auto stop = make_shared(element::Type_t::f32, Shape{}); auto step = make_shared( - element::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); + element::Type_t::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); try { @@ -279,10 +280,10 @@ TEST(type_prop, range_some_const_plus_inf_stride_fails) TEST(type_prop, range_some_const_minus_inf_stride_fails) { - auto start = make_shared(element::f32, Shape{}, std::vector{3}); - auto stop = make_shared(element::f32, Shape{}); + auto start = make_shared(element::Type_t::f32, Shape{}, std::vector{3}); + auto stop = make_shared(element::Type_t::f32, Shape{}); auto step = make_shared( - element::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); + element::Type_t::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); try { @@ -301,9 +302,10 @@ TEST(type_prop, range_some_const_minus_inf_stride_fails) TEST(type_prop, range_some_const_nan_stride_fails) { - auto start = make_shared(element::f32, Shape{}, std::vector{3}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); + auto start = make_shared(element::Type_t::f32, Shape{}, std::vector{3}); + auto stop = make_shared(element::Type_t::f32, Shape{}); + auto step = + make_shared(element::Type_t::f32, Shape{}, std::vector{std::nanf("")}); try { @@ -322,9 +324,9 @@ TEST(type_prop, range_some_const_nan_stride_fails) TEST(type_prop, range_all_const_zero_stride_fails) { 
- auto start = make_shared(element::i32, Shape{}, std::vector{3}); - auto stop = make_shared(element::i32, Shape{}, std::vector{5}); - auto step = make_shared(element::i32, Shape{}, std::vector{0}); + auto start = make_shared(element::Type_t::i32, Shape{}, std::vector{3}); + auto stop = make_shared(element::Type_t::i32, Shape{}, std::vector{5}); + auto step = make_shared(element::Type_t::i32, Shape{}, std::vector{0}); try { @@ -371,62 +373,62 @@ struct RangeTest : ::testing::TestWithParam TEST_P(RangeTest, deduce_shape_i8) { - run_range_test(element::i8, GetParam()); + run_range_test(element::Type_t::i8, GetParam()); } TEST_P(RangeTest, deduce_shape_i16) { - run_range_test(element::i16, GetParam()); + run_range_test(element::Type_t::i16, GetParam()); } TEST_P(RangeTest, deduce_shape_i32) { - run_range_test(element::i32, GetParam()); + run_range_test(element::Type_t::i32, GetParam()); } TEST_P(RangeTest, deduce_shape_i64) { - run_range_test(element::i64, GetParam()); + run_range_test(element::Type_t::i64, GetParam()); } TEST_P(RangeTest, deduce_shape_u8) { - run_range_test(element::u8, GetParam()); + run_range_test(element::Type_t::u8, GetParam()); } TEST_P(RangeTest, deduce_shape_u16) { - run_range_test(element::u16, GetParam()); + run_range_test(element::Type_t::u16, GetParam()); } TEST_P(RangeTest, deduce_shape_u32) { - run_range_test(element::u32, GetParam()); + run_range_test(element::Type_t::u32, GetParam()); } TEST_P(RangeTest, deduce_shape_u64) { - run_range_test(element::u64, GetParam()); + run_range_test(element::Type_t::u64, GetParam()); } TEST_P(RangeTest, deduce_shape_bf16) { - run_range_test(element::bf16, GetParam()); + run_range_test(element::Type_t::bf16, GetParam()); } TEST_P(RangeTest, deduce_shape_f16) { - run_range_test(element::f16, GetParam()); + run_range_test(element::Type_t::f16, GetParam()); } TEST_P(RangeTest, deduce_shape_f32) { - run_range_test(element::f32, GetParam()); + run_range_test(element::Type_t::f32, GetParam()); } TEST_P(RangeTest, deduce_shape_f64) { - run_range_test(element::f64, GetParam()); + run_range_test(element::Type_t::f64, GetParam()); } INSTANTIATE_TEST_CASE_P(type_prop, @@ -445,42 +447,42 @@ struct RangeTestWithNegatives : ::testing::TestWithParam TEST_P(RangeTestWithNegatives, deduce_shape_i8) { - run_range_test(element::i8, GetParam()); + run_range_test(element::Type_t::i8, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_i16) { - run_range_test(element::i16, GetParam()); + run_range_test(element::Type_t::i16, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_i32) { - run_range_test(element::i32, GetParam()); + run_range_test(element::Type_t::i32, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_i64) { - run_range_test(element::i64, GetParam()); + run_range_test(element::Type_t::i64, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_bf16) { - run_range_test(element::bf16, GetParam()); + run_range_test(element::Type_t::bf16, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_f16) { - run_range_test(element::f16, GetParam()); + run_range_test(element::Type_t::f16, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_f32) { - run_range_test(element::f32, GetParam()); + run_range_test(element::Type_t::f32, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_f64) { - run_range_test(element::f64, GetParam()); + run_range_test(element::Type_t::f64, GetParam()); } INSTANTIATE_TEST_CASE_P(type_prop, @@ -498,22 +500,22 @@ struct RangeTestFloating : ::testing::TestWithParam 
 TEST_P(RangeTestFloating, deduce_shape_bf16)
 {
-    run_range_test(element::bf16, GetParam());
+    run_range_test(element::Type_t::bf16, GetParam());
 }
 
 TEST_P(RangeTestFloating, deduce_shape_f16)
 {
-    run_range_test(element::f16, GetParam());
+    run_range_test(element::Type_t::f16, GetParam());
 }
 
 TEST_P(RangeTestFloating, deduce_shape_f32)
 {
-    run_range_test(element::f32, GetParam());
+    run_range_test(element::Type_t::f32, GetParam());
 }
 
 TEST_P(RangeTestFloating, deduce_shape_f64)
 {
-    run_range_test(element::f64, GetParam());
+    run_range_test(element::Type_t::f64, GetParam());
 }
 
 INSTANTIATE_TEST_CASE_P(type_prop,
diff --git a/ngraph/test/type_prop/read_value.cpp b/ngraph/test/type_prop/read_value.cpp
index 793ad539285407..b096ddb4e43174 100644
--- a/ngraph/test/type_prop/read_value.cpp
+++ b/ngraph/test/type_prop/read_value.cpp
@@ -23,9 +23,9 @@ using namespace ngraph;
 
 TEST(type_prop, read_value_deduce)
 {
-    auto input = make_shared<op::Parameter>(element::f32, Shape{1, 2, 64, 64});
+    auto input = make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 2, 64, 64});
     auto read_value = make_shared<op::ReadValue>(input, "variable_id");
 
-    ASSERT_EQ(read_value->get_element_type(), element::f32);
+    ASSERT_EQ(read_value->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(read_value->get_shape(), (Shape{1, 2, 64, 64}));
 }
diff --git a/ngraph/test/type_prop/reduce_l1.cpp b/ngraph/test/type_prop/reduce_l1.cpp
index 1b165f5cd919b7..6d2812990e4498 100644
--- a/ngraph/test/type_prop/reduce_l1.cpp
+++ b/ngraph/test/type_prop/reduce_l1.cpp
@@ -23,8 +23,8 @@ using namespace ngraph;
 
 TEST(type_prop, reduce_l1_v4_axis_out_of_range)
 {
-    auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
-    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{2, 3});
+    auto arg = make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 2, 3});
+    auto axes = make_shared<op::Constant>(element::Type_t::i64, Shape{2}, vector<int64_t>{2, 3});
     try
     {
         auto reduce_sum = make_shared<op::v4::ReduceL1>(arg, axes);
@@ -43,8 +43,8 @@ TEST(type_prop, reduce_l1_v4_axis_out_of_range)
 
 TEST(type_prop, reduce_l1_v4_shape_if_keep_dims)
 {
-    auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
-    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
+    auto arg = make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 4, 5});
+    auto axes = make_shared<op::Constant>(element::Type_t::i64, Shape{2}, vector<int64_t>{1, 2});
     auto keep_dims = true;
     auto reduce_prod = make_shared<op::v4::ReduceL1>(arg, axes, keep_dims);
     ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1}));
@@ -52,8 +52,8 @@ TEST(type_prop, reduce_l1_v4_shape_if_keep_dims)
 
 TEST(type_prop, reduce_l1_v4_shape_if_not_keep_dims)
 {
-    auto arg = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
-    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
+    auto arg = make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 4, 5});
+    auto axes = make_shared<op::Constant>(element::Type_t::i64, Shape{2}, vector<int64_t>{1, 2});
     auto keep_dims = false;
     auto reduce_prod = make_shared<op::v4::ReduceL1>(arg, axes, keep_dims);
     ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3}));
diff --git a/ngraph/test/type_prop/reduce_l2.cpp b/ngraph/test/type_prop/reduce_l2.cpp
index e8f41281746a97..546938a2edadec 100644
--- a/ngraph/test/type_prop/reduce_l2.cpp
+++ b/ngraph/test/type_prop/reduce_l2.cpp
@@ -23,8 +23,8 @@ using namespace ngraph;
 
 TEST(type_prop, reduce_l2_v4_axis_out_of_range)
 {
-    auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
-    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{2, 3});
+    auto arg = make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 2, 3});
+    auto axes = make_shared<op::Constant>(element::Type_t::i64, Shape{2}, vector<int64_t>{2, 3});
     try
     {
auto reduce_sum = make_shared(arg, axes); @@ -43,8 +43,8 @@ TEST(type_prop, reduce_l2_v4_axis_out_of_range) TEST(type_prop, reduce_l2_v4_shape_if_keep_dims) { - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = true; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1})); @@ -52,8 +52,8 @@ TEST(type_prop, reduce_l2_v4_shape_if_keep_dims) TEST(type_prop, reduce_l2_v4_shape_if_not_keep_dims) { - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = false; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3})); diff --git a/ngraph/test/type_prop/reduce_prod.cpp b/ngraph/test/type_prop/reduce_prod.cpp index 1242a9fee1cd67..f8fcb2b36dae91 100644 --- a/ngraph/test/type_prop/reduce_prod.cpp +++ b/ngraph/test/type_prop/reduce_prod.cpp @@ -23,8 +23,8 @@ using namespace ngraph; TEST(type_prop, reduce_prod_v1_axis_out_of_range) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{2, 3}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{2, 3}); try { auto reduce_prod = make_shared(arg, axes); @@ -44,8 +44,8 @@ TEST(type_prop, reduce_prod_v1_axis_out_of_range) TEST(type_prop, reduce_prod_v1_shape_if_keep_dims) { - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = true; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1})); @@ -53,8 +53,8 @@ TEST(type_prop, reduce_prod_v1_shape_if_keep_dims) TEST(type_prop, reduce_prod_v1_shape_if_not_keep_dims) { - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = false; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3})); diff --git a/ngraph/test/type_prop/reduce_sum.cpp b/ngraph/test/type_prop/reduce_sum.cpp index 4b915a937d76fa..90e50aeec7e20d 100644 --- a/ngraph/test/type_prop/reduce_sum.cpp +++ b/ngraph/test/type_prop/reduce_sum.cpp @@ -23,8 +23,8 @@ using namespace ngraph; TEST(type_prop, reduce_sum_v1_axis_out_of_range) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{2, 3}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{2, 3}); try { auto reduce_sum = make_shared(arg, axes); @@ -44,8 +44,8 @@ TEST(type_prop, reduce_sum_v1_axis_out_of_range) TEST(type_prop, 
reduce_sum_v1_shape_if_keep_dims) { - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = true; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1})); @@ -53,8 +53,8 @@ TEST(type_prop, reduce_sum_v1_shape_if_keep_dims) TEST(type_prop, reduce_sum_v1_shape_if_not_keep_dims) { - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = false; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3})); diff --git a/ngraph/test/type_prop/reorg_yolo.cpp b/ngraph/test/type_prop/reorg_yolo.cpp index c132d1fc9ed230..e63f0cb5ffaf33 100644 --- a/ngraph/test/type_prop/reorg_yolo.cpp +++ b/ngraph/test/type_prop/reorg_yolo.cpp @@ -25,7 +25,7 @@ TEST(type_prop, reorg_yolo_stride_2) { const auto in_shape = Shape{1, 64, 26, 26}; size_t stride = 2; - auto data_param = make_shared(element::f32, in_shape); + auto data_param = make_shared(element::Type_t::f32, in_shape); auto reorg_yolo = make_shared(data_param, stride); // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] @@ -38,7 +38,7 @@ TEST(type_prop, reorg_yolo_stride_2_batch_2) { const auto in_shape = Shape{2, 64, 26, 26}; size_t stride = 2; - auto data_param = make_shared(element::f32, in_shape); + auto data_param = make_shared(element::Type_t::f32, in_shape); auto reorg_yolo = make_shared(data_param, stride); // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] @@ -51,7 +51,7 @@ TEST(type_prop, reorg_yolo_stride_2_smaller_H) { const auto in_shape = Shape{1, 24, 34, 62}; size_t stride = 2; - auto data_param = make_shared(element::f32, in_shape); + auto data_param = make_shared(element::Type_t::f32, in_shape); auto reorg_yolo = make_shared(data_param, stride); // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] @@ -63,7 +63,7 @@ TEST(type_prop, reorg_yolo_stride_3) { const auto in_shape = Shape{1, 9, 3, 3}; size_t stride = 3; - auto data_param = make_shared(element::f32, in_shape); + auto data_param = make_shared(element::Type_t::f32, in_shape); auto reorg_yolo = make_shared(data_param, stride); // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] @@ -77,7 +77,7 @@ TEST(type_prop, reorg_yolo_catch_small_shape_stride) { const auto in_shape = Shape{1, 1, 4, 4}; size_t stride = 2; - auto data_param = make_shared(element::f32, in_shape); + auto data_param = make_shared(element::Type_t::f32, in_shape); try { // Throw error test: For [N, C, H, W] input shape, C >= (stride*stride) is required. 
diff --git a/ngraph/test/type_prop/reshape.cpp b/ngraph/test/type_prop/reshape.cpp
index 0d2f73b60bf3ff..171182b1d10793 100644
--- a/ngraph/test/type_prop/reshape.cpp
+++ b/ngraph/test/type_prop/reshape.cpp
@@ -23,83 +23,83 @@ using namespace ngraph;
 
 TEST(type_prop, reshape_deduce_s2v)
 {
-    auto param = make_shared<op::Parameter>(element::f32, Shape{});
+    auto param = make_shared<op::Parameter>(element::Type_t::f32, Shape{});
     auto r = make_shared<op::v1::Reshape>(
-        param, op::Constant::create(element::u64, {1}, Shape{1}), false);
-    ASSERT_EQ(r->get_element_type(), element::f32);
+        param, op::Constant::create(element::Type_t::u64, {1}, Shape{1}), false);
+    ASSERT_EQ(r->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(r->get_shape(), (Shape{1}));
 }
 
 TEST(type_prop, reshape_deduce_s2m)
 {
-    auto param = make_shared<op::Parameter>(element::f32, Shape{});
+    auto param = make_shared<op::Parameter>(element::Type_t::f32, Shape{});
     auto r = make_shared<op::v1::Reshape>(
-        param, op::Constant::create(element::u64, {2}, Shape{1, 1}), false);
-    ASSERT_EQ(r->get_element_type(), element::f32);
+        param, op::Constant::create(element::Type_t::u64, {2}, Shape{1, 1}), false);
+    ASSERT_EQ(r->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(r->get_shape(), (Shape{1, 1}));
 }
 
 TEST(type_prop, reshape_deduce_s2t)
 {
-    auto param = make_shared<op::Parameter>(element::f32, Shape{});
+    auto param = make_shared<op::Parameter>(element::Type_t::f32, Shape{});
     auto r = make_shared<op::v1::Reshape>(
-        param, op::Constant::create(element::u64, {3}, Shape{1, 1, 1}), false);
-    ASSERT_EQ(r->get_element_type(), element::f32);
+        param, op::Constant::create(element::Type_t::u64, {3}, Shape{1, 1, 1}), false);
+    ASSERT_EQ(r->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(r->get_shape(), (Shape{1, 1, 1}));
 }
 
 TEST(type_prop, reshape_deduce_m2v_01)
 {
-    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4});
+    auto param = make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 4});
     auto r = make_shared<op::v1::Reshape>(
-        param, op::Constant::create(element::u64, {1}, Shape{12}), false);
-    ASSERT_EQ(r->get_element_type(), element::f32);
+        param, op::Constant::create(element::Type_t::u64, {1}, Shape{12}), false);
+    ASSERT_EQ(r->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(r->get_shape(), (Shape{12}));
 }
 
 TEST(type_prop, reshape_deduce_m2v_10)
 {
-    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4});
+    auto param = make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 4});
     auto r = make_shared<op::v1::Reshape>(
-        param, op::Constant::create(element::u64, {1}, Shape{12}), false);
-    ASSERT_EQ(r->get_element_type(), element::f32);
+        param, op::Constant::create(element::Type_t::u64, {1}, Shape{12}), false);
+    ASSERT_EQ(r->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(r->get_shape(), (Shape{12}));
 }
 
 TEST(type_prop, reshape_deduce_t2v_012)
 {
-    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
+    auto param = make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 4, 5});
     auto r = make_shared<op::v1::Reshape>(
-        param, op::Constant::create(element::u64, {1}, Shape{60}), false);
-    ASSERT_EQ(r->get_element_type(), element::f32);
+        param, op::Constant::create(element::Type_t::u64, {1}, Shape{60}), false);
+    ASSERT_EQ(r->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(r->get_shape(), (Shape{60}));
 }
 
 TEST(type_prop, reshape_deduce_t2v_120)
 {
-    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
+    auto param = make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 4, 5});
     auto r = make_shared<op::v1::Reshape>(
-        param, op::Constant::create(element::u64, {1}, Shape{60}), false);
-    ASSERT_EQ(r->get_element_type(), element::f32);
+        param, op::Constant::create(element::Type_t::u64, {1}, Shape{60}), false);
+    ASSERT_EQ(r->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(r->get_shape(), (Shape{60}));
 }
 
 TEST(type_prop, reshape_deduce_zero_special)
 {
-    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
+    auto param = make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 4, 5});
     auto r = make_shared<op::v1::Reshape>(
-        param, op::Constant::create(element::u64, {3}, Shape{6, 2, 0}), true);
-    ASSERT_EQ(r->get_element_type(), element::f32);
+        param, op::Constant::create(element::Type_t::u64, {3}, Shape{6, 2, 0}), true);
+    ASSERT_EQ(r->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(r->get_shape(), (Shape{6, 2, 5}));
 }
 
 TEST(type_prop, reshape_deduce_wrong_output_shape)
 {
-    auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
+    auto param = make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 4, 5});
     try
     {
         auto r = make_shared<op::v1::Reshape>(
-            param, op::Constant::create(element::u64, {3}, Shape{3, 3, 3}), false);
+            param, op::Constant::create(element::Type_t::u64, {3}, Shape{3, 3, 3}), false);
         // Should have thrown, so fail if it didn't
         FAIL() << "No exception was thrown";
     }
@@ -120,10 +120,10 @@ TEST(type_prop, reshape_deduce_wrong_output_shape)
 //
 TEST(type_prop, reshape_partial_rank_dynamic)
 {
-    auto param = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
+    auto param = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
     auto r = make_shared<op::v1::Reshape>(
-        param, op::Constant::create(element::u64, {4}, Shape{3, 1, 8, 2}), false);
-    ASSERT_EQ(r->get_element_type(), element::f32);
+        param, op::Constant::create(element::Type_t::u64, {4}, Shape{3, 1, 8, 2}), false);
+    ASSERT_EQ(r->get_element_type(), element::Type_t::f32);
     ASSERT_TRUE(r->get_output_partial_shape(0).is_static());
     ASSERT_EQ(r->get_shape(), (Shape{3, 1, 8, 2}));
 }
@@ -135,10 +135,10 @@ TEST(type_prop, reshape_partial_rank_static)
 {
     auto param_shape =
         PartialShape{Dimension::dynamic(), 6, Dimension::dynamic(), Dimension::dynamic()};
-    auto param = make_shared<op::Parameter>(element::f32, param_shape);
+    auto param = make_shared<op::Parameter>(element::Type_t::f32, param_shape);
     auto r = make_shared<op::v1::Reshape>(
-        param, op::Constant::create(element::u64, {4}, Shape{3, 1, 8, 2}), false);
-    ASSERT_EQ(r->get_element_type(), element::f32);
+        param, op::Constant::create(element::Type_t::u64, {4}, Shape{3, 1, 8, 2}), false);
+    ASSERT_EQ(r->get_element_type(), element::Type_t::f32);
     ASSERT_TRUE(r->get_output_partial_shape(0).is_static());
     ASSERT_EQ(r->get_shape(), (Shape{3, 1, 8, 2}));
 }
@@ -151,10 +151,10 @@ TEST(type_prop, reshape_partial_rank_static_dynamic_but_zero_ok)
 {
     auto param_shape =
         PartialShape{Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()};
-    auto param = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
+    auto param = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
     auto r = make_shared<op::v1::Reshape>(
-        param, op::Constant::create(element::u64, {4}, Shape{3, 1, 0, 2}), false);
-    ASSERT_EQ(r->get_element_type(), element::f32);
+        param, op::Constant::create(element::Type_t::u64, {4}, Shape{3, 1, 0, 2}), false);
+    ASSERT_EQ(r->get_element_type(), element::Type_t::f32);
     ASSERT_TRUE(r->get_output_partial_shape(0).is_static());
     ASSERT_EQ(r->get_shape(), (Shape{3, 1, 0, 2}));
 }
diff --git a/ngraph/test/type_prop/reverse.cpp b/ngraph/test/type_prop/reverse.cpp
index 6a77fe367b8f02..ce58bc9433597e 100644
--- a/ngraph/test/type_prop/reverse.cpp
+++ b/ngraph/test/type_prop/reverse.cpp
@@ -26,133 +26,140 @@ using namespace ngraph;
 TEST(type_prop, reverse_1d_deduce)
 {
     // Deduce type
-    auto param = make_shared<op::Parameter>(element::f32, Shape{5});
+    auto param = make_shared<op::Parameter>(element::Type_t::f32, Shape{5});
     auto rev = make_shared<op::v1::Reverse>(
-        param,
op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + param, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5})); } TEST(type_prop, reverse_2d_deduce_0) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6}); + auto param = make_shared(element::Type_t::f32, Shape{5, 6}); auto rev = make_shared( - param, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + param, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6})); } TEST(type_prop, reverse_2d_deduce_1) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6}); + auto param = make_shared(element::Type_t::f32, Shape{5, 6}); auto rev = make_shared( - param, op::Constant::create(element::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX); + param, op::Constant::create(element::Type_t::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6})); } TEST(type_prop, reverse_2d_deduce_01) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6}); - auto rev = make_shared( - param, op::Constant::create(element::i64, {2}, {0, 1}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, Shape{5, 6}); + auto rev = make_shared(param, + op::Constant::create(element::Type_t::i64, {2}, {0, 1}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6})); } TEST(type_prop, reverse_3d_deduce_0) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); auto rev = make_shared( - param, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + param, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); } TEST(type_prop, reverse_3d_deduce_1) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); auto rev = make_shared( - param, op::Constant::create(element::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX); + param, op::Constant::create(element::Type_t::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); } TEST(type_prop, reverse_3d_deduce_2) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); auto rev = make_shared( - param, op::Constant::create(element::i64, {1}, {2}), op::v1::Reverse::Mode::INDEX); + param, op::Constant::create(element::Type_t::i64, {1}, {2}), op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); 
EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); } TEST(type_prop, reverse_3d_deduce_01) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); - auto rev = make_shared( - param, op::Constant::create(element::i64, {2}, {0, 1}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); + auto rev = make_shared(param, + op::Constant::create(element::Type_t::i64, {2}, {0, 1}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); } TEST(type_prop, reverse_3d_deduce_02) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); - auto rev = make_shared( - param, op::Constant::create(element::i64, {2}, {0, 2}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); + auto rev = make_shared(param, + op::Constant::create(element::Type_t::i64, {2}, {0, 2}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); } TEST(type_prop, reverse_3d_deduce_12) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); - auto rev = make_shared( - param, op::Constant::create(element::i64, {2}, {1, 2}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); + auto rev = make_shared(param, + op::Constant::create(element::Type_t::i64, {2}, {1, 2}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); } TEST(type_prop, reverse_3d_deduce_012) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); - auto rev = make_shared( - param, op::Constant::create(element::i64, {3}, {0, 1, 2}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); + auto rev = + make_shared(param, + op::Constant::create(element::Type_t::i64, {3}, {0, 1, 2}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); } TEST(type_prop, reverse_3d_deduce_oob) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); try { - auto rev = make_shared(param, - op::Constant::create(element::i64, {3}, {0, 3, 2}), - op::v1::Reverse::Mode::INDEX); + auto rev = + make_shared(param, + op::Constant::create(element::Type_t::i64, {3}, {0, 3, 2}), + op::v1::Reverse::Mode::INDEX); // Should have thrown, so fail if it didn't FAIL() << "Axis out of bounds not detected"; @@ -175,13 +182,13 @@ TEST(type_prop, reverse_3d_deduce_oob) // TEST(type_prop, reverse_partial_rank_dynamic) { - auto param = make_shared(element::f32, PartialShape::dynamic()); - auto rev = - make_shared(param, - op::Constant::create(element::i64, {4}, {0, 2, 1776, 90909}), - op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto rev = make_shared( + param, + op::Constant::create(element::Type_t::i64, {4}, {0, 2, 1776, 90909}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); 
EXPECT_TRUE(rev->get_output_partial_shape(0).rank().is_dynamic()); } @@ -192,23 +199,25 @@ TEST(type_prop, reverse_partial_rank_dynamic) TEST(type_prop, reverse_partial_rank_static_dynamic_axes_ok) { PartialShape param_shape{Dimension::dynamic(), Dimension::dynamic(), 2, 3}; - auto param = make_shared(element::f32, param_shape); - auto rev = make_shared( - param, op::Constant::create(element::i64, {2}, {0, 2}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, param_shape); + auto rev = make_shared(param, + op::Constant::create(element::Type_t::i64, {2}, {0, 2}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_TRUE(rev->get_output_partial_shape(0).same_scheme(param_shape)); } TEST(type_prop, reverse_partial_rank_static_dynamic_axes_oob) { PartialShape param_shape{Dimension::dynamic(), Dimension::dynamic(), 2, 3}; - auto param = make_shared(element::f32, param_shape); + auto param = make_shared(element::Type_t::f32, param_shape); try { - auto rev = make_shared(param, - op::Constant::create(element::i64, {3}, {0, 4, 2}), - op::v1::Reverse::Mode::INDEX); + auto rev = + make_shared(param, + op::Constant::create(element::Type_t::i64, {3}, {0, 4, 2}), + op::v1::Reverse::Mode::INDEX); // Should have thrown, so fail if it didn't FAIL() << "Axis out of bounds not detected"; diff --git a/ngraph/test/type_prop/reverse_sequence.cpp b/ngraph/test/type_prop/reverse_sequence.cpp index ade152fd77eef7..65819ceee51f4b 100644 --- a/ngraph/test/type_prop/reverse_sequence.cpp +++ b/ngraph/test/type_prop/reverse_sequence.cpp @@ -23,8 +23,8 @@ using namespace ngraph; TEST(type_prop, reverse_sequence_1_dim) { - auto data = make_shared(element::f32, Shape{4, 3, 2}); - auto seq_lenghts = make_shared(element::f32, Shape{4, 4}); + auto data = make_shared(element::Type_t::f32, Shape{4, 3, 2}); + auto seq_lenghts = make_shared(element::Type_t::f32, Shape{4, 4}); try { size_t batch_axis = 0; @@ -45,8 +45,8 @@ TEST(type_prop, reverse_sequence_1_dim) TEST(type_prop, reverse_sequence_batch_index_oob) { - auto data = make_shared(element::f32, Shape{4, 3, 2}); - auto seq_lenghts = make_shared(element::f32, Shape{3}); + auto data = make_shared(element::Type_t::f32, Shape{4, 3, 2}); + auto seq_lenghts = make_shared(element::Type_t::f32, Shape{3}); try { size_t batch_axis = 3; @@ -66,8 +66,8 @@ TEST(type_prop, reverse_sequence_batch_index_oob) TEST(type_prop, reverse_sequence_sequence_index_oob) { - auto data = make_shared(element::f32, Shape{4, 3, 2}); - auto seq_lengths = make_shared(element::f32, Shape{3}); + auto data = make_shared(element::Type_t::f32, Shape{4, 3, 2}); + auto seq_lengths = make_shared(element::Type_t::f32, Shape{3}); try { size_t batch_axis = 1; @@ -87,8 +87,8 @@ TEST(type_prop, reverse_sequence_sequence_index_oob) TEST(type_prop, reverse_sequence_seq_len_size_equal_to_batch_dim) { - auto data = make_shared(element::f32, Shape{4, 3, 2}); - auto seq_lenghts = make_shared(element::f32, Shape{3}); + auto data = make_shared(element::Type_t::f32, Shape{4, 3, 2}); + auto seq_lenghts = make_shared(element::Type_t::f32, Shape{3}); try { size_t batch_axis = 0; @@ -111,67 +111,68 @@ TEST(type_prop, reverse_sequence_seq_len_size_equal_to_batch_dim) TEST(type_prop, reverse_sequence_partial_both_rank_dynamic) { - auto data = make_shared(element::f32, PartialShape::dynamic()); - auto seq_lengths = make_shared(element::f32, PartialShape::dynamic()); + auto data = 
+        make_shared(element::Type_t::f32, PartialShape::dynamic());
+    auto seq_lengths = make_shared(element::Type_t::f32, PartialShape::dynamic());
     // Unrealistic values, but they don't matter here.
     size_t batch_axis = 202;
     size_t seq_axis = 909;
     auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis);
     EXPECT_TRUE(rs->get_output_partial_shape(0).is_dynamic());
-    EXPECT_EQ(rs->get_output_element_type(0), element::f32);
+    EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32);
 }

 TEST(type_prop, reverse_sequence_partial_left_rank_dynamic)
 {
-    auto data = make_shared(element::f32, PartialShape::dynamic());
-    auto seq_lengths = make_shared(element::f32, PartialShape{3});
+    auto data = make_shared(element::Type_t::f32, PartialShape::dynamic());
+    auto seq_lengths = make_shared(element::Type_t::f32, PartialShape{3});
     // Unrealistic values, but they don't matter here.
     size_t batch_axis = 202;
     size_t seq_axis = 909;
     auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis);
     EXPECT_TRUE(rs->get_output_partial_shape(0).is_dynamic());
-    EXPECT_EQ(rs->get_output_element_type(0), element::f32);
+    EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32);
 }

 TEST(type_prop, reverse_sequence_partial_right_rank_dynamic)
 {
-    auto data = make_shared(element::f32, PartialShape{2, 4, 6, 8});
-    auto seq_lengths = make_shared(element::f32, PartialShape::dynamic());
+    auto data = make_shared(element::Type_t::f32, PartialShape{2, 4, 6, 8});
+    auto seq_lengths = make_shared(element::Type_t::f32, PartialShape::dynamic());
     size_t batch_axis = 0;
     size_t seq_axis = 1;
     auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis);
     EXPECT_TRUE(rs->get_output_partial_shape(0).same_scheme(PartialShape{2, 4, 6, 8}));
-    EXPECT_EQ(rs->get_output_element_type(0), element::f32);
+    EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32);
 }

 TEST(type_prop, reverse_sequence_partial_both_rank_static_dynamic)
 {
-    auto data = make_shared(element::f32,
+    auto data = make_shared(element::Type_t::f32,
                             PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()});
-    auto seq_lengths = make_shared(element::f32, PartialShape::dynamic());
+    auto seq_lengths = make_shared(element::Type_t::f32, PartialShape::dynamic());
     size_t batch_axis = 0;
     size_t seq_axis = 1;
     auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis);
     EXPECT_TRUE(rs->get_output_partial_shape(0).same_scheme(PartialShape{
         Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
-    EXPECT_EQ(rs->get_output_element_type(0), element::f32);
+    EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32);
 }

 TEST(type_prop, reverse_sequence_partial_both_rank_static_dynamic_batch_axis_oob)
 {
-    auto data = make_shared(element::f32,
+    auto data = make_shared(element::Type_t::f32,
                             PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()});
-    auto seq_lengths = make_shared(element::f32, PartialShape{Dimension::dynamic()});
+    auto seq_lengths =
+        make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic()});
     size_t batch_axis = 4;
     size_t seq_axis = 1;
     try
@@ -191,12 +192,13 @@ TEST(type_prop, reverse_sequence_partial_both_rank_static_dynamic_batch_axis_oob

 TEST(type_prop, reverse_sequence_partial_both_rank_static_dynamic_sequence_axis_oob)
 {
-    auto data = make_shared(element::f32,
+    auto data = make_shared(element::Type_t::f32,
                             PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()});
-    auto seq_lengths = make_shared(element::f32, PartialShape{Dimension::dynamic()});
+    auto seq_lengths =
+        make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic()});
     size_t batch_axis = 1;
     size_t seq_axis = 4;
     try
@@ -217,50 +219,51 @@ TEST(type_prop, reverse_sequence_partial_both_rank_static_dynamic_sequence_axis_
 TEST(type_prop,
      reverse_sequence_partial_left_rank_static_dynamic_right_static_left_seq_length_dynamic)
 {
-    auto data = make_shared(element::f32,
+    auto data = make_shared(element::Type_t::f32,
                             PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()});
-    auto seq_lengths = make_shared(element::f32, PartialShape{3});
+    auto seq_lengths = make_shared(element::Type_t::f32, PartialShape{3});
     size_t batch_axis = 2;
     size_t seq_axis = 1;
     auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis);
     EXPECT_TRUE(rs->get_output_partial_shape(0).same_scheme(
         PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, Dimension::dynamic()}));
-    EXPECT_EQ(rs->get_output_element_type(0), element::f32);
+    EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32);
 }

 TEST(type_prop, reverse_sequence_partial_both_rank_static_dynamic_right_seq_length_dynamic)
 {
     auto data = make_shared(
-        element::f32,
+        element::Type_t::f32,
         PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, Dimension::dynamic()});
-    auto seq_lengths = make_shared(element::f32, PartialShape{Dimension::dynamic()});
+    auto seq_lengths =
+        make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic()});
     size_t batch_axis = 2;
     size_t seq_axis = 1;
     auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis);
     EXPECT_TRUE(rs->get_output_partial_shape(0).same_scheme(
         PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, Dimension::dynamic()}));
-    EXPECT_EQ(rs->get_output_element_type(0), element::f32);
+    EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32);
 }

 TEST(type_prop,
      reverse_sequence_partial_left_rank_static_dynamic_right_static_left_seq_length_static)
 {
     auto data = make_shared(
-        element::f32,
+        element::Type_t::f32,
         PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, Dimension::dynamic()});
-    auto seq_lengths = make_shared(element::f32, PartialShape{3});
+    auto seq_lengths = make_shared(element::Type_t::f32, PartialShape{3});
     size_t batch_axis = 2;
     size_t seq_axis = 1;
     auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis);
     EXPECT_TRUE(rs->get_output_partial_shape(0).same_scheme(
         PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, Dimension::dynamic()}));
-    EXPECT_EQ(rs->get_output_element_type(0), element::f32);
+    EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32);
 }

 TEST(
@@ -268,9 +271,9 @@ TEST(
     reverse_sequence_partial_left_rank_static_dynamic_right_static_left_seq_length_static_inconsistent)
 {
     auto data = make_shared(
-        element::f32,
+        element::Type_t::f32,
         PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, Dimension::dynamic()});
-    auto seq_lengths = make_shared(element::f32, PartialShape{4});
+    auto seq_lengths = make_shared(element::Type_t::f32, PartialShape{4});
     size_t batch_axis = 2;
     size_t seq_axis = 1;
     try
@@ -292,8 +295,8 @@ TEST(
 TEST(type_prop, reverse_sequence_negative_axis_dynamic_input_rank)
 {
-    auto data = make_shared(element::f32, PartialShape::dynamic());
-    auto seq_lengths = make_shared(element::f32, PartialShape{1});
+    auto data = make_shared(element::Type_t::f32, PartialShape::dynamic());
+    auto seq_lengths = make_shared(element::Type_t::f32, PartialShape{1});
int64_t batch_axis = 1; int64_t seq_axis = -2; try @@ -315,8 +318,8 @@ TEST(type_prop, reverse_sequence_negative_axis_dynamic_input_rank) TEST(type_prop, reverse_sequence_negative_axes_support) { - auto data = make_shared(element::f32, PartialShape{1, 2, 3, 4, 5}); - auto seq_lengths = make_shared(element::f32, PartialShape{3}); + auto data = make_shared(element::Type_t::f32, PartialShape{1, 2, 3, 4, 5}); + auto seq_lengths = make_shared(element::Type_t::f32, PartialShape{3}); int64_t batch_axis = -3; int64_t seq_axis = -2; diff --git a/ngraph/test/type_prop/rnn_cell.cpp b/ngraph/test/type_prop/rnn_cell.cpp index 627457edbb9c93..aedc5c88fe26a3 100644 --- a/ngraph/test/type_prop/rnn_cell.cpp +++ b/ngraph/test/type_prop/rnn_cell.cpp @@ -28,13 +28,17 @@ TEST(type_prop, rnn_cell) const size_t input_size = 3; const size_t hidden_size = 3; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto W = make_shared(element::f32, Shape{hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); + const auto X = + make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto W = + make_shared(element::Type_t::f32, Shape{hidden_size, input_size}); + const auto R = + make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); const auto rnn_cell = make_shared(X, H_t, W, R, hidden_size); - EXPECT_EQ(rnn_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(rnn_cell->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(rnn_cell->get_output_shape(0), (Shape{batch_size, hidden_size})); } @@ -44,12 +48,13 @@ TEST(type_prop, rnn_cell_invalid_input) const size_t input_size = 3; const size_t hidden_size = 3; - auto X = make_shared(element::f32, Shape{batch_size, input_size}); - auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); + auto H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); // Invalid W tensor shape. - auto W = make_shared(element::f32, Shape{2 * hidden_size, input_size}); + auto W = + make_shared(element::Type_t::f32, Shape{2 * hidden_size, input_size}); try { const auto rnn_cell = make_shared(X, H_t, W, R, hidden_size); @@ -62,8 +67,8 @@ TEST(type_prop, rnn_cell_invalid_input) } // Invalid R tensor shape. - W = make_shared(element::f32, Shape{hidden_size, input_size}); - R = make_shared(element::f32, Shape{hidden_size, 1}); + W = make_shared(element::Type_t::f32, Shape{hidden_size, input_size}); + R = make_shared(element::Type_t::f32, Shape{hidden_size, 1}); try { const auto rnn_cell = make_shared(X, H_t, W, R, hidden_size); @@ -78,8 +83,8 @@ TEST(type_prop, rnn_cell_invalid_input) } // Invalid H_t tensor shape. - R = make_shared(element::f32, Shape{hidden_size, hidden_size}); - H_t = make_shared(element::f32, Shape{4, hidden_size}); + R = make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); + H_t = make_shared(element::Type_t::f32, Shape{4, hidden_size}); try { const auto rnn_cell = make_shared(X, H_t, W, R, hidden_size); @@ -93,8 +98,8 @@ TEST(type_prop, rnn_cell_invalid_input) } // Invalid B tensor shape. 
- H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - auto B = make_shared(element::f32, Shape{2 * hidden_size}); + H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + auto B = make_shared(element::Type_t::f32, Shape{2 * hidden_size}); try { const auto rnn_cell = make_shared(X, H_t, W, R, B, hidden_size); @@ -114,16 +119,16 @@ TEST(type_prop, rnn_cell_dynamic_batch_size) const size_t hidden_size = 3; const auto X = - make_shared(element::f32, PartialShape{batch_size, input_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto W = - make_shared(element::f32, PartialShape{hidden_size, input_size}); - const auto R = - make_shared(element::f32, PartialShape{hidden_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + PartialShape{hidden_size, hidden_size}); const auto rnn_cell = make_shared(X, H_t, W, R, hidden_size); - EXPECT_EQ(rnn_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(rnn_cell->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(rnn_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); } @@ -134,16 +139,16 @@ TEST(type_prop, rnn_cell_dynamic_hidden_size) const auto hidden_size = Dimension::dynamic(); const auto X = - make_shared(element::f32, PartialShape{batch_size, input_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto W = - make_shared(element::f32, PartialShape{hidden_size, input_size}); - const auto R = - make_shared(element::f32, PartialShape{hidden_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + PartialShape{hidden_size, hidden_size}); const auto rnn_cell = make_shared(X, H_t, W, R, 3); - EXPECT_EQ(rnn_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(rnn_cell->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(rnn_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); } @@ -154,18 +159,18 @@ TEST(type_prop, rnn_cell_dynamic_inputs) const auto hidden_size = Dimension::dynamic(); const auto X = - make_shared(element::f32, PartialShape{batch_size, input_size}); - const auto R = - make_shared(element::f32, PartialShape{hidden_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + PartialShape{hidden_size, hidden_size}); const auto W = - make_shared(element::f32, PartialShape{hidden_size, input_size}); + make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto rnn_cell = make_shared(X, H_t, W, R, 2); EXPECT_EQ(rnn_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); - EXPECT_EQ(rnn_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(rnn_cell->get_output_element_type(0), element::Type_t::f32); } TEST(type_prop, rnn_cell_invalid_input_rank0) @@ -174,40 
+179,41 @@ TEST(type_prop, rnn_cell_invalid_input_rank0) const size_t input_size = 3; const size_t hidden_size = 3; - auto X = make_shared(element::f32, Shape{batch_size, input_size}); - auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); + auto H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); // Invalid rank0 for W tensor. - auto W = make_shared(element::f32, PartialShape{}); + auto W = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "RNNCell node was created with invalid data."; // Invalid rank0 for X tensor. - W = make_shared(element::f32, PartialShape{hidden_size, input_size}); - X = make_shared(element::f32, PartialShape{}); + W = make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); + X = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "RNNCell node was created with invalid data."; // Invalid rank0 for H_t tensor. - X = make_shared(element::f32, Shape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape{}); + X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + H_t = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "RNNCell node was created with invalid data."; // Invalid rank0 for R tensor. - H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape{}); + H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + R = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "RNNCell node was created with invalid data."; // Invalid rank0 for B tensor. - R = make_shared(element::f32, PartialShape{hidden_size, hidden_size}); - auto B = make_shared(element::f32, PartialShape{}); + R = make_shared(element::Type_t::f32, + PartialShape{hidden_size, hidden_size}); + auto B = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, B, hidden_size), ngraph::NodeValidationFailure) << "RNNCell node was created with invalid data."; @@ -219,40 +225,46 @@ TEST(type_prop, rnn_cell_invalid_input_dynamic_rank) const size_t input_size = 3; const size_t hidden_size = 3; - auto X = make_shared(element::f32, Shape{batch_size, input_size}); - auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); + auto H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); auto check_dynamic_rnn = [](const shared_ptr& rnn) -> bool { return rnn->output(0).get_partial_shape() == PartialShape::dynamic() && rnn->output(0).get_element_type() == rnn->input(0).get_element_type(); }; // Invalid dynamic rank for W tensor. 
- auto W = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto W = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); auto rnn_w = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_rnn(rnn_w), true); // Invalid dynamic rank for X tensor. - W = make_shared(element::f32, PartialShape{hidden_size, input_size}); - X = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + W = make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); + X = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); auto rnn_x = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_rnn(rnn_x), true); // Invalid dynamic rank for H_t tensor. - X = make_shared(element::f32, Shape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + H_t = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); auto rnn_h = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_rnn(rnn_h), true); // Invalid dynamic rank for R tensor. - H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + R = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); auto rnn_r = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_rnn(rnn_r), true); // Invalid dynamic rank for B tensor. - R = make_shared(element::f32, PartialShape{hidden_size, hidden_size}); - auto B = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + R = make_shared(element::Type_t::f32, + PartialShape{hidden_size, hidden_size}); + auto B = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); auto rnn_b = make_shared(X, H_t, W, R, B, hidden_size); EXPECT_EQ(check_dynamic_rnn(rnn_b), true); } diff --git a/ngraph/test/type_prop/rnn_sequence.cpp b/ngraph/test/type_prop/rnn_sequence.cpp index 94b500dbb02e4c..30c24dff42f3f6 100644 --- a/ngraph/test/type_prop/rnn_sequence.cpp +++ b/ngraph/test/type_prop/rnn_sequence.cpp @@ -30,17 +30,19 @@ TEST(type_prop, rnn_sequence_forward) const size_t input_size = 4; const size_t hidden_size = 128; - const auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto X = make_shared(element::Type_t::f32, + Shape{batch_size, seq_length, input_size}); const auto initial_hidden_state = make_shared( - element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = + make_shared(element::Type_t::i32, Shape{batch_size}); - const auto W = make_shared(element::f32, + const auto W = make_shared(element::Type_t::f32, Shape{num_directions, hidden_size, input_size}); - const auto R = make_shared(element::f32, + const auto R = make_shared(element::Type_t::f32, Shape{num_directions, hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{num_directions, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{num_directions, hidden_size}); const auto direction = op::RecurrentSequenceDirection::FORWARD; @@ -53,10 +55,10 @@ TEST(type_prop, rnn_sequence_forward) EXPECT_TRUE(sequence->get_activations_beta().empty()); 
EXPECT_EQ(sequence->get_activations()[0], "tanh"); EXPECT_EQ(sequence->get_clip(), 0.f); - EXPECT_EQ(sequence->get_output_element_type(0), element::f32); + EXPECT_EQ(sequence->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(sequence->outputs().size(), 2); EXPECT_EQ(sequence->get_output_shape(0), (Shape{batch_size, num_directions, seq_length, hidden_size})); - EXPECT_EQ(sequence->get_output_element_type(1), element::f32); + EXPECT_EQ(sequence->get_output_element_type(1), element::Type_t::f32); EXPECT_EQ(sequence->get_output_shape(1), (Shape{batch_size, num_directions, hidden_size})); } diff --git a/ngraph/test/type_prop/roi_align.cpp b/ngraph/test/type_prop/roi_align.cpp index 67b103606703a9..0c32f24c5d6854 100644 --- a/ngraph/test/type_prop/roi_align.cpp +++ b/ngraph/test/type_prop/roi_align.cpp @@ -22,37 +22,39 @@ using namespace ngraph; TEST(type_prop_layers, roi_align_basic_shape_inference) { - const auto data = make_shared(element::f32, Shape{2, 3, 5, 5}); - const auto rois = make_shared(element::f32, Shape{7, 4}); - const auto batch_indices = make_shared(element::i32, Shape{7}); + const auto data = make_shared(element::Type_t::f32, Shape{2, 3, 5, 5}); + const auto rois = make_shared(element::Type_t::f32, Shape{7, 4}); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{7}); const auto op = make_shared(data, rois, batch_indices, 2, 2, 1, 1.0f, "avg"); ASSERT_EQ(op->get_shape(), (Shape{7, 3, 2, 2})); } TEST(type_prop_layers, roi_align_dynamic_channels_dim) { - const auto data = make_shared(element::f32, PartialShape{10, Dimension(), 5, 5}); - const auto rois = make_shared(element::f32, Shape{7, 4}); - const auto batch_indices = make_shared(element::i32, Shape{7}); + const auto data = + make_shared(element::Type_t::f32, PartialShape{10, Dimension(), 5, 5}); + const auto rois = make_shared(element::Type_t::f32, Shape{7, 4}); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{7}); const auto op = make_shared(data, rois, batch_indices, 3, 4, 1, 1.0f, "avg"); ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{7, Dimension(), 3, 4})); } TEST(type_prop_layers, roi_align_num_rois_from_batch_indices) { - const auto data = make_shared(element::f32, PartialShape{10, 3, 5, 5}); + const auto data = make_shared(element::Type_t::f32, PartialShape{10, 3, 5, 5}); const auto rois = - make_shared(element::f32, PartialShape{Dimension{}, Dimension{}}); - const auto batch_indices = make_shared(element::i32, Shape{9}); + make_shared(element::Type_t::f32, PartialShape{Dimension{}, Dimension{}}); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{9}); const auto op = make_shared(data, rois, batch_indices, 3, 4, 1, 1.0f, "avg"); ASSERT_EQ(op->get_shape(), (Shape{9, 3, 3, 4})); } TEST(type_prop_layers, roi_align_incompatible_num_rois) { - const auto data = make_shared(element::f32, Shape{10, 3, 5, 5}); - const auto rois = make_shared(element::f32, PartialShape{1, Dimension{}}); - const auto batch_indices = make_shared(element::i32, Shape{2}); + const auto data = make_shared(element::Type_t::f32, Shape{10, 3, 5, 5}); + const auto rois = + make_shared(element::Type_t::f32, PartialShape{1, Dimension{}}); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{2}); // the first dimension of rois and batch_indices should be equal ASSERT_THROW(make_shared(data, rois, batch_indices, 3, 4, 1, 1.0f, "avg"), ngraph::NodeValidationFailure); @@ -60,9 +62,9 @@ TEST(type_prop_layers, roi_align_incompatible_num_rois) 
TEST(type_prop_layers, roi_align_incompatible_input_rank) { - const auto data = make_shared(element::f32, Shape{1, 10, 3, 5, 5}); - const auto rois = make_shared(element::f32, Shape{1, 4}); - const auto batch_indices = make_shared(element::i32, Shape{1}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 10, 3, 5, 5}); + const auto rois = make_shared(element::Type_t::f32, Shape{1, 4}); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{1}); // data rank needs to be 4 ASSERT_THROW(make_shared(data, rois, batch_indices, 3, 4, 1, 1.0f, "avg"), ngraph::NodeValidationFailure); @@ -70,9 +72,9 @@ TEST(type_prop_layers, roi_align_incompatible_input_rank) TEST(type_prop_layers, roi_align_incompatible_rois_second_dim) { - const auto data = make_shared(element::f32, Shape{10, 3, 5, 5}); - const auto rois = make_shared(element::f32, Shape{1, 5}); - const auto batch_indices = make_shared(element::i32, Shape{1}); + const auto data = make_shared(element::Type_t::f32, Shape{10, 3, 5, 5}); + const auto rois = make_shared(element::Type_t::f32, Shape{1, 5}); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{1}); // the second dim of rois needs to be 4 ASSERT_THROW(make_shared(data, rois, batch_indices, 3, 4, 1, 1.0f, "avg"), ngraph::NodeValidationFailure); diff --git a/ngraph/test/type_prop/roi_pooling.cpp b/ngraph/test/type_prop/roi_pooling.cpp new file mode 100644 index 00000000000000..f9ce17a2b58966 --- /dev/null +++ b/ngraph/test/type_prop/roi_pooling.cpp @@ -0,0 +1,138 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" + +using namespace std; +using namespace ngraph; + +TEST(type_prop, roi_pooling_basic_shape_inference) +{ + const auto feat_maps = make_shared(element::Type_t::f32, Shape{1, 3, 6, 6}); + const auto rois = make_shared(element::Type_t::f32, Shape{4, 5}); + const auto op = make_shared(feat_maps, rois, Shape{2, 2}, 0.625f); + ASSERT_EQ(op->get_method(), "max"); + ASSERT_EQ(op->get_shape(), (Shape{4, 3, 2, 2})); +} + +TEST(type_prop, roi_pooling_dynamic_channels_dim) +{ + const auto feat_maps = + make_shared(element::Type_t::f32, PartialShape{1, Dimension(), 6, 6}); + const auto rois = make_shared(element::Type_t::f32, Shape{4, 5}); + const auto op = make_shared(feat_maps, rois, Shape{2, 2}, 0.625f, "max"); + ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension(), 2, 2})); +} + +TEST(type_prop, roi_pooling_dynamic_num_rois_dim) +{ + const auto feat_maps = make_shared(element::Type_t::f32, Shape{1, 3, 6, 6}); + const auto rois = + make_shared(element::Type_t::f32, PartialShape{Dimension(), 5}); + const auto op = make_shared(feat_maps, rois, Shape{2, 2}, 0.625f); + ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(), 3, 2, 2})); +} + +TEST(type_prop, roi_pooling_dynamic_rank_feat_maps) +{ + const auto feat_maps = + make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto rois = make_shared(element::Type_t::f32, Shape{4, 5}); + const auto op = make_shared(feat_maps, rois, Shape{2, 2}, 0.625f); + ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension(), 2, 2})); +} + +TEST(type_prop, roi_pooling_dynamic_rank_rois) +{ + const auto feat_maps = make_shared(element::Type_t::f32, Shape{1, 3, 6, 6}); + const auto rois = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto op = make_shared(feat_maps, rois, Shape{2, 2}, 0.625f); + ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(), 3, 2, 2})); +} + +TEST(type_prop, roi_pooling_incompatible_input_rank) +{ + const auto feat_maps = make_shared(element::Type_t::f32, Shape{1, 3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f32, Shape{3, 5}); + // feat_maps must be of rank 4 + ASSERT_THROW(make_shared(feat_maps, rois, Shape{2, 2}, 0.625f, "max"), + ngraph::NodeValidationFailure); +} + +TEST(type_prop, roi_pooling_incompatible_pooling_shape) +{ + Shape pool_shape{2, 2, 2}; + const auto feat_maps = make_shared(element::Type_t::f32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f32, Shape{3, 5}); + // pool_shape must be of rank 2 {pooled_h, pooled_w} + ASSERT_THROW(make_shared(feat_maps, rois, pool_shape, 0.625f, "max"), + ngraph::NodeValidationFailure); +} + +TEST(type_prop, roi_pooling_incompatible_rois_second_dim) +{ + const auto feat_maps = make_shared(element::Type_t::f32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f32, Shape{3, 4}); + // the second dim of rois must be 5. 
[batch_id, x_1, y_1, x_2, y_2] + ASSERT_THROW(make_shared(feat_maps, rois, Shape{2, 2}, 0.625f, "max"), + ngraph::NodeValidationFailure); +} + +TEST(type_prop, roi_pooling_incompatible_feature_maps_element_type) +{ + const auto feat_maps = make_shared(element::Type_t::i32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f32, Shape{3, 5}); + // feat_maps element type must be floating point type + ASSERT_THROW(make_shared(feat_maps, rois, Shape{2, 2}, 0.625f, "max"), + ngraph::NodeValidationFailure); +} + +TEST(type_prop, roi_pooling_incompatible_rois_element_type) +{ + const auto feat_maps = make_shared(element::Type_t::f32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f16, Shape{3, 5}); + // rois element type must be equal to feat_maps element type (floating point type) + ASSERT_THROW(make_shared(feat_maps, rois, Shape{2, 2}, 0.625f, "bilinear"), + ngraph::NodeValidationFailure); +} + +TEST(type_prop, roi_pooling_invalid_pooling_method) +{ + const auto feat_maps = make_shared(element::Type_t::f32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f16, Shape{3, 5}); + // ROIPooling method is invalid: not max nor bilinear + ASSERT_THROW(make_shared(feat_maps, rois, Shape{2, 2}, 0.625f, "invalid"), + ngraph::NodeValidationFailure); +} + +TEST(type_prop, roi_pooling_invalid_spatial_scale) +{ + const auto feat_maps = make_shared(element::Type_t::f32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f16, Shape{3, 5}); + // ROIPooling spatial scale attribute must be a positive floating point number + ASSERT_THROW(make_shared(feat_maps, rois, Shape{2, 2}, -0.625f, "max"), + ngraph::NodeValidationFailure); +} + +TEST(type_prop, roi_pooling_invalid_pooled_size) +{ + const auto feat_maps = make_shared(element::Type_t::f32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f16, Shape{3, 5}); + // ROIPooling pooled_h and pooled_w must be non-negative integers + ASSERT_THROW(make_shared(feat_maps, rois, Shape{1, 0}, 0.625f, "max"), + ngraph::NodeValidationFailure); +} diff --git a/ngraph/test/type_prop/round.cpp b/ngraph/test/type_prop/round.cpp index dde3c7a7f01f89..dad253981a846b 100644 --- a/ngraph/test/type_prop/round.cpp +++ b/ngraph/test/type_prop/round.cpp @@ -23,57 +23,60 @@ using namespace ngraph; TEST(type_prop, rounding_to_even) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_TO_EVEN); - EXPECT_EQ(round_func->get_element_type(), element::f32); + EXPECT_EQ(round_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(round_func->get_shape(), (Shape{1, 3, 6})); } TEST(type_prop, rounding_away) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO); - EXPECT_EQ(round_func->get_element_type(), element::f32); + EXPECT_EQ(round_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(round_func->get_shape(), (Shape{1, 3, 6})); } TEST(type_prop, rounding_to_even_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_TO_EVEN); - EXPECT_EQ(round_func->get_element_type(), element::f32); + 
EXPECT_EQ(round_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(round_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); // rank unknown auto round_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic()), + make_shared(element::Type_t::f32, PartialShape::dynamic()), op::v5::Round::RoundMode::HALF_TO_EVEN); ASSERT_TRUE(round_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, rounding_away_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO); - EXPECT_EQ(round_func->get_element_type(), element::f32); + EXPECT_EQ(round_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(round_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); // rank unknown auto round_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic()), + make_shared(element::Type_t::f32, PartialShape::dynamic()), op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO); ASSERT_TRUE(round_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, rounding_to_even_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_TO_EVEN); - EXPECT_EQ(round_func->get_element_type(), element::f32); + EXPECT_EQ(round_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(round_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); ASSERT_TRUE(round_func->get_output_partial_shape(0).rank().is_static()); @@ -81,10 +84,11 @@ TEST(type_prop, rounding_to_even_partial_static_rank) TEST(type_prop, rounding_away_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO); - EXPECT_EQ(round_func->get_element_type(), element::f32); + EXPECT_EQ(round_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(round_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); ASSERT_TRUE(round_func->get_output_partial_shape(0).rank().is_static()); diff --git a/ngraph/test/type_prop/scatter_elements_update.cpp b/ngraph/test/type_prop/scatter_elements_update.cpp index d1149b8e3778d1..02b28505ad5ed1 100644 --- a/ngraph/test/type_prop/scatter_elements_update.cpp +++ b/ngraph/test/type_prop/scatter_elements_update.cpp @@ -29,10 +29,10 @@ TEST(type_prop, scatter_elements_update_output_shape) Shape axis_shape{}; Shape expected_output_shape{2, 4, 5, 7}; - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape); + auto data = make_shared(element::Type_t::f32, data_shape); + auto indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape); auto scatter = 
make_shared(data, indices, updates, axis); @@ -46,10 +46,10 @@ TEST(type_prop, scatter_elements_update_output_partial_dyn_shape) PartialShape updates_shape{2, 2, Dimension::dynamic()}; PartialShape axis_shape = PartialShape::dynamic(); - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape); + auto data = make_shared(element::Type_t::f32, data_shape); + auto indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape); auto scatter = make_shared(data, indices, updates, axis); @@ -63,10 +63,10 @@ TEST(type_prop, scatter_elements_update_output_full_dyn_shape) PartialShape updates_shape = PartialShape::dynamic(); PartialShape axis_shape = PartialShape::dynamic(); - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape); + auto data = make_shared(element::Type_t::f32, data_shape); + auto indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape); auto scatter = make_shared(data, indices, updates, axis); @@ -80,10 +80,10 @@ TEST(type_prop, scatter_elements_update_axis_validation) Shape updates_shape{2, 2, 2, 2}; Shape axis_shape{}; - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape, std::vector{8}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape, std::vector{8}); try { @@ -107,10 +107,10 @@ TEST(type_prop, scatter_elements_updates_indices_shape) Shape updates_shape{2, 2, 2, 2}; Shape axis_shape{}; - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape, std::vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape, std::vector{1}); try { @@ -135,10 +135,10 @@ TEST(type_prop, scatter_elements_updates_indices_rank) Shape updates_shape{2, 2, 2, 2}; Shape axis_shape{}; - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape, std::vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape, std::vector{1}); try { @@ -163,10 +163,10 @@ TEST(type_prop, scatter_elements_data_indices_rank) Shape updates_shape{2, 2}; Shape 
axis_shape{}; - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape, std::vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape, std::vector{1}); try { diff --git a/ngraph/test/type_prop/scatter_nd_update.cpp b/ngraph/test/type_prop/scatter_nd_update.cpp index 06010fcb3787fc..a00baaa2610470 100644 --- a/ngraph/test/type_prop/scatter_nd_update.cpp +++ b/ngraph/test/type_prop/scatter_nd_update.cpp @@ -26,9 +26,9 @@ TEST(type_prop, scatter_nd_update_v3_fail_indices_element_type) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::f16, indices_shape); - auto U = make_shared(element::f32, updates_shape); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::f16, indices_shape); + auto U = make_shared(element::Type_t::f32, updates_shape); try { auto G = make_shared(R, I, U); @@ -51,9 +51,9 @@ TEST(type_prop, scatter_nd_update_v3_fail_updates_rank) Shape indices_shape{1}; Shape updates_shape{3, 3, 3}; Shape out_shape{3, 3, 3}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::i32, indices_shape); - auto U = make_shared(element::f32, updates_shape); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto U = make_shared(element::Type_t::f32, updates_shape); try { auto G = make_shared(R, I, U); @@ -78,9 +78,9 @@ TEST(type_prop, scatter_nd_update_fail_updates_element_type) Shape indices_shape{1}; Shape updates_shape{3, 3}; Shape out_shape{3, 3, 3}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::i32, indices_shape); - auto U = make_shared(element::i32, updates_shape); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto U = make_shared(element::Type_t::i32, updates_shape); try { auto G = make_shared(R, I, U); @@ -104,9 +104,9 @@ TEST(type_prop, scatter_nd_update_fail_updates_shape) Shape indices_shape{1}; Shape updates_shape{2, 3}; Shape out_shape{3, 3, 3}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::i32, indices_shape); - auto U = make_shared(element::f32, updates_shape); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto U = make_shared(element::Type_t::f32, updates_shape); try { auto G = make_shared(R, I, U); @@ -132,9 +132,9 @@ TEST(type_prop, scatter_nd_update_fail_indices_last_dim) Shape indices_shape{2, 4}; Shape updates_shape{2, 3, 3}; Shape out_shape{3, 3, 3}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::i32, indices_shape); - auto U = make_shared(element::f32, updates_shape); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto U = make_shared(element::Type_t::f32, updates_shape); try { auto G = make_shared(R, I, U); diff --git a/ngraph/test/type_prop/scatter_update.cpp b/ngraph/test/type_prop/scatter_update.cpp index 
4f113b22988427..3135ab79b38b40 100644 --- a/ngraph/test/type_prop/scatter_update.cpp +++ b/ngraph/test/type_prop/scatter_update.cpp @@ -26,10 +26,10 @@ TEST(type_prop, scatter_update_v3_fail_indices_element_type) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::f16, indices_shape); - auto U = make_shared(element::f32, updates_shape); - auto A = op::Constant::create(element::i64, Shape{}, {1}); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::f16, indices_shape); + auto U = make_shared(element::Type_t::f32, updates_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {1}); try { auto G = make_shared(R, I, U, A); @@ -52,10 +52,10 @@ TEST(type_prop, scatter_update_v3_fail_updates_data_et_not_equal) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::u32, updates_shape); - auto A = op::Constant::create(element::u32, Shape{1}, {1}); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::u32, updates_shape); + auto A = op::Constant::create(element::Type_t::u32, Shape{1}, {1}); try { auto G = make_shared(R, I, U, A); @@ -78,10 +78,10 @@ TEST(type_prop, scatter_update_v3_fail_axis_element_type) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::i16, ref_shape); - auto I = make_shared(element::u64, indices_shape); - auto U = make_shared(element::i16, updates_shape); - auto A = op::Constant::create(element::f32, Shape{1}, {1.5f}); + auto R = make_shared(element::Type_t::i16, ref_shape); + auto I = make_shared(element::Type_t::u64, indices_shape); + auto U = make_shared(element::Type_t::i16, updates_shape); + auto A = op::Constant::create(element::Type_t::f32, Shape{1}, {1.5f}); try { auto G = make_shared(R, I, U, A); @@ -104,10 +104,10 @@ TEST(type_prop, scatter_update_v3_fail_axis_shape) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::u8, ref_shape); - auto I = make_shared(element::u16, indices_shape); - auto U = make_shared(element::u8, updates_shape); - auto A = op::Constant::create(element::u8, Shape{2}, {1, 5}); + auto R = make_shared(element::Type_t::u8, ref_shape); + auto I = make_shared(element::Type_t::u16, indices_shape); + auto U = make_shared(element::Type_t::u8, updates_shape); + auto A = op::Constant::create(element::Type_t::u8, Shape{2}, {1, 5}); try { auto G = make_shared(R, I, U, A); @@ -130,10 +130,10 @@ TEST(type_prop, scatter_update_v3_fail_updates_rank) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 1, 4}; - auto R = make_shared(element::f64, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::f64, updates_shape); - auto A = op::Constant::create(element::u8, Shape{}, {0}); + auto R = make_shared(element::Type_t::f64, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::f64, updates_shape); + auto A = op::Constant::create(element::Type_t::u8, Shape{}, {0}); try { auto G = make_shared(R, I, U, A); @@ -157,10 +157,10 @@ TEST(type_prop, 
scatter_update_v3_fail_updates_shape_axis) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::u64, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::u64, updates_shape); - auto A = op::Constant::create(element::u16, Shape{}, {0}); + auto R = make_shared(element::Type_t::u64, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::u64, updates_shape); + auto A = op::Constant::create(element::Type_t::u16, Shape{}, {0}); try { auto G = make_shared(R, I, U, A); @@ -185,10 +185,10 @@ TEST(type_prop, scatter_update_v3_fail_updates_shape_indices) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 3, 1, 4}; - auto R = make_shared(element::u32, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::u32, updates_shape); - auto A = op::Constant::create(element::i32, Shape{}, {1}); + auto R = make_shared(element::Type_t::u32, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::u32, updates_shape); + auto A = op::Constant::create(element::Type_t::i32, Shape{}, {1}); try { auto G = make_shared(R, I, U, A); @@ -213,10 +213,10 @@ TEST(type_prop, scatter_update_v3_fail_updates_shape_data_before_axis) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{3, 2, 1, 4}; - auto R = make_shared(element::u16, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::u16, updates_shape); - auto A = op::Constant::create(element::i8, Shape{}, {1}); + auto R = make_shared(element::Type_t::u16, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::u16, updates_shape); + auto A = op::Constant::create(element::Type_t::i8, Shape{}, {1}); try { auto G = make_shared(R, I, U, A); @@ -241,10 +241,10 @@ TEST(type_prop, scatter_update_v3_fail_updates_shape_data_after_axis) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 5}; - auto R = make_shared(element::i8, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::i8, updates_shape); - auto A = op::Constant::create(element::i16, Shape{}, {1}); + auto R = make_shared(element::Type_t::i8, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::i8, updates_shape); + auto A = op::Constant::create(element::Type_t::i16, Shape{}, {1}); try { auto G = make_shared(R, I, U, A); @@ -269,13 +269,13 @@ TEST(type_prop, scatter_update_v3) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::i8, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::i8, updates_shape); - auto A = op::Constant::create(element::i16, Shape{}, {1}); + auto R = make_shared(element::Type_t::i8, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::i8, updates_shape); + auto A = op::Constant::create(element::Type_t::i16, Shape{}, {1}); auto scatter_update = make_shared(R, I, U, A); - EXPECT_EQ(scatter_update->get_output_element_type(0), element::i8); + EXPECT_EQ(scatter_update->get_output_element_type(0), element::Type_t::i8); EXPECT_EQ(scatter_update->get_output_shape(0), ref_shape); } @@ -284,12 +284,12 @@ 
TEST(type_prop, scatter_update_v3_dynamic_data_shape) PartialShape ref_shape = PartialShape::dynamic(); Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::i8, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::i8, updates_shape); - auto A = op::Constant::create(element::i16, Shape{}, {1}); + auto R = make_shared(element::Type_t::i8, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::i8, updates_shape); + auto A = op::Constant::create(element::Type_t::i16, Shape{}, {1}); auto scatter_update = make_shared(R, I, U, A); - EXPECT_EQ(scatter_update->get_output_element_type(0), element::i8); + EXPECT_EQ(scatter_update->get_output_element_type(0), element::Type_t::i8); EXPECT_TRUE(scatter_update->get_output_partial_shape(0).is_dynamic()); } diff --git a/ngraph/test/type_prop/select.cpp b/ngraph/test/type_prop/select.cpp index e70cff09043ce0..c98f2e6dc711fa 100644 --- a/ngraph/test/type_prop/select.cpp +++ b/ngraph/test/type_prop/select.cpp @@ -25,19 +25,19 @@ using namespace ngraph; TEST(type_prop, select_deduce) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::f32, Shape{2, 4}); auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); - ASSERT_EQ(bc->get_element_type(), element::f32); + ASSERT_EQ(bc->get_element_type(), element::Type_t::f32); ASSERT_EQ(bc->get_shape(), (Shape{2, 4})); } TEST(type_prop, select_shape_mismatch_a) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{3, 5}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{3, 5}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::f32, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); @@ -56,9 +56,9 @@ TEST(type_prop, select_shape_mismatch_a) TEST(type_prop, select_shape_mismatch_b) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{3, 5}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{3, 5}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::f32, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); @@ -77,9 +77,9 @@ TEST(type_prop, select_shape_mismatch_b) TEST(type_prop, select_shape_mismatch_c) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{3, 5}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::f32, Shape{3, 5}); try { auto bc = make_shared(tv0_2_4_param_0, 
tv0_2_4_param_1, tv0_2_4_param_2); @@ -98,9 +98,9 @@ TEST(type_prop, select_shape_mismatch_c) TEST(type_prop, select_elem_mismatch_a) { - auto tv0_2_4_param_0 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::f32, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); @@ -120,9 +120,9 @@ TEST(type_prop, select_elem_mismatch_a) TEST(type_prop, select_elem_mismatch_bc) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::i32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::i32, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); @@ -142,21 +142,21 @@ TEST(type_prop, select_elem_mismatch_bc) TEST(type_prop, select_partial_all_rank_dynamic) { - auto param0 = make_shared(element::boolean, PartialShape::dynamic()); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::boolean, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(sel->get_output_partial_shape(0).rank().is_dynamic()); } TEST(type_prop, select_partial_all_rank_dynamic_arg0_et_dynamic_arg1_arg2_et_mismatch) { - auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = make_shared(element::i32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); try { @@ -177,78 +177,78 @@ TEST(type_prop, select_partial_all_rank_dynamic_arg0_et_dynamic_arg1_arg2_et_mis TEST(type_prop, select_partial_all_rank_dynamic_arg0_arg1_et_dynamic) { - auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param1 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param2 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(sel->get_output_partial_shape(0).rank().is_dynamic()); } TEST(type_prop, select_partial_all_rank_dynamic_arg0_arg2_et_dynamic) { - auto param0 = make_shared(element::dynamic, 
PartialShape::dynamic()); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = make_shared(element::dynamic, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(sel->get_output_partial_shape(0).rank().is_dynamic()); } TEST(type_prop, select_partial_all_rank_dynamic_arg0_arg1_arg2_et_dynamic) { - auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param1 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param2 = make_shared(element::dynamic, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::dynamic); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::dynamic); ASSERT_TRUE(sel->get_output_partial_shape(0).rank().is_dynamic()); } TEST(type_prop, select_partial_arg0_rank_dynamic_static_arg1_arg2_rank_dynamic_ok) { - auto param0 = - make_shared(element::boolean, PartialShape{2, Dimension::dynamic(), 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::boolean, + PartialShape{2, Dimension::dynamic(), 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE( sel->get_output_partial_shape(0).same_scheme(PartialShape{2, Dimension::dynamic(), 3})); } TEST(type_prop, select_partial_arg1_rank_dynamic_static_arg0_arg2_rank_dynamic_ok) { - auto param0 = make_shared(element::boolean, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::boolean, PartialShape::dynamic()); auto param1 = - make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); - auto param2 = make_shared(element::f32, PartialShape::dynamic()); + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto param2 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE( sel->get_output_partial_shape(0).same_scheme(PartialShape{2, Dimension::dynamic(), 3})); } TEST(type_prop, select_partial_arg2_rank_dynamic_static_arg0_arg1_rank_dynamic_ok) { - auto param0 = make_shared(element::boolean, PartialShape::dynamic()); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::boolean, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto param2 = - make_shared(element::f32, PartialShape{2, 
Dimension::dynamic(), 3}); + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE( sel->get_output_partial_shape(0).same_scheme(PartialShape{2, Dimension::dynamic(), 3})); } @@ -256,15 +256,15 @@ TEST(type_prop, select_partial_arg2_rank_dynamic_static_arg0_arg1_rank_dynamic_o TEST(type_prop, select_partial_all_rank_static_dynamic_ok) { auto param0 = make_shared( - element::boolean, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); + element::Type_t::boolean, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); auto param1 = make_shared( - element::f32, PartialShape{Dimension::dynamic(), 8, Dimension::dynamic()}); + element::Type_t::f32, PartialShape{Dimension::dynamic(), 8, Dimension::dynamic()}); auto param2 = make_shared( - element::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3}); + element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3}); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(sel->get_output_partial_shape(0).is_static()); ASSERT_EQ(sel->get_output_shape(0), (Shape{2, 8, 3})); } @@ -272,11 +272,11 @@ TEST(type_prop, select_partial_all_rank_static_dynamic_ok) TEST(type_prop, select_partial_all_rank_static_intransitive_incompatibility) { auto param0 = make_shared( - element::boolean, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); + element::Type_t::boolean, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); auto param1 = make_shared( - element::f32, PartialShape{Dimension::dynamic(), 8, Dimension::dynamic()}); + element::Type_t::f32, PartialShape{Dimension::dynamic(), 8, Dimension::dynamic()}); auto param2 = - make_shared(element::f32, PartialShape{3, Dimension::dynamic(), 3}); + make_shared(element::Type_t::f32, PartialShape{3, Dimension::dynamic(), 3}); try { @@ -331,43 +331,71 @@ TEST_P(DeduceV1SelectTest, output_shape) INSTANTIATE_TEST_CASE_P( type_prop, DeduceV1SelectTest, - ::testing::Values(SelectParams({{2, 4}, {2, 4}, {2, 4}, {2, 4}}, - {element::boolean, element::f32, element::f32, element::f32}, - op::AutoBroadcastType::NONE), - SelectParams({{2, 4}, {2, 4}, {2, 4}, {2, 4}}, - {element::boolean, element::f32, element::f32, element::f32}, - op::AutoBroadcastType::NUMPY), - SelectParams({{}, {2, 4}, {2, 4}, {2, 4}}, - {element::boolean, element::f32, element::f32, element::f32}, - op::AutoBroadcastType::NUMPY), - SelectParams({{}, {4}, {2, 4}, {2, 4}}, - {element::boolean, element::f32, element::dynamic, element::f32}, - op::AutoBroadcastType::NUMPY), - SelectParams({{}, {2, 4}, {4}, {2, 4}}, - {element::boolean, element::f32, element::f32, element::f32}, - op::AutoBroadcastType::NUMPY), - SelectParams({{4}, {2, 4}, {4}, {2, 4}}, - {element::boolean, element::i8, element::dynamic, element::i8}, - op::AutoBroadcastType::NUMPY), - SelectParams({{4}, {4}, {2, 4}, {2, 4}}, - {element::dynamic, element::dynamic, element::i8, element::i8}, - op::AutoBroadcastType::NUMPY), - SelectParams({{2}, {2}, {2, 4}, {2, 4}}, - {element::boolean, element::f32, element::dynamic, element::f32}, - {op::AutoBroadcastType::PDPD, 0}), - // TODO: Whats the right behavior here? 
- // SelectParams({{2}, {2, 4}, {2}, {2, 4}}, {element::boolean, element::f32, - // element::dynamic, element::f32}, {op::AutoBroadcastType::PDPD, 0}), - SelectParams({{4}, {4}, {2, 4}, {2, 4}}, - {element::boolean, element::f32, element::dynamic, element::f32}, - {op::AutoBroadcastType::PDPD, 1})), + ::testing::Values( + SelectParams({{2, 4}, {2, 4}, {2, 4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::f32, + element::Type_t::f32}, + op::AutoBroadcastType::NONE), + SelectParams({{2, 4}, {2, 4}, {2, 4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::f32, + element::Type_t::f32}, + op::AutoBroadcastType::NUMPY), + SelectParams({{}, {2, 4}, {2, 4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::f32, + element::Type_t::f32}, + op::AutoBroadcastType::NUMPY), + SelectParams({{}, {4}, {2, 4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::dynamic, + element::Type_t::f32}, + op::AutoBroadcastType::NUMPY), + SelectParams({{}, {2, 4}, {4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::f32, + element::Type_t::f32}, + op::AutoBroadcastType::NUMPY), + SelectParams({{4}, {2, 4}, {4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::i8, + element::Type_t::dynamic, + element::Type_t::i8}, + op::AutoBroadcastType::NUMPY), + SelectParams({{4}, {4}, {2, 4}, {2, 4}}, + {element::Type_t::dynamic, + element::Type_t::dynamic, + element::Type_t::i8, + element::Type_t::i8}, + op::AutoBroadcastType::NUMPY), + SelectParams({{2}, {2}, {2, 4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::dynamic, + element::Type_t::f32}, + {op::AutoBroadcastType::PDPD, 0}), + // TODO: What's the right behavior here? 
+ // SelectParams({{2}, {2, 4}, {2}, {2, 4}}, {element::Type_t::boolean, element::Type_t::f32, + // element::Type_t::dynamic, element::Type_t::f32}, {op::AutoBroadcastType::PDPD, 0}), + SelectParams({{4}, {4}, {2, 4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::dynamic, + element::Type_t::f32}, + {op::AutoBroadcastType::PDPD, 1})), PrintToDummyParamName()); TEST(type_prop, select_v1_partial_shape) { - auto a = make_shared(element::boolean, PartialShape::dynamic()); - auto b = make_shared(element::f32, Shape{2, 4}); - auto c = make_shared(element::f32, Shape{2, 4}); + auto a = make_shared(element::Type_t::boolean, PartialShape::dynamic()); + auto b = make_shared(element::Type_t::f32, Shape{2, 4}); + auto c = make_shared(element::Type_t::f32, Shape{2, 4}); auto select = make_shared(a, b, c, op::AutoBroadcastType::NONE); ASSERT_EQ(select->get_shape(), (Shape{2, 4})); @@ -375,9 +403,11 @@ TEST(type_prop, select_v1_partial_shape) TEST(type_prop, select_v1_partial_shape_autob) { - auto a = make_shared(element::boolean, PartialShape{Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{Dimension::dynamic()}); - auto c = make_shared(element::f32, PartialShape{2, Dimension::dynamic()}); + auto a = + make_shared(element::Type_t::boolean, PartialShape{Dimension::dynamic()}); + auto b = make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic()}); + auto c = + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic()}); auto select = make_shared(a, b, c); ASSERT_TRUE( @@ -386,9 +416,9 @@ TEST(type_prop, select_v1_partial_shape_autob) TEST(type_prop, select_v1_wrong_et) { - auto param0 = make_shared(element::i8, Shape{2, 4}); - auto param1 = make_shared(element::f32, Shape{2, 4}); - auto param2 = make_shared(element::f32, Shape{2, 4}); + auto param0 = make_shared(element::Type_t::i8, Shape{2, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 4}); try { @@ -408,9 +438,9 @@ TEST(type_prop, select_v1_wrong_et) TEST(type_prop, select_v1_et_mismatch) { - auto param0 = make_shared(element::boolean, Shape{2, 4}); - auto param1 = make_shared(element::f32, Shape{2, 4}); - auto param2 = make_shared(element::i8, Shape{2, 4}); + auto param0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto param2 = make_shared(element::Type_t::i8, Shape{2, 4}); try { @@ -430,9 +460,9 @@ TEST(type_prop, select_v1_et_mismatch) TEST(type_prop, select_v1_shape_mismatch) { - auto param0 = make_shared(element::boolean, Shape{2, 4}); - auto param1 = make_shared(element::f32, Shape{2, 3}); - auto param2 = make_shared(element::f32, Shape{2, 4}); + auto param0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 3}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 4}); try { @@ -452,9 +482,10 @@ TEST(type_prop, select_v1_shape_mismatch) TEST(type_prop, select_v1_partial_shape_mismatch) { auto param0 = - make_shared(element::boolean, PartialShape{3, Dimension::dynamic()}); - auto param1 = make_shared(element::f32, PartialShape{2, Dimension::dynamic()}); - auto param2 = make_shared(element::f32, Shape{2, 4}); + make_shared(element::Type_t::boolean, PartialShape{3, Dimension::dynamic()}); + auto param1 = + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic()}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 4}); try 
{ diff --git a/ngraph/test/type_prop/shape_of.cpp b/ngraph/test/type_prop/shape_of.cpp index 812b9771a22312..9ea09f6cc28c56 100644 --- a/ngraph/test/type_prop/shape_of.cpp +++ b/ngraph/test/type_prop/shape_of.cpp @@ -23,85 +23,85 @@ using namespace ngraph; TEST(type_prop, shape_of_v0) { - auto a = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_EQ(so->get_shape(), Shape{4}); } TEST(type_prop, shape_of_partial_et_dynamic_v0) { - auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::dynamic, Shape{1, 2, 3, 4}); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_EQ(so->get_shape(), Shape{4}); } TEST(type_prop, shape_of_partial_rank_static_dynamic_v0) { auto a = make_shared( - element::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 4}); + element::Type_t::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 4}); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_EQ(so->get_shape(), Shape{4}); } TEST(type_prop, shape_of_partial_rank_dynamic_v0) { - auto a = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_TRUE(so->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, shape_of_v3) { - auto a = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_EQ(so->get_shape(), Shape{4}); } TEST(type_prop, shape_of_partial_et_dynamic_v3) { - auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::dynamic, Shape{1, 2, 3, 4}); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_EQ(so->get_shape(), Shape{4}); } TEST(type_prop, shape_of_partial_rank_static_dynamic_v3) { auto a = make_shared( - element::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 4}); + element::Type_t::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 4}); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_EQ(so->get_shape(), Shape{4}); } TEST(type_prop, shape_of_partial_rank_dynamic_v3) { - auto a = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_TRUE(so->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, shape_of_output_type_v3) { - auto a = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto so = make_shared(a, 
element::i32); + auto a = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto so = make_shared(a, element::Type_t::i32); try { - auto sx = make_shared(a, element::i8); + auto sx = make_shared(a, element::Type_t::i8); FAIL() << "Invalid output_type not detected"; } catch (NodeValidationFailure) @@ -113,7 +113,7 @@ TEST(type_prop, shape_of_output_type_v3) } try { - auto sx = make_shared(a, element::i16); + auto sx = make_shared(a, element::Type_t::i16); FAIL() << "Invalid output_type not detected"; } catch (NodeValidationFailure) @@ -125,7 +125,7 @@ TEST(type_prop, shape_of_output_type_v3) } try { - auto sx = make_shared(a, element::f32); + auto sx = make_shared(a, element::Type_t::f32); FAIL() << "Invalid output_type not detected"; } catch (NodeValidationFailure) @@ -136,6 +136,6 @@ TEST(type_prop, shape_of_output_type_v3) FAIL() << "Node validation error not thrown"; } - ASSERT_EQ(so->get_output_element_type(0), element::i32); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i32); ASSERT_EQ(so->get_shape(), Shape{4}); } diff --git a/ngraph/test/type_prop/shuffle_channels.cpp b/ngraph/test/type_prop/shuffle_channels.cpp index abef93c472b35c..ced139cea257e7 100644 --- a/ngraph/test/type_prop/shuffle_channels.cpp +++ b/ngraph/test/type_prop/shuffle_channels.cpp @@ -25,7 +25,7 @@ TEST(type_prop, shuffle_channels_axis_validation) { try { - const auto data = make_shared(element::f64, Shape{1, 2, 3, 4}); + const auto data = make_shared(element::Type_t::f64, Shape{1, 2, 3, 4}); const auto shuffle_channels = make_shared(data, -5, 5); FAIL() << "ShuffleChannels validation did not work. Op node was created with incorrect " "params."; @@ -40,7 +40,7 @@ TEST(type_prop, shuffle_channels_axis_validation) TEST(type_prop, shuffle_channels_negative_axis_calculation) { - const auto data = make_shared(element::f64, Shape{1, 2, 3, 4}); + const auto data = make_shared(element::Type_t::f64, Shape{1, 2, 3, 4}); const auto shuffle_channels = make_shared(data, -3, 2); @@ -51,7 +51,7 @@ TEST(type_prop, shuffle_channels_invalid_input_shape) { try { - const auto data = make_shared(element::f64, Shape{}); + const auto data = make_shared(element::Type_t::f64, Shape{}); const auto shuffle_channels = make_shared(data, 0, 1); FAIL() << "ShuffleChannels validation did not work. Op node was created with incorrect " "params."; @@ -67,7 +67,7 @@ TEST(type_prop, shuffle_channels_invalid_groups_value) { try { - const auto data = make_shared(element::f64, Shape{1, 2, 3, 15}); + const auto data = make_shared(element::Type_t::f64, Shape{1, 2, 3, 15}); const auto shuffle_channels = make_shared(data, -1, 2); FAIL() << "ShuffleChannels validation did not work. 
Op node was created with incorrect " "params."; diff --git a/ngraph/test/type_prop/softmax.cpp b/ngraph/test/type_prop/softmax.cpp index e76761f061880e..728cb5a1b45d99 100644 --- a/ngraph/test/type_prop/softmax.cpp +++ b/ngraph/test/type_prop/softmax.cpp @@ -23,7 +23,7 @@ using namespace ngraph; TEST(type_prop, softmax_default_axis) { const Shape arg_shape{2, 3}; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); auto sm = make_shared(arg); ASSERT_EQ(sm->get_axis(), 1); } @@ -31,7 +31,7 @@ TEST(type_prop, softmax_default_axis) TEST(type_prop, softmax_out_of_bound_axis) { const Shape arg_shape{2, 3}; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); // axis cannot be a negative number ASSERT_THROW(make_shared(arg, -1), ngraph::NodeValidationFailure); } diff --git a/ngraph/test/type_prop/softplus.cpp b/ngraph/test/type_prop/softplus.cpp index 7e40369209bc22..918f05d993c091 100644 --- a/ngraph/test/type_prop/softplus.cpp +++ b/ngraph/test/type_prop/softplus.cpp @@ -23,31 +23,33 @@ using namespace ngraph; TEST(type_prop, softplus) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto softplus_func = make_shared(data); - EXPECT_EQ(softplus_func->get_element_type(), element::f32); + EXPECT_EQ(softplus_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(softplus_func->get_shape(), (Shape{1, 3, 6})); } TEST(type_prop, softplus_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto softplus_func = make_shared(data); - EXPECT_EQ(softplus_func->get_element_type(), element::f32); + EXPECT_EQ(softplus_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(softplus_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); // rank unknown auto softplus_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic())); + make_shared(element::Type_t::f32, PartialShape::dynamic())); ASSERT_TRUE(softplus_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, softplus_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto softplus_func = make_shared(data); - EXPECT_EQ(softplus_func->get_element_type(), element::f32); + EXPECT_EQ(softplus_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(softplus_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); ASSERT_TRUE(softplus_func->get_output_partial_shape(0).rank().is_static()); diff --git a/ngraph/test/type_prop/space_to_batch.cpp b/ngraph/test/type_prop/space_to_batch.cpp index cd40078a0143b0..2367ff250bbb8e 100644 --- a/ngraph/test/type_prop/space_to_batch.cpp +++ b/ngraph/test/type_prop/space_to_batch.cpp @@ -23,70 +23,75 @@ using namespace ngraph; TEST(type_prop, space_to_batch_output_shape_2D) { - auto data = make_shared(element::f32, Shape{2, 128}); - auto block_shape = make_shared(element::i64, Shape{2}, vector{1, 5}); - auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); - auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); + auto data = make_shared(element::Type_t::f32, Shape{2, 128}); + auto block_shape = + 
make_shared(element::Type_t::i64, Shape{2}, vector{1, 5}); + auto pads_begin = + make_shared(element::Type_t::i64, Shape{2}, vector{0, 2}); + auto pads_end = + make_shared(element::Type_t::i64, Shape{2}, vector{0, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(space_to_batch->get_element_type(), element::f32); + ASSERT_EQ(space_to_batch->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_batch->get_shape(), (Shape{2 * 5, (128 + 2) / 5})); } TEST(type_prop, space_to_batch_output_shape_4D) { - auto data = make_shared(element::f32, Shape{2, 64, 64, 3}); + auto data = make_shared(element::Type_t::f32, Shape{2, 64, 64, 3}); auto block_shape = - make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1}); + make_shared(element::Type_t::i64, Shape{4}, vector{1, 10, 5, 1}); auto pads_begin = - make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); + make_shared(element::Type_t::i64, Shape{4}, vector{0, 3, 1, 0}); + auto pads_end = + make_shared(element::Type_t::i64, Shape{4}, vector{0, 3, 0, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(space_to_batch->get_element_type(), element::f32); + ASSERT_EQ(space_to_batch->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_batch->get_shape(), (Shape{2 * 10 * 5, (64 + 3 + 3) / 10, (64 + 1) / 5, 3})); } TEST(type_prop, space_to_batch_output_shape_5D) { - auto data = make_shared(element::f32, Shape{2, 32, 64, 128, 256}); + auto data = make_shared(element::Type_t::f32, Shape{2, 32, 64, 128, 256}); auto block_shape = - make_shared(element::i32, Shape{5}, vector{1, 6, 5, 1, 16}); + make_shared(element::Type_t::i32, Shape{5}, vector{1, 6, 5, 1, 16}); auto pads_begin = - make_shared(element::i32, Shape{5}, vector{0, 2, 0, 0, 0}); + make_shared(element::Type_t::i32, Shape{5}, vector{0, 2, 0, 0, 0}); auto pads_end = - make_shared(element::i32, Shape{5}, vector{0, 2, 1, 0, 0}); + make_shared(element::Type_t::i32, Shape{5}, vector{0, 2, 1, 0, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(space_to_batch->get_element_type(), element::f32); + ASSERT_EQ(space_to_batch->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_batch->get_shape(), (Shape{2 * 6 * 5 * 16, (32 + 2 + 2) / 6, (64 + 1) / 5, 128, 256 / 16})); } TEST(type_prop, space_to_batch_and_batch_to_space) { - auto data = make_shared(element::f32, Shape{2, 100, 1024, 3}); + auto data = make_shared(element::Type_t::f32, Shape{2, 100, 1024, 3}); auto block_shape = - make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); + make_shared(element::Type_t::i64, Shape{4}, vector{1, 12, 100, 2}); auto pads_begin = - make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); + make_shared(element::Type_t::i64, Shape{4}, vector{0, 3, 38, 1}); + auto pads_end = + make_shared(element::Type_t::i64, Shape{4}, vector{0, 5, 38, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(space_to_batch->get_element_type(), element::f32); + ASSERT_EQ(space_to_batch->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_batch->get_shape(), (Shape{2 * 12 * 100 * 2, (100 + 3 + 5) / 12, (1024 + 38 + 38) / 100, (3 + 1) / 2})); auto batch_to_space = make_shared(space_to_batch, block_shape, pads_begin, pads_end); - ASSERT_EQ(batch_to_space->get_element_type(), element::f32); + 
ASSERT_EQ(batch_to_space->get_element_type(), element::Type_t::f32); ASSERT_EQ(batch_to_space->get_shape(), (Shape{2, 100, 1024, 3})); } diff --git a/ngraph/test/type_prop/space_to_depth.cpp b/ngraph/test/type_prop/space_to_depth.cpp index 9c0ded0a64bf56..6055fcd16d5680 100644 --- a/ngraph/test/type_prop/space_to_depth.cpp +++ b/ngraph/test/type_prop/space_to_depth.cpp @@ -23,47 +23,47 @@ using namespace ngraph; TEST(type_prop, space_to_depth_output_shape_block_first_4D) { - auto A = make_shared(element::f32, Shape{1, 2, 64, 64}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2, 64, 64}); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; auto space_to_depth = make_shared(A, mode, 8); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 128, 8, 8})); } TEST(type_prop, space_to_depth_output_shape_block_first_4D_2) { - auto A = make_shared(element::f32, Shape{1, 12, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 12, 1080, 1616}); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; auto space_to_depth = make_shared(A, mode, 2); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 12 * 4, 1080 / 2, 1616 / 2})); } TEST(type_prop, space_to_depth_output_shape_depth_first_4D) { - auto A = make_shared(element::f32, Shape{1, 12, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 12, 1080, 1616}); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; auto space_to_depth = make_shared(A, mode, 2); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 12 * 4, 1080 / 2, 1616 / 2})); } TEST(type_prop, space_to_depth_output_shape_depth_first_5D) { - auto A = make_shared(element::f32, Shape{1, 12, 4, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 12, 4, 1080, 1616}); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; auto space_to_depth = make_shared(A, mode, 2); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 12 * 8, 4 / 2, 1080 / 2, 1616 / 2})); } TEST(type_prop, space_to_depth_input_rank_not_supported) { - auto A = make_shared(element::f32, Shape{1, 8}); + auto A = make_shared(element::Type_t::f32, Shape{1, 8}); try { auto space_to_depth = @@ -84,7 +84,7 @@ TEST(type_prop, space_to_depth_input_rank_not_supported) TEST(type_prop, space_to_depth_blocksize_not_matched) { - auto A = make_shared(element::f32, Shape{1, 3, 8, 7}); + auto A = make_shared(element::Type_t::f32, Shape{1, 3, 8, 7}); try { auto space_to_depth = diff --git a/ngraph/test/type_prop/split.cpp b/ngraph/test/type_prop/split.cpp index 8abbe593dcab87..0fffd7f96662ce 100644 --- a/ngraph/test/type_prop/split.cpp +++ b/ngraph/test/type_prop/split.cpp @@ -25,11 +25,11 @@ using namespace ngraph; TEST(type_prop, split) { - const auto data = make_shared(element::i32, Shape{2, 6}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); try { - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = 
op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto split = make_shared(data, axis, 7); FAIL() << "Split node was created with incorrect data."; } @@ -43,7 +43,7 @@ TEST(type_prop, split) try { - const auto axis = op::Constant::create(element::i64, Shape{}, {-5}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {-5}); const auto split = make_shared(data, axis, 4); // invalid axis FAIL() << "Split node was created with incorrect data."; } @@ -52,19 +52,19 @@ TEST(type_prop, split) EXPECT_HAS_SUBSTRING(error.what(), std::string("Parameter axis -5 out of the tensor rank")); } - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto split = make_shared(data, axis, 2); EXPECT_EQ(split->outputs().size(), 2); EXPECT_EQ(split->get_output_shape(0), (Shape{2, 3})); EXPECT_EQ(split->get_output_shape(1), (Shape{2, 3})); - EXPECT_EQ(split->get_output_element_type(0), element::i32); - EXPECT_EQ(split->get_output_element_type(1), element::i32); + EXPECT_EQ(split->get_output_element_type(0), element::Type_t::i32); + EXPECT_EQ(split->get_output_element_type(1), element::Type_t::i32); } TEST(type_prop, split_axis_must_be_scalar) { - const auto data = make_shared(element::i32, Shape{2, 6}); - const auto axis = op::Constant::create(element::i64, Shape{2}, {0, 1}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{2}, {0, 1}); try { @@ -84,15 +84,15 @@ TEST(type_prop, split_axis_must_be_scalar) TEST(type_prop, split_v1) { - const auto data = make_shared(element::f16, Shape{2, 3, 4}); - const auto axis = op::Constant::create(element::i64, {}, {1}); + const auto data = make_shared(element::Type_t::f16, Shape{2, 3, 4}); + const auto axis = op::Constant::create(element::Type_t::i64, {}, {1}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); EXPECT_EQ(split->outputs().size(), num_splits); for (int i = 0; i < num_splits; ++i) { - EXPECT_EQ(split->get_output_element_type(i), element::f16); + EXPECT_EQ(split->get_output_element_type(i), element::Type_t::f16); EXPECT_EQ(split->get_output_shape(i), (Shape{2, 1, 4})); } } @@ -100,8 +100,8 @@ TEST(type_prop, split_v1) TEST(type_prop, split_v1_axis_const_data_axis_dim_known) { const auto data = - make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic()}); - const auto axis = op::Constant::create(element::i32, {}, {1}); + make_shared(element::Type_t::f32, PartialShape{2, 3, Dimension::dynamic()}); + const auto axis = op::Constant::create(element::Type_t::i32, {}, {1}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -115,8 +115,8 @@ TEST(type_prop, split_v1_axis_const_data_axis_dim_known) TEST(type_prop, split_v1_axis_const_only_data_axis_dim_known) { const auto data = make_shared( - element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); - const auto axis = op::Constant::create(element::i16, {}, {0}); + element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); + const auto axis = op::Constant::create(element::Type_t::i16, {}, {0}); const size_t num_splits = 2; const auto split = make_shared(data, axis, num_splits); @@ -130,9 +130,9 @@ TEST(type_prop, split_v1_axis_const_only_data_axis_dim_known) TEST(type_prop, split_v1_axis_const_data_axis_dim_unknown) { - const auto data = - make_shared(element::f32, PartialShape{4, 
Dimension::dynamic(), 3, 5}); - const auto axis = op::Constant::create(element::i8, {}, {1}); + const auto data = make_shared(element::Type_t::f32, + PartialShape{4, Dimension::dynamic(), 3, 5}); + const auto axis = op::Constant::create(element::Type_t::i8, {}, {1}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -146,8 +146,8 @@ TEST(type_prop, split_v1_axis_const_data_axis_dim_unknown) TEST(type_prop, split_v1_axis_const_only_data_rank_known) { - const auto data = make_shared(element::f32, PartialShape::dynamic(4)); - const auto axis = op::Constant::create(element::u64, {}, {1}); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(4)); + const auto axis = op::Constant::create(element::Type_t::u64, {}, {1}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -160,8 +160,8 @@ TEST(type_prop, split_v1_axis_const_only_data_rank_known) TEST(type_prop, split_v1_axis_not_const_only_data_rank_known) { - const auto data = make_shared(element::f32, PartialShape::dynamic(4)); - const auto axis = make_shared(element::u32, PartialShape{}); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(4)); + const auto axis = make_shared(element::Type_t::u32, PartialShape{}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -174,8 +174,8 @@ TEST(type_prop, split_v1_axis_not_const_only_data_rank_known) TEST(type_prop, split_v1_axis_const_data_rank_unknown) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto axis = op::Constant::create(element::u16, {}, {2}); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto axis = op::Constant::create(element::Type_t::u16, {}, {2}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -188,8 +188,8 @@ TEST(type_prop, split_v1_axis_const_data_rank_unknown) TEST(type_prop, split_v1_axis_not_const_data_rank_unknown) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto axis = make_shared(element::u8, PartialShape{}); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto axis = make_shared(element::Type_t::u8, PartialShape{}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -202,8 +202,8 @@ TEST(type_prop, split_v1_axis_not_const_data_rank_unknown) TEST(type_prop, split_v1_axis_dynamic_rank) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto axis = make_shared(element::u8, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto axis = make_shared(element::Type_t::u8, PartialShape::dynamic()); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); diff --git a/ngraph/test/type_prop/squared_difference.cpp b/ngraph/test/type_prop/squared_difference.cpp index bdedbeb5ea824a..4646f41652a827 100644 --- a/ngraph/test/type_prop/squared_difference.cpp +++ b/ngraph/test/type_prop/squared_difference.cpp @@ -23,9 +23,9 @@ using namespace ngraph; TEST(type_prop, squared_difference) { - const auto x1 = make_shared(element::f64, Shape{2, 2}); - const auto x2 = make_shared(element::f64, Shape{3, 2}); - const auto x3 = make_shared(element::f64, Shape{1, 2}); + const auto x1 = make_shared(element::Type_t::f64, Shape{2, 2}); + const auto x2 = make_shared(element::Type_t::f64, Shape{3, 2}); + 
const auto x3 = make_shared(element::Type_t::f64, Shape{1, 2}); try { @@ -38,6 +38,6 @@ TEST(type_prop, squared_difference) } const auto clamp = make_shared(x1, x3); - EXPECT_EQ(clamp->get_element_type(), element::f64); + EXPECT_EQ(clamp->get_element_type(), element::Type_t::f64); EXPECT_EQ(clamp->get_shape(), (Shape{2, 2})); } diff --git a/ngraph/test/type_prop/squeeze.cpp b/ngraph/test/type_prop/squeeze.cpp index 78b813a57d91e2..7768589f450603 100644 --- a/ngraph/test/type_prop/squeeze.cpp +++ b/ngraph/test/type_prop/squeeze.cpp @@ -23,45 +23,47 @@ using namespace ngraph; TEST(type_prop, squeeze) { - auto param = make_shared(element::f32, Shape{1, 4, 1, 4, 1, 8}); + auto param = make_shared(element::Type_t::f32, Shape{1, 4, 1, 4, 1, 8}); auto axes_node = - make_shared(element::u64, Shape{2}, vector{0, 2}); + make_shared(element::Type_t::u64, Shape{2}, vector{0, 2}); auto squeeze = make_shared(param, axes_node); - ASSERT_EQ(squeeze->get_element_type(), element::f32); + ASSERT_EQ(squeeze->get_element_type(), element::Type_t::f32); ASSERT_EQ(squeeze->get_shape(), (Shape{4, 4, 1, 8})); - axes_node = make_shared(element::u64, Shape{0}, vector{}); + axes_node = + make_shared(element::Type_t::u64, Shape{0}, vector{}); auto squeeze_default_axes = make_shared(param, axes_node); - ASSERT_EQ(squeeze_default_axes->get_element_type(), element::f32); + ASSERT_EQ(squeeze_default_axes->get_element_type(), element::Type_t::f32); ASSERT_EQ(squeeze_default_axes->get_shape(), (Shape{4, 4, 8})); } TEST(type_prop, squeeze_dynamic) { - auto param = make_shared(element::f32, PartialShape::dynamic(6)); + auto param = make_shared(element::Type_t::f32, PartialShape::dynamic(6)); auto axes_node = - make_shared(element::u64, Shape{2}, vector{0, 2}); + make_shared(element::Type_t::u64, Shape{2}, vector{0, 2}); auto squeeze = make_shared(param, axes_node); - ASSERT_EQ(squeeze->get_element_type(), element::f32); + ASSERT_EQ(squeeze->get_element_type(), element::Type_t::f32); EXPECT_TRUE(squeeze->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); - axes_node = make_shared(element::u64, Shape{0}, vector{}); + axes_node = + make_shared(element::Type_t::u64, Shape{0}, vector{}); auto squeeze_default_axes = make_shared(param, axes_node); - ASSERT_EQ(squeeze_default_axes->get_element_type(), element::f32); + ASSERT_EQ(squeeze_default_axes->get_element_type(), element::Type_t::f32); EXPECT_TRUE( squeeze_default_axes->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, squeeze_axes_invalid_value) { - auto param = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto param = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto axes_node = - make_shared(element::u64, Shape{2}, vector{0, 2}); + make_shared(element::Type_t::u64, Shape{2}, vector{0, 2}); try { diff --git a/ngraph/test/type_prop/strided_slice.cpp b/ngraph/test/type_prop/strided_slice.cpp index 77bfa280f386dc..968deff1e5ae99 100644 --- a/ngraph/test/type_prop/strided_slice.cpp +++ b/ngraph/test/type_prop/strided_slice.cpp @@ -25,9 +25,9 @@ using namespace ngraph; TEST(type_prop, strided_slice_begin_incorrect_type) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::f16, Shape{4}); - auto end = make_shared(element::i64, Shape{4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::f16, Shape{4}); + auto end = make_shared(element::Type_t::i64, Shape{4}); try { auto strided_slice = make_shared( @@ -47,9 +47,9 
@@ TEST(type_prop, strided_slice_begin_incorrect_type) TEST(type_prop, strided_slice_end_incorrect_type) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4}); - auto end = make_shared(element::boolean, Shape{4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::i64, Shape{4}); + auto end = make_shared(element::Type_t::boolean, Shape{4}); try { auto strided_slice = make_shared( @@ -69,9 +69,9 @@ TEST(type_prop, strided_slice_end_incorrect_type) TEST(type_prop, strided_slice_incompatible_size_of_masks_attr) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4}); - auto end = make_shared(element::i64, Shape{4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::i64, Shape{4}); + auto end = make_shared(element::Type_t::i64, Shape{4}); try { auto strided_slice = make_shared(data, @@ -96,9 +96,9 @@ TEST(type_prop, strided_slice_incompatible_size_of_masks_attr) TEST(type_prop, strided_slice_mask_incorrect_value) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4, 5}); - auto end = make_shared(element::i64, Shape{4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::i64, Shape{4, 5}); + auto end = make_shared(element::Type_t::i64, Shape{4}); try { auto strided_slice = make_shared( @@ -119,9 +119,9 @@ TEST(type_prop, strided_slice_mask_incorrect_value) TEST(type_prop, strided_slice_begin_incorrect_shape) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4, 5}); - auto end = make_shared(element::i64, Shape{4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::i64, Shape{4, 5}); + auto end = make_shared(element::Type_t::i64, Shape{4}); try { auto strided_slice = make_shared( @@ -141,9 +141,9 @@ TEST(type_prop, strided_slice_begin_incorrect_shape) TEST(type_prop, strided_slice_end_incorrect_shape) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4}); - auto end = make_shared(element::i64, Shape{4, 5}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::i64, Shape{4}); + auto end = make_shared(element::Type_t::i64, Shape{4, 5}); try { auto strided_slice = make_shared( @@ -163,9 +163,9 @@ TEST(type_prop, strided_slice_end_incorrect_shape) TEST(type_prop, strided_slice_default_stride_dynamic_shape_input) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, PartialShape::dynamic()); - auto end = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::i64, PartialShape::dynamic()); + auto end = make_shared(element::Type_t::i64, Shape{2}); auto strided_slice = make_shared( data, begin, end, vector{0, 0}, vector{0, 0}); @@ -173,7 +173,7 @@ TEST(type_prop, strided_slice_default_stride_dynamic_shape_input) try { - end = make_shared(element::i64, PartialShape::dynamic()); + end = make_shared(element::Type_t::i64, PartialShape::dynamic()); strided_slice = make_shared( data, begin, end, vector{0, 0}, vector{0, 0}); // Should have thrown, so fail if it didn't @@ -191,10 
+191,11 @@ TEST(type_prop, strided_slice_default_stride_dynamic_shape_input) TEST(type_prop, strided_slice_reverse_out_of_bounds) { - auto data = std::make_shared(ngraph::element::f32, ngraph::Shape{3, 4, 5}); - auto begin = op::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {100}); - auto end = op::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {-100}); - auto stride = op::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {-1}); + auto data = + std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{3, 4, 5}); + auto begin = op::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape{3}, {100}); + auto end = op::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape{3}, {-100}); + auto stride = op::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape{3}, {-1}); std::vector begin_mask = {0, 0, 0, 0}; std::vector end_mask = {0, 0, 0, 0}; diff --git a/ngraph/test/type_prop/swish.cpp b/ngraph/test/type_prop/swish.cpp index 6611009e8d94d3..b9091a5364a5a2 100644 --- a/ngraph/test/type_prop/swish.cpp +++ b/ngraph/test/type_prop/swish.cpp @@ -23,31 +23,33 @@ using namespace ngraph; TEST(type_prop, swish) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto swish_func = make_shared(data); - EXPECT_EQ(swish_func->get_element_type(), element::f32); + EXPECT_EQ(swish_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(swish_func->get_shape(), data->get_output_shape(0)); } TEST(type_prop, swish_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto swish_func = make_shared(data); - EXPECT_EQ(swish_func->get_element_type(), element::f32); + EXPECT_EQ(swish_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE( swish_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); // rank unknown auto swish_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic())); + make_shared(element::Type_t::f32, PartialShape::dynamic())); ASSERT_TRUE(swish_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, swish_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto swish_func = make_shared(data); - EXPECT_EQ(swish_func->get_element_type(), element::f32); + EXPECT_EQ(swish_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE( swish_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); ASSERT_TRUE(swish_func->get_output_partial_shape(0).rank().is_static()); @@ -55,8 +57,8 @@ TEST(type_prop, swish_partial_static_rank) TEST(type_prop, swish_incompatible_types) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); - auto beta = make_shared(element::f16, Shape{}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); + auto beta = make_shared(element::Type_t::f16, Shape{}); try { const auto swish_func = make_shared(data, beta); @@ -70,8 +72,8 @@ TEST(type_prop, swish_incompatible_types) TEST(type_prop, swish_beta_not_scalar) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); - auto beta = make_shared(element::f32, Shape{1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); + auto beta = make_shared(element::Type_t::f32, Shape{1}); try { const auto 
swish_func = make_shared(data, beta); @@ -85,11 +87,11 @@ TEST(type_prop, swish_beta_not_scalar) TEST(type_prop, swish_2_inputs) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); - auto beta = make_shared(element::f32, Shape{}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); + auto beta = make_shared(element::Type_t::f32, Shape{}); const auto swish_func = make_shared(data, beta); - EXPECT_EQ(swish_func->get_element_type(), element::f32); + EXPECT_EQ(swish_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(swish_func->get_output_partial_shape(0).same_scheme(data->get_output_shape(0))); ASSERT_TRUE(swish_func->get_output_partial_shape(0).rank().is_static()); } diff --git a/ngraph/test/type_prop/ti.cpp b/ngraph/test/type_prop/ti.cpp index 6fdc2241c00eea..c2c26b51587bd8 100644 --- a/ngraph/test/type_prop/ti.cpp +++ b/ngraph/test/type_prop/ti.cpp @@ -30,20 +30,20 @@ TEST(type_prop, tensor_iterator_lstm) const size_t L = 10; // Sequence length const size_t I = 8; // Input size const size_t H = 32; // Hidden size - auto SENT = make_shared(element::f32, Shape{N, L, I}); + auto SENT = make_shared(element::Type_t::f32, Shape{N, L, I}); - auto H_init = make_shared(element::f32, Shape{N, 1, H}); - auto C_init = make_shared(element::f32, Shape{N, 1, H}); + auto H_init = make_shared(element::Type_t::f32, Shape{N, 1, H}); + auto C_init = make_shared(element::Type_t::f32, Shape{N, 1, H}); - auto W = make_shared(element::f32, Shape{4 * H, I}); - auto R = make_shared(element::f32, Shape{4 * H, H}); - auto H_t = make_shared(element::f32, Shape{N, 1, H}); - auto C_t = make_shared(element::f32, Shape{N, 1, H}); + auto W = make_shared(element::Type_t::f32, Shape{4 * H, I}); + auto R = make_shared(element::Type_t::f32, Shape{4 * H, H}); + auto H_t = make_shared(element::Type_t::f32, Shape{N, 1, H}); + auto C_t = make_shared(element::Type_t::f32, Shape{N, 1, H}); // Body - auto X = make_shared(element::f32, Shape{N, 1, I}); - auto W_body = make_shared(element::f32, Shape{4 * H, I}); - auto R_body = make_shared(element::f32, Shape{4 * H, H}); + auto X = make_shared(element::Type_t::f32, Shape{N, 1, I}); + auto W_body = make_shared(element::Type_t::f32, Shape{4 * H, I}); + auto R_body = make_shared(element::Type_t::f32, Shape{4 * H, H}); auto LSTM_cell = make_shared(builder::opset1::reshape(X, Shape{N, I}), builder::opset1::reshape(H_t, Shape{N, H}), builder::opset1::reshape(C_t, Shape{N, H}), @@ -77,15 +77,15 @@ TEST(type_prop, tensor_iterator_lstm) TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 40, 10}); - auto Y = make_shared(element::f32, Shape{32, 40, 10}); - auto M = make_shared(element::f32, Shape{32, 2, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 40, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 40, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 2, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xi = make_shared(element::f32, Shape{32, 2, 10}); - auto Yi = make_shared(element::f32, Shape{32, 2, 10}); - auto M_body = make_shared(element::f32, Shape{32, 2, 10}); + auto Xi = make_shared(element::Type_t::f32, Shape{32, 2, 10}); + auto Yi = make_shared(element::Type_t::f32, Shape{32, 2, 10}); + auto M_body = make_shared(element::Type_t::f32, Shape{32, 2, 10}); // Body auto Zo = (Xi + Yi) * M_body; @@ -121,15 +121,15 @@ TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2) TEST(type_prop, 
tensor_iterator_2_slice_inputs_part_size_2_dynamic) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 40, 10}); - auto Y = make_shared(element::f32, Shape{32, 40, 10}); - auto M = make_shared(element::f32, Shape{32, 2, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 40, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 40, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 2, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); // Body auto Zo = (Xi + Yi) * M_body; diff --git a/ngraph/test/type_prop/tile.cpp b/ngraph/test/type_prop/tile.cpp index e3c9a30b95ac0c..8dfcadcbc45659 100644 --- a/ngraph/test/type_prop/tile.cpp +++ b/ngraph/test/type_prop/tile.cpp @@ -23,27 +23,27 @@ using namespace ngraph; TEST(type_prop, tile) { - auto param0 = make_shared(element::f32, Shape{6, 8, 10}); - auto param1 = op::Constant::create(element::i64, Shape{3}, {3, 4, 1}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 8, 10}); + auto param1 = op::Constant::create(element::Type_t::i64, Shape{3}, {3, 4, 1}); auto top = make_shared(param0, param1); - ASSERT_EQ(top->get_element_type(), element::f32); + ASSERT_EQ(top->get_element_type(), element::Type_t::f32); ASSERT_EQ(top->get_shape(), (Shape{18, 32, 10})); } TEST(type_prop, tile_small_data_rank) { - auto param0 = make_shared(element::f32, Shape{8, 10}); - auto param1 = op::Constant::create(element::i64, Shape{3}, {3, 4, 1}); + auto param0 = make_shared(element::Type_t::f32, Shape{8, 10}); + auto param1 = op::Constant::create(element::Type_t::i64, Shape{3}, {3, 4, 1}); auto top = make_shared(param0, param1); - ASSERT_EQ(top->get_element_type(), element::f32); + ASSERT_EQ(top->get_element_type(), element::Type_t::f32); ASSERT_EQ(top->get_shape(), (Shape{3, 32, 10})); } TEST(type_prop, tile_few_repeats) { - auto param0 = make_shared(element::f32, Shape{6, 8, 10}); - auto param1 = op::Constant::create(element::i64, Shape{2}, {4, 1}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 8, 10}); + auto param1 = op::Constant::create(element::Type_t::i64, Shape{2}, {4, 1}); auto top = make_shared(param0, param1); - ASSERT_EQ(top->get_element_type(), element::f32); + ASSERT_EQ(top->get_element_type(), element::Type_t::f32); ASSERT_EQ(top->get_shape(), (Shape{6, 32, 10})); } diff --git a/ngraph/test/type_prop/top_k.cpp b/ngraph/test/type_prop/top_k.cpp index 644b60bac137b9..bde74878601570 100644 --- a/ngraph/test/type_prop/top_k.cpp +++ b/ngraph/test/type_prop/top_k.cpp @@ -31,8 +31,8 @@ TYPED_TEST_CASE_P(topk_type_prop); TYPED_TEST_P(topk_type_prop, topk_negative_axis_support) { const auto data_shape = Shape{1, 2, 3, 4}; - const auto data = make_shared(element::f32, data_shape); - const auto k = op::Constant::create(element::i64, Shape{}, {2}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto k = op::Constant::create(element::Type_t::i64, Shape{}, {2}); const int64_t axis = -2; const auto topk = make_shared(data, k, axis, "max", "value"); @@ -46,8 +46,8 @@ TYPED_TEST_P(topk_type_prop, topk_negative_axis_support) 
TYPED_TEST_P(topk_type_prop, topk_negative_axis_dynamic_rank) { const auto data_shape = PartialShape::dynamic(); - const auto data = make_shared(element::f32, data_shape); - const auto k = op::Constant::create(element::i64, Shape{}, {2}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto k = op::Constant::create(element::Type_t::i64, Shape{}, {2}); const int64_t axis = -2; const auto topk = make_shared(data, k, axis, "max", "value"); @@ -68,14 +68,14 @@ TYPED_TEST_P(topk_type_prop, topk_negative_axis_dynamic_rank) TYPED_TEST_P(topk_type_prop, topk_v1_partial_ouptut) { auto data_shape = PartialShape{2, 10}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); { - auto k = make_shared(element::i32, PartialShape({})); + auto k = make_shared(element::Type_t::i32, PartialShape({})); auto topk = make_shared(data, k, 1, "max", "value"); EXPECT_EQ(topk->get_output_partial_shape(0), PartialShape({2, -1})); } { - auto k = make_shared(element::i32, Shape{}, 3); + auto k = make_shared(element::Type_t::i32, Shape{}, 3); auto topk = make_shared(data, k, 1, "max", "value"); EXPECT_EQ(topk->get_output_shape(0), Shape({2, 3})); EXPECT_EQ(topk->get_output_partial_shape(0), PartialShape({2, 3})); @@ -86,18 +86,18 @@ TYPED_TEST_P(topk_type_prop, topk_rank_static_k_unknown) { const int64_t axis = 1; const auto data_shape = Shape{1, 10, 100}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); { - const auto k = make_shared(element::i32, PartialShape({})); + const auto k = make_shared(element::Type_t::i32, PartialShape({})); const auto topk = make_shared(data, k, axis, "max", "value"); const PartialShape fully_dynamic_axis_shape{1, Dimension::dynamic(), 100}; EXPECT_EQ(topk->get_output_partial_shape(0), fully_dynamic_axis_shape); } { - const auto k = make_shared(element::i64, Shape{}, 5); - const auto convert_k = make_shared(k, element::i32); + const auto k = make_shared(element::Type_t::i64, Shape{}, 5); + const auto convert_k = make_shared(k, element::Type_t::i32); const auto topk = make_shared(data, convert_k, axis, "max", "value"); const PartialShape ranged_dynamic_axis_shape{1, Dimension{5, 10}, 100}; diff --git a/ngraph/test/type_prop/transpose.cpp b/ngraph/test/type_prop/transpose.cpp index ae57978fe7f781..e4cf09085099fc 100644 --- a/ngraph/test/type_prop/transpose.cpp +++ b/ngraph/test/type_prop/transpose.cpp @@ -23,30 +23,32 @@ using namespace ngraph; TEST(type_prop, transpose_arg_static_input_order_static_ok) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto input_order = make_shared(element::i64, Shape{4}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto input_order = make_shared(element::Type_t::i64, Shape{4}); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, transpose_arg_static_input_order_constant_ok) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto input_order = op::Constant::create(element::i64, Shape{4}, vector{2, 1, 0, 3}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto input_order = + op::Constant::create(element::Type_t::i64, Shape{4}, vector{2, 1, 0, 3}); auto r = make_shared(arg, input_order); - 
EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape{6, 4, 2, 8})); } TEST(type_prop, transpose_arg_static_input_order_constant_invalid_perm) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto input_order = op::Constant::create(element::i64, Shape{4}, vector{2, 9, 0, 3}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto input_order = + op::Constant::create(element::Type_t::i64, Shape{4}, vector{2, 9, 0, 3}); try { @@ -68,76 +70,79 @@ TEST(type_prop, transpose_arg_static_input_order_constant_invalid_perm) TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_static_ok) { auto arg = make_shared( - element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); - auto input_order = make_shared(element::i64, Shape{4}); + element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); + auto input_order = make_shared(element::Type_t::i64, Shape{4}); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, transpose_arg_static_input_order_rank_static_dynamic_ok) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto input_order = make_shared(element::i64, PartialShape{Dimension::dynamic()}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto input_order = + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic()}); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_rank_static_dynamic_ok) { auto arg = make_shared( - element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); - auto input_order = make_shared(element::i64, PartialShape{Dimension::dynamic()}); + element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); + auto input_order = + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic()}); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, transpose_arg_rank_dynamic_input_order_rank_static_dynamic_ok) { - auto arg = make_shared(element::f32, PartialShape::dynamic()); - auto input_order = make_shared(element::i64, PartialShape{Dimension::dynamic()}); + auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto input_order = + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic()}); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, transpose_arg_rank_dynamic_input_order_rank_dynamic_ok) { - auto arg = make_shared(element::f32, PartialShape::dynamic()); - auto input_order = make_shared(element::i64, PartialShape::dynamic()); + auto arg = 
make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto input_order = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_rank_dynamic_ok) { auto arg = make_shared( - element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); - auto input_order = make_shared(element::i64, PartialShape::dynamic()); + element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); + auto input_order = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, transpose_arg_static_input_order_static_input_order_not_vector) { - auto arg = make_shared(element::f32, PartialShape{2, 4, 6, 8}); - auto input_order = make_shared(element::i64, PartialShape{2, 2}); + auto arg = make_shared(element::Type_t::f32, PartialShape{2, 4, 6, 8}); + auto input_order = make_shared(element::Type_t::i64, PartialShape{2, 2}); try { @@ -156,9 +161,9 @@ TEST(type_prop, transpose_arg_static_input_order_static_input_order_not_vector) TEST(type_prop, transpose_arg_static_input_order_rank_static_dynamic_input_order_not_vector) { - auto arg = make_shared(element::f32, PartialShape{2, 4, 6, 8}); + auto arg = make_shared(element::Type_t::f32, PartialShape{2, 4, 6, 8}); auto input_order = - make_shared(element::i64, PartialShape{2, Dimension::dynamic()}); + make_shared(element::Type_t::i64, PartialShape{2, Dimension::dynamic()}); try { @@ -177,8 +182,8 @@ TEST(type_prop, transpose_arg_static_input_order_rank_static_dynamic_input_order TEST(type_prop, transpose_arg_static_input_order_static_input_order_wrong_size) { - auto arg = make_shared(element::f32, PartialShape{2, 4, 6, 8}); - auto input_order = make_shared(element::i64, PartialShape{5}); + auto arg = make_shared(element::Type_t::f32, PartialShape{2, 4, 6, 8}); + auto input_order = make_shared(element::Type_t::i64, PartialShape{5}); try { @@ -200,8 +205,8 @@ TEST(type_prop, transpose_arg_static_input_order_static_input_order_wrong_size) TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_static_input_order_not_vector) { auto arg = make_shared( - element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); - auto input_order = make_shared(element::i64, PartialShape{2, 2}); + element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); + auto input_order = make_shared(element::Type_t::i64, PartialShape{2, 2}); try { @@ -222,9 +227,9 @@ TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_rank_static_dynamic_input_order_not_vector) { auto arg = make_shared( - element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); + element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); auto input_order = - make_shared(element::i64, PartialShape{2, Dimension::dynamic()}); + make_shared(element::Type_t::i64, PartialShape{2, Dimension::dynamic()}); try { @@ -243,9 +248,9 @@ TEST(type_prop, TEST(type_prop, 
transpose_arg_rank_dynamic_input_order_rank_static_dynamic_input_order_not_vector) { - auto arg = make_shared(element::f32, PartialShape::dynamic()); + auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto input_order = - make_shared(element::i64, PartialShape{2, Dimension::dynamic()}); + make_shared(element::Type_t::i64, PartialShape{2, Dimension::dynamic()}); try { @@ -264,19 +269,19 @@ TEST(type_prop, transpose_arg_rank_dynamic_input_order_rank_static_dynamic_input TEST(type_prop, transpose_input_order_et_dynamic_ok) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto input_order = make_shared(element::dynamic, Shape{4}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto input_order = make_shared(element::Type_t::dynamic, Shape{4}); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, transpose_input_order_et_wrong) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto input_order = make_shared(element::boolean, Shape{4}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto input_order = make_shared(element::Type_t::boolean, Shape{4}); try { @@ -296,11 +301,12 @@ TEST(type_prop, transpose_input_order_et_wrong) TEST(type_prop, transpose_with_empty_order) { - auto arg = make_shared(element::f32, Shape{1, 300}); - auto input_order = make_shared(element::i64, Shape({0}), std::vector()); + auto arg = make_shared(element::Type_t::f32, Shape{1, 300}); + auto input_order = + make_shared(element::Type_t::i64, Shape({0}), std::vector()); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape({300, 1}))); } diff --git a/ngraph/test/type_prop/unary_elementwise.cpp b/ngraph/test/type_prop/unary_elementwise.cpp index 1cbddb4d3ada05..aa60efcbd094a7 100644 --- a/ngraph/test/type_prop/unary_elementwise.cpp +++ b/ngraph/test/type_prop/unary_elementwise.cpp @@ -23,7 +23,7 @@ using namespace ngraph; TEST(type_prop, unary_arithmetic_bad_argument_element_types) { - auto tv0_2_4_param = make_shared(element::boolean, Shape{2, 4}); + auto tv0_2_4_param = make_shared(element::Type_t::boolean, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param); diff --git a/ngraph/test/type_prop/unsqueeze.cpp b/ngraph/test/type_prop/unsqueeze.cpp index 484a60b0ea1e0e..b49e14ae22746e 100644 --- a/ngraph/test/type_prop/unsqueeze.cpp +++ b/ngraph/test/type_prop/unsqueeze.cpp @@ -23,23 +23,23 @@ using namespace ngraph; TEST(type_prop, unsqueeze) { - auto param = make_shared(element::f32, Shape{4, 1, 4, 1, 8}); + auto param = make_shared(element::Type_t::f32, Shape{4, 1, 4, 1, 8}); auto axes_node = - make_shared(element::u64, Shape{2}, vector{1, 2}); + make_shared(element::Type_t::u64, Shape{2}, vector{1, 2}); auto unsqueeze = make_shared(param, axes_node); - ASSERT_EQ(unsqueeze->get_element_type(), element::f32); + ASSERT_EQ(unsqueeze->get_element_type(), element::Type_t::f32); ASSERT_EQ(unsqueeze->get_shape(), (Shape{4, 1, 1, 1, 4, 1, 8})); } TEST(type_prop, unsqueeze_dynamic) { - auto param = make_shared(element::f32, PartialShape::dynamic(5)); + auto param = make_shared(element::Type_t::f32, PartialShape::dynamic(5)); auto axes_node = - 
make_shared(element::u64, Shape{2}, vector{1, 2}); + make_shared(element::Type_t::u64, Shape{2}, vector{1, 2}); auto unsqueeze = make_shared(param, axes_node); - ASSERT_EQ(unsqueeze->get_element_type(), element::f32); + ASSERT_EQ(unsqueeze->get_element_type(), element::Type_t::f32); EXPECT_TRUE( unsqueeze->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 1, diff --git a/ngraph/test/type_prop/variadic_split.cpp b/ngraph/test/type_prop/variadic_split.cpp index 15da2bbcd18fe1..63cf9f4fdaf98b 100644 --- a/ngraph/test/type_prop/variadic_split.cpp +++ b/ngraph/test/type_prop/variadic_split.cpp @@ -23,44 +23,44 @@ using namespace ngraph; TEST(type_prop, variadic_split) { - const auto data = make_shared(element::i32, Shape{2, 6}); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); - const auto splits = op::Constant::create(element::i64, Shape{2}, {2, 4}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); + const auto splits = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 4}); const auto split = make_shared(data, axis, splits); EXPECT_EQ(split->outputs().size(), 2); EXPECT_EQ(split->get_output_shape(0), (Shape{2, 2})); EXPECT_EQ(split->get_output_shape(1), (Shape{2, 4})); - EXPECT_EQ(split->get_output_element_type(0), element::i32); - EXPECT_EQ(split->get_output_element_type(1), element::i32); + EXPECT_EQ(split->get_output_element_type(0), element::Type_t::i32); + EXPECT_EQ(split->get_output_element_type(1), element::Type_t::i32); EXPECT_EQ(make_shared( - make_shared(element::i32, Shape{12, 6}), - op::Constant::create(element::i64, Shape{}, {-2}), - op::Constant::create(element::i64, Shape{3}, {7, -1, 2})) + make_shared(element::Type_t::i32, Shape{12, 6}), + op::Constant::create(element::Type_t::i64, Shape{}, {-2}), + op::Constant::create(element::Type_t::i64, Shape{3}, {7, -1, 2})) ->output(1) .get_shape(), (Shape{3, 6})); EXPECT_EQ(make_shared( - make_shared(element::i32, Shape{12, 6}), - op::Constant::create(element::i64, Shape{}, {-2}), - op::Constant::create(element::i64, Shape{3}, {-1, 7, 2})) + make_shared(element::Type_t::i32, Shape{12, 6}), + op::Constant::create(element::Type_t::i64, Shape{}, {-2}), + op::Constant::create(element::Type_t::i64, Shape{3}, {-1, 7, 2})) ->output(0) .get_shape(), (Shape{3, 6})); EXPECT_EQ(make_shared( - make_shared(element::i32, Shape{12, 1, 6}), - op::Constant::create(element::i64, Shape{1}, {2}), - op::Constant::create(element::i64, Shape{3}, {3, 1, 2})) + make_shared(element::Type_t::i32, Shape{12, 1, 6}), + op::Constant::create(element::Type_t::i64, Shape{1}, {2}), + op::Constant::create(element::Type_t::i64, Shape{3}, {3, 1, 2})) ->output(2) .get_shape(), (Shape{12, 1, 2})); EXPECT_EQ(make_shared( - make_shared(element::i32, Shape{12, 6}), - op::Constant::create(element::i64, Shape{1}, {1}), - op::Constant::create(element::i64, Shape{2}, {6, 0})) + make_shared(element::Type_t::i32, Shape{12, 6}), + op::Constant::create(element::Type_t::i64, Shape{1}, {1}), + op::Constant::create(element::Type_t::i64, Shape{2}, {6, 0})) ->output(1) .get_shape(), (Shape{12, 0})); @@ -68,12 +68,13 @@ TEST(type_prop, variadic_split) TEST(type_prop, variadic_split_splits_rank) { - const auto data = make_shared(element::i32, Shape{2, 6}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); try { - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); - const auto splits = 
op::Constant::create(element::i64, Shape{1, 2}, {2, 4}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); + const auto splits = + op::Constant::create(element::Type_t::i64, Shape{1, 2}, {2, 4}); const auto split = make_shared(data, axis, splits); FAIL() << "Split node was created with incorrect data."; } @@ -86,12 +87,12 @@ TEST(type_prop, variadic_split_splits_rank) TEST(type_prop, variadic_split_incorrect_sum) { - const auto data = make_shared(element::i32, Shape{2, 6}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); try { - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); - const auto splits = op::Constant::create(element::i64, Shape{2}, {1, 6}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); + const auto splits = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 6}); const auto split = make_shared(data, axis, splits); FAIL() << "Split node was created with incorrect data."; } @@ -105,12 +106,12 @@ TEST(type_prop, variadic_split_incorrect_sum) TEST(type_prop, variadic_split_incorrect_axis) { - const auto data = make_shared(element::i32, Shape{2, 6}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); try { - const auto axis = op::Constant::create(element::i64, Shape{}, {-5}); - const auto splits = op::Constant::create(element::i64, Shape{2}, {2, 4}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {-5}); + const auto splits = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 4}); const auto split = make_shared(data, axis, splits); FAIL() << "Split node was created with incorrect data."; } @@ -123,12 +124,12 @@ TEST(type_prop, variadic_split_incorrect_axis) TEST(type_prop, variadic_split_splits_invalid_negative) { - const auto data = make_shared(element::i32, Shape{2, 6}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); try { - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); - const auto splits = op::Constant::create(element::i64, Shape{2}, {-2, 4}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); + const auto splits = op::Constant::create(element::Type_t::i64, Shape{2}, {-2, 4}); const auto split = make_shared(data, axis, splits); FAIL() << "Split node was created with incorrect data."; } @@ -141,12 +142,13 @@ TEST(type_prop, variadic_split_splits_invalid_negative) TEST(type_prop, variadic_split_splits_multiple_negatives) { - const auto data = make_shared(element::i32, Shape{2, 6}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); try { - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); - const auto splits = op::Constant::create(element::i64, Shape{3}, {-1, -1, 3}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); + const auto splits = + op::Constant::create(element::Type_t::i64, Shape{3}, {-1, -1, 3}); const auto split = make_shared(data, axis, splits); FAIL() << "Split node was created with incorrect data."; } @@ -161,9 +163,9 @@ TEST(type_prop, variadic_split_shape_partially_dynamic) { // Variadic split shape {12,?} into {7,?}, {3,?} and {2,?} auto var_split1 = make_shared( - make_shared(element::i32, PartialShape{12, Dimension()}), - op::Constant::create(element::i64, Shape{}, {-2}), - op::Constant::create(element::i64, Shape{3}, {7, -1, 2})); + make_shared(element::Type_t::i32, PartialShape{12, Dimension()}), + op::Constant::create(element::Type_t::i64, Shape{}, {-2}), + 
op::Constant::create(element::Type_t::i64, Shape{3}, {7, -1, 2})); EXPECT_TRUE( var_split1->get_output_partial_shape(0).same_scheme(PartialShape{7, Dimension::dynamic()})); @@ -174,9 +176,9 @@ TEST(type_prop, variadic_split_shape_partially_dynamic) // Variadic split shape {?,?,6} into {?,?,3}, {?,?,1} and {?,?,2} auto var_split2 = make_shared( - make_shared(element::i32, PartialShape{Dimension(), Dimension(), 6}), - op::Constant::create(element::i64, Shape{}, {2}), - op::Constant::create(element::i64, Shape{3}, {3, 1, 2})); + make_shared(element::Type_t::i32, PartialShape{Dimension(), Dimension(), 6}), + op::Constant::create(element::Type_t::i64, Shape{}, {2}), + op::Constant::create(element::Type_t::i64, Shape{3}, {3, 1, 2})); EXPECT_TRUE(var_split2->get_output_partial_shape(0).same_scheme( PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3})); @@ -187,9 +189,9 @@ TEST(type_prop, variadic_split_shape_partially_dynamic) // Variadic split shape {?,6} into {?,6}, and {?,0} auto var_split3 = make_shared( - make_shared(element::i32, PartialShape{Dimension(), 6}), - op::Constant::create(element::i64, Shape{}, {1}), - op::Constant::create(element::i64, Shape{2}, {6, 0})); + make_shared(element::Type_t::i32, PartialShape{Dimension(), 6}), + op::Constant::create(element::Type_t::i64, Shape{}, {1}), + op::Constant::create(element::Type_t::i64, Shape{2}, {6, 0})); EXPECT_TRUE( var_split3->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 6})); diff --git a/ngraph/test/type_prop_layers.cpp b/ngraph/test/type_prop_layers.cpp index 159c84a56bc92d..2841deb129dcc9 100644 --- a/ngraph/test/type_prop_layers.cpp +++ b/ngraph/test/type_prop_layers.cpp @@ -33,19 +33,19 @@ using namespace ngraph; TEST(type_prop_layers, ctc_greedy_decoder) { - auto input = make_shared(element::f32, Shape{88, 2, 48}); - auto seq_len = make_shared(element::f32, Shape{88, 2}); + auto input = make_shared(element::Type_t::f32, Shape{88, 2, 48}); + auto seq_len = make_shared(element::Type_t::f32, Shape{88, 2}); auto op = make_shared(input, seq_len, false); ASSERT_EQ(op->get_shape(), (Shape{2, 88, 1, 1})); } TEST(type_prop_layers, detection_output) { - auto box_logits = make_shared(element::f32, Shape{4, 1, 5, 5}); - auto class_preds = make_shared(element::f32, Shape{2, 1, 4, 5}); - auto proposals = make_shared(element::f32, Shape{2, 1, 4, 5}); - auto aux_class_preds = make_shared(element::f32, Shape{2, 1, 4, 5}); - auto aux_box_preds = make_shared(element::f32, Shape{2, 1, 4, 5}); + auto box_logits = make_shared(element::Type_t::f32, Shape{4, 1, 5, 5}); + auto class_preds = make_shared(element::Type_t::f32, Shape{2, 1, 4, 5}); + auto proposals = make_shared(element::Type_t::f32, Shape{2, 1, 4, 5}); + auto aux_class_preds = make_shared(element::Type_t::f32, Shape{2, 1, 4, 5}); + auto aux_box_preds = make_shared(element::Type_t::f32, Shape{2, 1, 4, 5}); op::DetectionOutputAttrs attrs; attrs.keep_top_k = {200}; auto op = make_shared( @@ -55,9 +55,9 @@ TEST(type_prop_layers, detection_output) TEST(type_prop_layers, interpolate) { - auto image = make_shared(element::f32, Shape{2, 2, 33, 65}); - auto dyn_output_shape = make_shared(element::i64, Shape{2}); - auto output_shape = op::v0::Constant::create(element::i64, Shape{2}, {15, 30}); + auto image = make_shared(element::Type_t::f32, Shape{2, 2, 33, 65}); + auto dyn_output_shape = make_shared(element::Type_t::i64, Shape{2}); + auto output_shape = op::v0::Constant::create(element::Type_t::i64, Shape{2}, {15, 30}); op::v0::InterpolateAttrs attrs; attrs.axes = 
{2, 3}; @@ -80,8 +80,8 @@ TEST(type_prop_layers, prior_box1) attrs.min_size = {2.0f, 3.0f}; attrs.aspect_ratio = {1.5f, 2.0f, 2.5f}; - auto layer_shape = op::Constant::create(element::i64, Shape{2}, {32, 32}); - auto image_shape = op::Constant::create(element::i64, Shape{2}, {300, 300}); + auto layer_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {32, 32}); + auto image_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {300, 300}); auto pb = make_shared(layer_shape, image_shape, attrs); ASSERT_EQ(pb->get_shape(), (Shape{2, 20480})); } @@ -93,8 +93,8 @@ TEST(type_prop_layers, prior_box2) attrs.aspect_ratio = {1.5f, 2.0f, 2.5f}; attrs.flip = true; - auto layer_shape = op::Constant::create(element::i64, Shape{2}, {32, 32}); - auto image_shape = op::Constant::create(element::i64, Shape{2}, {300, 300}); + auto layer_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {32, 32}); + auto image_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {300, 300}); auto pb = make_shared(layer_shape, image_shape, attrs); ASSERT_EQ(pb->get_shape(), (Shape{2, 32768})); } @@ -108,8 +108,8 @@ TEST(type_prop_layers, prior_box3) attrs.flip = true; attrs.scale_all_sizes = true; - auto layer_shape = op::Constant::create(element::i64, Shape{2}, {1, 1}); - auto image_shape = op::Constant::create(element::i64, Shape{2}, {300, 300}); + auto layer_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 1}); + auto image_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {300, 300}); auto pb = make_shared(layer_shape, image_shape, attrs); ASSERT_EQ(pb->get_shape(), (Shape{2, 16})); } @@ -120,8 +120,8 @@ TEST(type_prop_layers, prior_box_clustered) attrs.widths = {4.0f, 2.0f, 3.2f}; attrs.heights = {1.0f, 2.0f, 1.1f}; - auto layer_shape = op::Constant::create(element::i64, Shape{2}, {19, 19}); - auto image_shape = op::Constant::create(element::i64, Shape{2}, {300, 300}); + auto layer_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {19, 19}); + auto image_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {300, 300}); auto pbc = make_shared(layer_shape, image_shape, attrs); // Output shape - 4 * 19 * 19 * 3 (attrs.widths.size()) ASSERT_EQ(pbc->get_shape(), (Shape{2, 4332})); @@ -129,21 +129,21 @@ TEST(type_prop_layers, prior_box_clustered) TEST(type_prop_layers, region_yolo1) { - auto inputs = make_shared(element::f32, Shape{1, 125, 13, 13}); + auto inputs = make_shared(element::Type_t::f32, Shape{1, 125, 13, 13}); auto op = make_shared(inputs, 0, 0, 0, true, std::vector{}, 0, 1); ASSERT_EQ(op->get_shape(), (Shape{1 * 125, 13, 13})); } TEST(type_prop_layers, region_yolo2) { - auto inputs = make_shared(element::f32, Shape{1, 125, 13, 13}); + auto inputs = make_shared(element::Type_t::f32, Shape{1, 125, 13, 13}); auto op = make_shared(inputs, 0, 0, 0, true, std::vector{}, 0, 2); ASSERT_EQ(op->get_shape(), (Shape{1 * 125 * 13, 13})); } TEST(type_prop_layers, region_yolo3) { - auto inputs = make_shared(element::f32, Shape{1, 125, 13, 13}); + auto inputs = make_shared(element::Type_t::f32, Shape{1, 125, 13, 13}); auto op = make_shared(inputs, 4, 80, 1, false, std::vector{6, 7, 8}, 0, -1); ASSERT_EQ(op->get_shape(), (Shape{1, (80 + 4 + 1) * 3, 13, 13})); @@ -151,23 +151,23 @@ TEST(type_prop_layers, region_yolo3) TEST(type_prop_layers, reorg_yolo) { - auto inputs = make_shared(element::f32, Shape{2, 24, 34, 62}); + auto inputs = make_shared(element::Type_t::f32, Shape{2, 24, 34, 62}); auto op = make_shared(inputs, Strides{2}); 
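// --- Illustrative aside (not part of the patch): a minimal sketch of the
// element::Type_t usage these test updates switch to. It assumes the ngraph
// public headers are available and that element::Type converts implicitly
// from the Type_t enum, which is what the rewritten test expressions rely on.
#include <memory>
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main() {
    // Build a parameter from the enum value instead of the element::f32 constant.
    auto data = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 4});
    // Comparisons against Type_t behave the same as against element::f32.
    bool is_f32 = (data->get_element_type() == element::Type_t::f32);
    return is_f32 ? 0 : 1;
}
// --- End of aside.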
ASSERT_EQ(op->get_shape(), (Shape{2, 96, 17, 31})); } TEST(type_prop_layers, psroi_pooling) { - auto inputs = make_shared(element::f32, Shape{1, 3, 4, 5}); - auto coords = make_shared(element::f32, Shape{150, 5}); + auto inputs = make_shared(element::Type_t::f32, Shape{1, 3, 4, 5}); + auto coords = make_shared(element::Type_t::f32, Shape{150, 5}); auto op = make_shared(inputs, coords, 2, 6, 0.0625, 0, 0, "Avg"); ASSERT_EQ(op->get_shape(), (Shape{150, 2, 6, 6})); } TEST(type_prop_layers, roi_pooling) { - auto inputs = make_shared(element::f32, Shape{2, 3, 4, 5}); - auto coords = make_shared(element::f32, Shape{150, 5}); - auto op = make_shared(inputs, coords, Shape{6, 6}, 0.0625, "Max"); + auto inputs = make_shared(element::Type_t::f32, Shape{2, 3, 4, 5}); + auto coords = make_shared(element::Type_t::f32, Shape{150, 5}); + auto op = make_shared(inputs, coords, Shape{6, 6}, 0.0625, "max"); ASSERT_EQ(op->get_shape(), (Shape{150, 3, 6, 6})); } diff --git a/ngraph/test/util.cpp b/ngraph/test/util.cpp index a85ab16921e45c..d24bafd31dfe80 100644 --- a/ngraph/test/util.cpp +++ b/ngraph/test/util.cpp @@ -147,15 +147,15 @@ TEST(util, all_close) auto backend = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, Shape{2, 3}); - auto b = backend->create_tensor(element::f32, Shape{2, 3}); + auto a = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); + auto b = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); copy_data(a, test::NDArray({{1, 2, 3}, {3, 4, 5}}).get_vector()); copy_data(b, test::NDArray({{1, 2, 3}, {3, 4, 5}}).get_vector()); EXPECT_TRUE(ngraph::test::all_close(a, b)); - auto c = backend->create_tensor(element::f32, Shape{2, 3}); + auto c = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); copy_data(c, test::NDArray({{1.1f, 2, 3}, {3, 4, 5}}).get_vector()); EXPECT_FALSE(ngraph::test::all_close(c, a, 0, .05f)); @@ -171,9 +171,9 @@ class CloneTest : public ::testing::Test public: // (A + B) * C Shape shape = Shape{2, 2}; - std::shared_ptr A = make_shared(element::f32, shape); - std::shared_ptr B = make_shared(element::f32, shape); - std::shared_ptr C = make_shared(element::f32, shape); + std::shared_ptr A = make_shared(element::Type_t::f32, shape); + std::shared_ptr B = make_shared(element::Type_t::f32, shape); + std::shared_ptr C = make_shared(element::Type_t::f32, shape); std::shared_ptr AplusB = A + B; std::shared_ptr AplusBtimesC = AplusB * C; @@ -233,7 +233,7 @@ TEST_F(CloneTest, clone_nodes_full) TEST_F(CloneTest, clone_nodes_partial) { // map A -> A' prior to clone - auto Aprime = make_shared(element::f32, shape); + auto Aprime = make_shared(element::Type_t::f32, shape); node_map[A.get()] = Aprime; auto cloned_nodes = clone_nodes(nodes, node_map); @@ -252,9 +252,9 @@ TEST_F(CloneTest, clone_function_full) TEST(graph_util, clone_multiple_results) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto A_add_B = make_shared(A, B); auto A_add_B_mul_C = make_shared(A_add_B, C); @@ -296,7 +296,7 @@ TEST(graph_util, get_subgraph_outputs_trivial_tests) ASSERT_EQ(outputs.size(), 0); Shape shape{}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto absn = make_shared(A); auto 
neg_absn = make_shared(absn); outputs = ngraph::get_subgraph_outputs(NodeVector{A}, NodeVector{}); @@ -308,7 +308,7 @@ TEST(graph_util, get_subgraph_outputs_trivial_tests) outputs = ngraph::get_subgraph_outputs(NodeVector{A, absn}, NodeVector{}); ASSERT_EQ(outputs, (NodeVector{absn})); - auto B = make_shared(element::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto abs_b = make_shared(B); auto neg_b = make_shared(B); auto abs_b_neg = make_shared(abs_b); @@ -334,9 +334,9 @@ TEST(graph_util, get_subgraph_outputs_trivial_tests) TEST(graph_util, test_subgraph_topological_sort) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto add = A + B; auto mul = C * add; auto result = make_shared(mul); @@ -348,9 +348,9 @@ TEST(graph_util, test_subgraph_topological_sort) TEST(graph_util, test_subgraph_topological_sort_control_dependencies) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto D = make_shared(A); auto E = make_shared(B); auto add = A + B; @@ -511,7 +511,7 @@ TEST(graph, huge) { std::vector> weak_nodes; { - auto param = make_shared(element::f32, Shape{3, 3}); + auto param = make_shared(element::Type_t::f32, Shape{3, 3}); std::shared_ptr n = param; weak_nodes.push_back(n); for (size_t i = 0; i < 1000000; i++) @@ -602,8 +602,8 @@ TEST(util, apply_permutation_pshape_rank_dynamic_inviable_permutation_fails) TEST(util, clone_function_friendly_name) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); A->set_friendly_name("A"); @@ -625,9 +625,9 @@ TEST(util, clone_function_friendly_name) TEST(util, clone_function_op_annotations) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto f = make_shared(A + B + C, ParameterVector{A, B, C}); auto cacheable_op_annotation = std::make_shared(); @@ -663,9 +663,9 @@ TEST(util, clone_function_op_annotations) TEST(util, topological_sort_replace) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto f = make_shared(A + B + C, ParameterVector{A, B, C}); bool custom_sorter_used = false; @@ -756,7 +756,7 @@ TEST(util_host_tensor_2_vector, ht_boolean_2_vec_bool) vector input{1, 0, 1, 0}; vector output{true, false, true, false}; host_tensor_2_vector_test( - input, output, element::boolean); + input, output, element::Type_t::boolean); } TEST(util_host_tensor_2_vector, 
ht_boolean_2_vec_int64) @@ -764,7 +764,7 @@ TEST(util_host_tensor_2_vector, ht_boolean_2_vec_int64) vector input{1, 0, 1, 0}; vector output{true, false, true, false}; host_tensor_2_vector_test( - input, output, element::boolean); + input, output, element::Type_t::boolean); } TEST(util_host_tensor_2_vector, ht_i8_2_vec_int64) @@ -774,7 +774,7 @@ TEST(util_host_tensor_2_vector, ht_i8_2_vec_int64) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::i8); + input, output, element::Type_t::i8); } TEST(util_host_tensor_2_vector, ht_i16_2_vec_int64) @@ -784,7 +784,7 @@ TEST(util_host_tensor_2_vector, ht_i16_2_vec_int64) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::i16); + input, output, element::Type_t::i16); } TEST(util_host_tensor_2_vector, ht_i32_2_vec_int64) @@ -794,7 +794,7 @@ TEST(util_host_tensor_2_vector, ht_i32_2_vec_int64) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::i32); + input, output, element::Type_t::i32); } TEST(util_host_tensor_2_vector, ht_i64_2_vec_int64) @@ -803,7 +803,7 @@ TEST(util_host_tensor_2_vector, ht_i64_2_vec_int64) 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; vector output{input}; host_tensor_2_vector_test( - input, output, element::i64); + input, output, element::Type_t::i64); } TEST(util_host_tensor_2_vector, ht_bf16_2_vec_double) @@ -813,7 +813,7 @@ TEST(util_host_tensor_2_vector, ht_bf16_2_vec_double) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::bf16); + input, output, element::Type_t::bf16); } TEST(util_host_tensor_2_vector, ht_f16_2_vec_double) @@ -823,7 +823,7 @@ TEST(util_host_tensor_2_vector, ht_f16_2_vec_double) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::f16); + input, output, element::Type_t::f16); } TEST(util_host_tensor_2_vector, ht_f32_2_vec_double) @@ -832,7 +832,7 @@ TEST(util_host_tensor_2_vector, ht_f32_2_vec_double) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::f32); + input, output, element::Type_t::f32); } TEST(util_host_tensor_2_vector, ht_f64_2_vec_double) @@ -842,7 +842,7 @@ TEST(util_host_tensor_2_vector, ht_f64_2_vec_double) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::f64); + input, output, element::Type_t::f64); } TEST(util_host_tensor_2_vector, ht_u8_2_vec_uint64) @@ -852,7 +852,7 @@ TEST(util_host_tensor_2_vector, ht_u8_2_vec_uint64) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::u8); + input, output, element::Type_t::u8); } TEST(util_host_tensor_2_vector, ht_u16_2_vec_uint64) @@ -862,7 +862,7 @@ TEST(util_host_tensor_2_vector, ht_u16_2_vec_uint64) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::u16); + input, output, element::Type_t::u16); } TEST(util_host_tensor_2_vector, ht_u32_2_vec_uint64) @@ -872,7 +872,7 @@ TEST(util_host_tensor_2_vector, ht_u32_2_vec_uint64) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - 
input, output, element::u32); + input, output, element::Type_t::u32); } TEST(util_host_tensor_2_vector, ht_u64_2_vec_uint64) @@ -881,5 +881,5 @@ TEST(util_host_tensor_2_vector, ht_u64_2_vec_uint64) 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; vector output{input}; host_tensor_2_vector_test( - input, output, element::u64); + input, output, element::Type_t::u64); } diff --git a/ngraph/test/util/engine/ie_engines.cpp b/ngraph/test/util/engine/ie_engines.cpp index d25f3459e3cb1c..bfe48177d81c9b 100644 --- a/ngraph/test/util/engine/ie_engines.cpp +++ b/ngraph/test/util/engine/ie_engines.cpp @@ -88,6 +88,9 @@ namespace case InferenceEngine::Precision::FP32: return compare_blobs(computed, expected, tolerance_bits); break; + case InferenceEngine::Precision::FP64: + return compare_blobs(computed, expected, tolerance_bits); + break; case InferenceEngine::Precision::I8: return compare_blobs(computed, expected, tolerance_bits); break; diff --git a/ngraph/test/util/test_tools.cpp b/ngraph/test/util/test_tools.cpp index 85adba4579f7fa..168fa8f975d3ea 100644 --- a/ngraph/test/util/test_tools.cpp +++ b/ngraph/test/util/test_tools.cpp @@ -62,12 +62,12 @@ bool validate_list(const vector>& nodes) shared_ptr make_test_graph() { - auto arg_0 = make_shared(element::f32, Shape{2, 2}); - auto arg_1 = make_shared(element::f32, Shape{2, 2}); - auto arg_2 = make_shared(element::f32, Shape{2, 2}); - auto arg_3 = make_shared(element::f32, Shape{2, 2}); - auto arg_4 = make_shared(element::f32, Shape{2, 2}); - auto arg_5 = make_shared(element::f32, Shape{2, 2}); + auto arg_0 = make_shared(element::Type_t::f32, Shape{2, 2}); + auto arg_1 = make_shared(element::Type_t::f32, Shape{2, 2}); + auto arg_2 = make_shared(element::Type_t::f32, Shape{2, 2}); + auto arg_3 = make_shared(element::Type_t::f32, Shape{2, 2}); + auto arg_4 = make_shared(element::Type_t::f32, Shape{2, 2}); + auto arg_5 = make_shared(element::Type_t::f32, Shape{2, 2}); auto t0 = make_shared(arg_0, arg_1); auto t1 = make_shared(t0, arg_2); @@ -141,47 +141,47 @@ void init_int_tv(ngraph::runtime::Tensor* tv, void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& engine) { element::Type et = tv->get_element_type(); - if (et == element::boolean) + if (et == element::Type_t::boolean) { init_int_tv(tv, engine, 0, 1); } - else if (et == element::f32) + else if (et == element::Type_t::f32) { init_real_tv(tv, engine, numeric_limits::min(), 1.0f); } - else if (et == element::f64) + else if (et == element::Type_t::f64) { init_real_tv(tv, engine, numeric_limits::min(), 1.0); } - else if (et == element::i8) + else if (et == element::Type_t::i8) { init_int_tv(tv, engine, -1, 1); } - else if (et == element::i16) + else if (et == element::Type_t::i16) { init_int_tv(tv, engine, -1, 1); } - else if (et == element::i32) + else if (et == element::Type_t::i32) { init_int_tv(tv, engine, 0, 1); } - else if (et == element::i64) + else if (et == element::Type_t::i64) { init_int_tv(tv, engine, 0, 1); } - else if (et == element::u8) + else if (et == element::Type_t::u8) { init_int_tv(tv, engine, 0, 1); } - else if (et == element::u16) + else if (et == element::Type_t::u16) { init_int_tv(tv, engine, 0, 1); } - else if (et == element::u32) + else if (et == element::Type_t::u32) { init_int_tv(tv, engine, 0, 1); } - else if (et == element::u64) + else if (et == element::Type_t::u64) { init_int_tv(tv, engine, 0, 1); } diff --git a/ngraph/test/util/visitor.hpp b/ngraph/test/util/visitor.hpp index f9a01cd07c60c2..c366a84beafd0d 100644 --- 
a/ngraph/test/util/visitor.hpp +++ b/ngraph/test/util/visitor.hpp @@ -333,7 +333,7 @@ namespace ngraph void on_adapter(const std::string& name, ValueAccessor& adapter) override { HostTensorPtr data = - std::make_shared(element::u8, Shape{adapter.size()}); + std::make_shared(element::Type_t::u8, Shape{adapter.size()}); data->write(adapter.get_ptr(), adapter.size()); m_values.insert(name, data); } diff --git a/openvino/conditional_compilation/CMakeLists.txt b/openvino/conditional_compilation/CMakeLists.txt index c6798daff2aea9..de5e1d9ace89d2 100644 --- a/openvino/conditional_compilation/CMakeLists.txt +++ b/openvino/conditional_compilation/CMakeLists.txt @@ -44,3 +44,6 @@ elseif(SELECTIVE_BUILD STREQUAL "ON") ov_force_include(${TARGET_NAME} INTERFACE ${GENERATED_HEADER}) endif() + +file(GLOB_RECURSE hdrs "${CMAKE_CURRENT_SOURCE_DIR}/include/*.h") +add_cpplint_target(${TARGET_NAME}_cpplint FOR_SOURCES ${hdrs}) diff --git a/openvino/conditional_compilation/include/openvino/cc/factory.h b/openvino/conditional_compilation/include/openvino/cc/factory.h index f971e0fc028db0..476155491d3ef2 100644 --- a/openvino/conditional_compilation/include/openvino/cc/factory.h +++ b/openvino/conditional_compilation/include/openvino/cc/factory.h @@ -21,124 +21,127 @@ #include #include -namespace openvino -{ - namespace cc - { - template - class Factory; - - template - class Factory { - Factory(Factory const&) = delete; - Factory& operator=(Factory const&) = delete; - - public: - using builder_t = std::function; - - Factory(const std::string & name) - : name(name) {} - - #ifdef SELECTIVE_BUILD - #define registerNodeIfRequired(Module, Name, key, Impl) \ - OV_CC_EXPAND(OV_CC_CAT(registerImpl, OV_CC_SCOPE_IS_ENABLED(OV_CC_CAT3(Module, _, Name)))(key)) - #define createNodeIfRegistered(Module, key, ...) createImpl(key, __VA_ARGS__) - - template - void registerImpl0(const Key &) { - } - - template - void registerImpl1(const Key & key) { - builders[key] = [](Args... args) -> T { - Impl *impl = new Impl(args...); - return static_cast(impl); - }; - } - - T createImpl(const Key & key, Args... args) { - auto builder = builders.find(key); - if (builder != builders.end()) { - return builder->second(args...); - } - return nullptr; - } - #elif defined(SELECTIVE_BUILD_ANALYZER) - #define registerNodeIfRequired(Module, Name, key, Impl) registerImpl(key, OV_CC_TOSTRING(Name)) - #define createNodeIfRegistered(Module, key, ...) createImpl(key, __VA_ARGS__) - - template - void registerImpl(const Key & key, const char *typeName) { - const std::string task_name = "REG$" + name + "$" + to_string(key) + "$" + typeName; - openvino::itt::ScopedTask task(openvino::itt::handle(task_name)); - builders[key] = [](Args... args) -> T { - Impl *impl = new Impl(args...); - return static_cast(impl); - }; - } - - template - T createImpl(const Key & key, Args... args) { - auto builder = builders.find(key); - if (builder != builders.end()) { - const std::string task_name = "CREATE$" + name + "$" + to_string(key); - openvino::itt::ScopedTask task(openvino::itt::handle(task_name)); - return builder->second(args...); - } - return nullptr; - } - #else - #define registerNodeIfRequired(Module, Name, key, Impl) registerImpl(key) - #define createNodeIfRegistered(Module, key, ...) createImpl(key, __VA_ARGS__) - - template - void registerImpl(const Key & key) { - builders[key] = [](Args... args) -> T { - Impl *impl = new Impl(args...); - return static_cast(impl); - }; - } - - T createImpl(const Key & key, Args... 
args) { - auto builder = builders.find(key); - if (builder != builders.end()) { - return builder->second(args...); - } - return nullptr; - } - #endif - - template - void foreach(Fn fn) const { - for (auto itm : builders) - fn(itm); - } - - size_t size() const noexcept { - return builders.size(); - } - - private: - const std::string & to_string(const std::string & str) const noexcept { - return str; - } - - template::value, bool>::type = true> - std::string to_string(V val) const { - return std::to_string(static_cast(val)); - } - - template::value, bool>::type = true> - std::string to_string(V val) const { - return std::to_string(val); - } - - using map_t = std::unordered_map; - - const std::string name; - map_t builders; +namespace openvino { +namespace cc { + +template +class Factory; + +template +class Factory { + Factory(Factory const&) = delete; + Factory& operator=(Factory const&) = delete; + +public: + using builder_t = std::function; + + Factory(const std::string & name) + : name(name) {} + +#ifdef SELECTIVE_BUILD + #define registerNodeIfRequired(Module, Name, key, Impl) \ + OV_CC_EXPAND(OV_CC_CAT(registerImpl, OV_CC_SCOPE_IS_ENABLED(OV_CC_CAT3(Module, _, Name)))(key)) + #define createNodeIfRegistered(Module, key, ...) createImpl(key, __VA_ARGS__) + + template + void registerImpl0(const Key &) { + } + + template + void registerImpl1(const Key & key) { + builders[key] = [](Args... args) -> T { + Impl *impl = new Impl(args...); + return static_cast(impl); + }; + } + + T createImpl(const Key & key, Args... args) { + auto builder = builders.find(key); + if (builder != builders.end()) { + return builder->second(args...); + } + return nullptr; + } + +#elif defined(SELECTIVE_BUILD_ANALYZER) + #define registerNodeIfRequired(Module, Name, key, Impl) registerImpl(key, OV_CC_TOSTRING(Name)) + #define createNodeIfRegistered(Module, key, ...) createImpl(key, __VA_ARGS__) + + template + void registerImpl(const Key & key, const char *typeName) { + const std::string task_name = "REG$" + name + "$" + to_string(key) + "$" + typeName; + openvino::itt::ScopedTask task(openvino::itt::handle(task_name)); + builders[key] = [](Args... args) -> T { + Impl *impl = new Impl(args...); + return static_cast(impl); }; } -} + + template + T createImpl(const Key & key, Args... args) { + auto builder = builders.find(key); + if (builder != builders.end()) { + const std::string task_name = "CREATE$" + name + "$" + to_string(key); + openvino::itt::ScopedTask task(openvino::itt::handle(task_name)); + return builder->second(args...); + } + return nullptr; + } + +#else + + #define registerNodeIfRequired(Module, Name, key, Impl) registerImpl(key) + #define createNodeIfRegistered(Module, key, ...) createImpl(key, __VA_ARGS__) + + template + void registerImpl(const Key & key) { + builders[key] = [](Args... args) -> T { + Impl *impl = new Impl(args...); + return static_cast(impl); + }; + } + + T createImpl(const Key & key, Args... 
args) { + auto builder = builders.find(key); + if (builder != builders.end()) { + return builder->second(args...); + } + return nullptr; + } +#endif + + template + void foreach(Fn fn) const { + for (auto itm : builders) + fn(itm); + } + + size_t size() const noexcept { + return builders.size(); + } + +private: + const std::string & to_string(const std::string & str) const noexcept { + return str; + } + + template::value, bool>::type = true> + std::string to_string(V val) const { + return std::to_string(static_cast(val)); + } + + template::value, bool>::type = true> + std::string to_string(V val) const { + return std::to_string(val); + } + + using map_t = std::unordered_map; + + const std::string name; + map_t builders; +}; + +} // namespace cc +} // namespace openvino diff --git a/openvino/conditional_compilation/include/openvino/cc/selective_build.h b/openvino/conditional_compilation/include/openvino/cc/selective_build.h index 3b9fe9dd66f7ed..0994a86419c789 100644 --- a/openvino/conditional_compilation/include/openvino/cc/selective_build.h +++ b/openvino/conditional_compilation/include/openvino/cc/selective_build.h @@ -70,50 +70,52 @@ #include #ifdef SELECTIVE_BUILD_ANALYZER -#include +# include #endif #include #include -namespace openvino -{ - namespace cc - { +namespace openvino { +namespace cc { + #ifndef SELECTIVE_BUILD_ANALYZER - namespace internal - { - template - struct case_wrapper { - using type = T; - const C value {}; - - case_wrapper(C && val) - : value(std::forward(val)) - {} - }; - - template - case_wrapper make_case_wrapper(C && val) { - return case_wrapper(std::forward(val)); - } - - template class Fn, typename Ctx, typename T, typename Case> - bool match(Ctx && ctx, T && val, Case && cs) { - const bool is_matched = val == cs.value; - if (is_matched) - Fn()(std::forward(ctx)); - return is_matched; - } - - template class Fn, typename Ctx, typename T, typename Case, typename ...Cases> - bool match(Ctx && ctx, T && val, Case && cs, Cases&&... cases) { - if (match(std::forward(ctx), std::forward(val), std::forward(cs))) - return true; - return match(std::forward(ctx), std::forward(val), std::forward(cases)...); - } - } // namespace internal -#endif + +namespace internal { + +template +struct case_wrapper { + using type = T; + const C value {}; + + case_wrapper(C && val) + : value(std::forward(val)) + {} +}; + +template +case_wrapper make_case_wrapper(C && val) { + return case_wrapper(std::forward(val)); +} + +template class Fn, typename Ctx, typename T, typename Case> +bool match(Ctx && ctx, T && val, Case && cs) { + const bool is_matched = val == cs.value; + if (is_matched) + Fn()(std::forward(ctx)); + return is_matched; +} + +template class Fn, typename Ctx, typename T, typename Case, typename ...Cases> +bool match(Ctx && ctx, T && val, Case && cs, Cases&&... 
cases) { + if (match(std::forward(ctx), std::forward(val), std::forward(cs))) + return true; + return match(std::forward(ctx), std::forward(val), std::forward(cases)...); +} + +} // namespace internal + +#endif // SELECTIVE_BUILD_ANALYZER // Macros for names concatenation #define OV_CC_CAT_(x, y) x ## y @@ -137,52 +139,53 @@ namespace openvino OV_ITT_DOMAIN(OV_CC_CAT(SWITCH_, Module)); /* Domain for switch/cases */ \ OV_ITT_DOMAIN(OV_CC_CAT(FACTORY_, Module)); /* Domain for factories */ -namespace internal -{ - template - struct case_wrapper { - using type = T; - const C value {}; - const char *name = nullptr; - - case_wrapper(C && val, const char *name) - : value(std::forward(val)) - , name(name) - {} - }; - - template - case_wrapper make_case_wrapper(C && val, const char *name) { - return case_wrapper(std::forward(val), name); - } +namespace internal { - template class Fn, - typename Ctx, - typename T, - typename Case> - bool match(char const *region, Ctx && ctx, T && val, Case && cs) { - const bool is_matched = val == cs.value; - if (is_matched) { - openvino::itt::ScopedTask task( - openvino::itt::handle( - std::string(region) + "$" + cs.name)); - Fn()(std::forward(ctx)); - } - return is_matched; - } +template +struct case_wrapper { + using type = T; + const C value {}; + const char *name = nullptr; + + case_wrapper(C && val, const char *name) + : value(std::forward(val)) + , name(name) + {} +}; + +template +case_wrapper make_case_wrapper(C && val, const char *name) { + return case_wrapper(std::forward(val), name); +} - template class Fn, - typename Ctx, - typename T, - typename Case, typename ...Cases> - bool match(char const *region, Ctx && ctx, T && val, Case && cs, Cases&&... cases) { - if (match(region, std::forward(ctx), std::forward(val), std::forward(cs))) - return true; - return match(region, std::forward(ctx), std::forward(val), std::forward(cases)...); +template class Fn, + typename Ctx, + typename T, + typename Case> +bool match(char const *region, Ctx && ctx, T && val, Case && cs) { + const bool is_matched = val == cs.value; + if (is_matched) { + openvino::itt::ScopedTask task( + openvino::itt::handle( + std::string(region) + "$" + cs.name)); + Fn()(std::forward(ctx)); } -} // namespace internal + return is_matched; +} + +template class Fn, + typename Ctx, + typename T, + typename Case, typename ...Cases> +bool match(char const *region, Ctx && ctx, T && val, Case && cs, Cases&&... cases) { + if (match(region, std::forward(ctx), std::forward(val), std::forward(cs))) + return true; + return match(region, std::forward(ctx), std::forward(val), std::forward(cases)...); +} + +} // namespace internal #define OV_SCOPE(Module, region, ...) 
\ OV_ITT_SCOPED_TASK(OV_CC_CAT(SIMPLE_, Module), OV_CC_TOSTRING(region)); \ @@ -261,5 +264,5 @@ namespace internal #endif - } -} +} // namespace cc +} // namespace openvino diff --git a/openvino/itt/CMakeLists.txt b/openvino/itt/CMakeLists.txt index 766521a13997fc..efbdf7f392bd08 100644 --- a/openvino/itt/CMakeLists.txt +++ b/openvino/itt/CMakeLists.txt @@ -18,7 +18,7 @@ set(TARGET_NAME itt) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") -file(GLOB_RECURSE SOURCES "src/*.cpp") +file(GLOB_RECURSE SOURCES "src/*.cpp" "src/*.hpp") if(ENABLE_PROFILING_ITT) if(DEFINED INTEL_VTUNE_DIR OR DEFINED ENV{INTEL_VTUNE_DIR}) @@ -71,3 +71,5 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") endif() target_include_directories(${TARGET_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) + +add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) diff --git a/openvino/itt/src/itt.cpp b/openvino/itt/src/itt.cpp index 92dff2b8ba9c61..9fe27e1d8f12d5 100644 --- a/openvino/itt/src/itt.cpp +++ b/openvino/itt/src/itt.cpp @@ -21,62 +21,59 @@ #include #endif -namespace openvino -{ - namespace itt - { - namespace internal - { +namespace openvino { +namespace itt { +namespace internal { + #ifdef ENABLE_PROFILING_ITT - static size_t callStackDepth() - { - static const char *env = std::getenv("OPENVINO_TRACE_DEPTH"); - static const size_t depth = env ? std::strtoul(env, nullptr, 10): 0; - return depth; - } - - static thread_local uint32_t call_stack_depth = 0; - - domain_t domain(char const* name) - { - return reinterpret_cast(__itt_domain_create(name)); - } - - handle_t handle(char const* name) - { - return reinterpret_cast(__itt_string_handle_create(name)); - } - - void taskBegin(domain_t d, handle_t t) - { - if (!callStackDepth() || call_stack_depth++ < callStackDepth()) - __itt_task_begin(reinterpret_cast<__itt_domain*>(d), - __itt_null, - __itt_null, - reinterpret_cast<__itt_string_handle*>(t)); - } - - void taskEnd(domain_t d) - { - if (!callStackDepth() || call_stack_depth-- > 0) - __itt_task_end(reinterpret_cast<__itt_domain*>(d)); - } - - void threadName(const char* name) - { - __itt_thread_set_name(name); - } -#else - domain_t domain(char const *) { return nullptr; } - handle_t handle(char const *) { return nullptr; } +static size_t callStackDepth() { + static const char *env = std::getenv("OPENVINO_TRACE_DEPTH"); + static const size_t depth = env ? 
std::strtoul(env, nullptr, 10): 0; + return depth; +} - void taskBegin(domain_t, handle_t) { } +static thread_local uint32_t call_stack_depth = 0; - void taskEnd(domain_t) { } +domain_t domain(char const* name) { + return reinterpret_cast(__itt_domain_create(name)); +} - void threadName(const char *) { } -#endif - } - } +handle_t handle(char const* name) { + return reinterpret_cast(__itt_string_handle_create(name)); +} + +void taskBegin(domain_t d, handle_t t) { + if (!callStackDepth() || call_stack_depth++ < callStackDepth()) + __itt_task_begin(reinterpret_cast<__itt_domain*>(d), + __itt_null, + __itt_null, + reinterpret_cast<__itt_string_handle*>(t)); +} + +void taskEnd(domain_t d) { + if (!callStackDepth() || call_stack_depth-- > 0) + __itt_task_end(reinterpret_cast<__itt_domain*>(d)); +} + +void threadName(const char* name) { + __itt_thread_set_name(name); } + +#else + +domain_t domain(char const *) { return nullptr; } + +handle_t handle(char const *) { return nullptr; } + +void taskBegin(domain_t, handle_t) { } + +void taskEnd(domain_t) { } + +void threadName(const char *) { } + +#endif // ENABLE_PROFILING_ITT + +} // namespace internal +} // namespace itt +} // namespace openvino diff --git a/scripts/setupvars/setupvars.bat b/scripts/setupvars/setupvars.bat index 4ba1b0dffa83ff..21ee3663aa232e 100644 --- a/scripts/setupvars/setupvars.bat +++ b/scripts/setupvars/setupvars.bat @@ -38,11 +38,23 @@ set "PATH=%INTEL_OPENVINO_DIR%\deployment_tools\model_optimizer;%PATH%" :: Inference Engine set "InferenceEngine_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\share" set "HDDL_INSTALL_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\hddl" -set "PATH=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\tbb\bin;%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\Release;%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\Debug;%HDDL_INSTALL_DIR%\bin;%PATH%" +set "OPENMP_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\omp\lib" +set "GNA_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\gna\lib" + +set "PATH=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\Release;%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\Debug;%HDDL_INSTALL_DIR%\bin;%OPENMP_DIR%;%GNA_DIR%;%PATH%" +if exist %INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\arch_descriptions ( +set ARCH_ROOT_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\arch_descriptions +) if exist %INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\arch_descriptions ( set ARCH_ROOT_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\bin\intel64\arch_descriptions ) +:: TBB +if exist %INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\tbb ( +set "PATH=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\tbb\bin;%PATH%" +set "TBB_DIR=%INTEL_OPENVINO_DIR%\deployment_tools\inference_engine\external\tbb\cmake" +) + :: nGraph if exist %INTEL_OPENVINO_DIR%\deployment_tools\ngraph ( set "PATH=%INTEL_OPENVINO_DIR%\deployment_tools\ngraph\lib;%PATH%" diff --git a/scripts/setupvars/setupvars.sh b/scripts/setupvars/setupvars.sh index 03ec42b22d0179..02cd4c5934ebd6 100755 --- a/scripts/setupvars/setupvars.sh +++ b/scripts/setupvars/setupvars.sh @@ -51,10 +51,10 @@ if [ -e "$INSTALLDIR/deployment_tools/inference_engine" ]; then export HDDL_INSTALL_DIR=$INSTALLDIR/deployment_tools/inference_engine/external/hddl if [[ "$OSTYPE" == "darwin"* 
]]; then - export DYLD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/mkltiny_mac/lib:$INSTALLDIR/deployment_tools/inference_engine/external/tbb/lib:${IE_PLUGINS_PATH}${DYLD_LIBRARY_PATH:+:DYLD_LIBRARY_PATH} - export LD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/mkltiny_mac/lib:$INSTALLDIR/deployment_tools/inference_engine/external/tbb/lib:${IE_PLUGINS_PATH}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} + export DYLD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/omp/lib:$INSTALLDIR/deployment_tools/inference_engine/external/mkltiny_mac/lib:${IE_PLUGINS_PATH}${DYLD_LIBRARY_PATH:+:DYLD_LIBRARY_PATH} + export LD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/omp/lib:$INSTALLDIR/deployment_tools/inference_engine/external/mkltiny_mac/lib:${IE_PLUGINS_PATH}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} else - export LD_LIBRARY_PATH=$HDDL_INSTALL_DIR/lib:$INSTALLDIR/deployment_tools/inference_engine/external/gna/lib:$INSTALLDIR/deployment_tools/inference_engine/external/mkltiny_lnx/lib:$INSTALLDIR/deployment_tools/inference_engine/external/tbb/lib:${IE_PLUGINS_PATH}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} + export LD_LIBRARY_PATH=$HDDL_INSTALL_DIR/lib:$INSTALLDIR/deployment_tools/inference_engine/external/omp/lib:$INSTALLDIR/deployment_tools/inference_engine/external/gna/lib:$INSTALLDIR/deployment_tools/inference_engine/external/mkltiny_lnx/lib:${IE_PLUGINS_PATH}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} fi HDDL_UNITE_DIR=$INSTALLDIR/deployment_tools/inference_engine/external/hddl_unite @@ -64,6 +64,14 @@ if [ -e "$INSTALLDIR/deployment_tools/inference_engine" ]; then fi fi +if [ -e "$INSTALLDIR/deployment_tools/inference_engine/external/tbb" ]; then + if [[ "$OSTYPE" == "darwin"* ]]; then + export DYLD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/tbb/lib:${DYLD_LIBRARY_PATH:+:DYLD_LIBRARY_PATH} + fi + export LD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/inference_engine/external/tbb/lib:${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} + export TBB_DIR=$INSTALLDIR/deployment_tools/inference_engine/external/tbb/cmake +fi + if [ -e "$INSTALLDIR/deployment_tools/ngraph" ]; then export LD_LIBRARY_PATH=$INSTALLDIR/deployment_tools/ngraph/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} export ngraph_DIR=$INSTALLDIR/deployment_tools/ngraph/cmake diff --git a/tests/time_tests/test_runner/.automation/tgl_test_config.yml b/tests/time_tests/test_runner/.automation/tgl_test_config.yml index 8b1a35fc4ac9cf..30df4972514cac 100644 --- a/tests/time_tests/test_runner/.automation/tgl_test_config.yml +++ b/tests/time_tests/test_runner/.automation/tgl_test_config.yml @@ -365,28 +365,28 @@ - device: name: CPU model: - path: ${VPUX_MODELS_PKG}/squeezenet1.1/tf/FP16/squeezenet1.1.xml + path: ${VPUX_MODELS_PKG}/squeezenet1.1/caffe2/FP16/squeezenet1.1.xml name: squeezenet1.1 precision: FP16 framework: caffe2 - device: name: GPU model: - path: ${VPUX_MODELS_PKG}/squeezenet1.1/tf/FP16/squeezenet1.1.xml + path: ${VPUX_MODELS_PKG}/squeezenet1.1/caffe2/FP16/squeezenet1.1.xml name: squeezenet1.1 precision: FP16 framework: caffe2 - device: name: CPU model: - path: ${VPUX_MODELS_PKG}/squeezenet1.1/tf/FP16-INT8/squeezenet1.1.xml + path: ${VPUX_MODELS_PKG}/squeezenet1.1/caffe2/FP16-INT8/squeezenet1.1.xml name: squeezenet1.1 precision: FP16-INT8 framework: caffe2 - device: name: GPU model: - path: ${VPUX_MODELS_PKG}/squeezenet1.1/tf/FP16-INT8/squeezenet1.1.xml + path: ${VPUX_MODELS_PKG}/squeezenet1.1/caffe2/FP16-INT8/squeezenet1.1.xml name: squeezenet1.1 precision: 
FP16-INT8 framework: caffe2
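// --- Illustrative aside (not part of the patch): a simplified, standalone
// sketch of the builder-registry pattern that openvino/cc/factory.h wraps in
// the SELECTIVE_BUILD macros reformatted above. The names NodeFactory and
// MyNode are invented for the example; only the register-lambda / look-up-and
// -build flow mirrors the header, with the macro plumbing and ITT tracing dropped.
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>

struct Node { virtual ~Node() = default; };
struct MyNode : Node {};

class NodeFactory {
public:
    using builder_t = std::function<std::unique_ptr<Node>()>;

    // Registration stores a lambda that constructs the concrete type.
    template <typename Impl>
    void registerImpl(const std::string& key) {
        builders[key] = [] { return std::unique_ptr<Node>(new Impl()); };
    }

    // Creation looks the key up and runs the stored builder, or returns null.
    std::unique_ptr<Node> createImpl(const std::string& key) const {
        auto it = builders.find(key);
        return it != builders.end() ? it->second() : nullptr;
    }

private:
    std::unordered_map<std::string, builder_t> builders;
};

int main() {
    NodeFactory factory;
    factory.registerImpl<MyNode>("my_node");
    auto node = factory.createImpl("my_node");  // non-null: the key was registered
    return node ? 0 : 1;
}
// --- End of aside.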