diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index 0e0df7af27b3fa..e153030e8b692a 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -52,6 +52,8 @@ jobs: displayName: 'System info' - script: | + echo TargetBranch: $(System.PullRequest.TargetBranch) + echo SourceBranch: $(Build.SourceBranch) rm -rf $(WORK_DIR) ; mkdir $(WORK_DIR) rm -rf $(BUILD_DIR) ; mkdir $(BUILD_DIR) displayName: 'Make dir' @@ -90,7 +92,17 @@ jobs: - task: CMake@1 inputs: # CMake must get Python 3.x version by default - cmakeArgs: -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=/usr/bin/python3.6 -DENABLE_TESTS=ON -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules $(REPO_DIR) + cmakeArgs: > + -GNinja + -DVERBOSE_BUILD=ON + -DENABLE_TEMPLATE_PLUGIN=ON + -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) + -DENABLE_PYTHON=ON + -DPYTHON_EXECUTABLE=/usr/bin/python3.6 + -DENABLE_TESTS=ON + -DENABLE_FASTER_BUILD=ON + -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules + $(REPO_DIR) workingDirectory: $(BUILD_DIR) - script: ninja @@ -132,6 +144,10 @@ jobs: displayName: 'IE FuncTests' continueOnError: false + - script: $(BIN_DIR)/templateFuncTests --gtest_filter=*smoke* --gtest_output=xml:TEST-templateFuncTests.xml + displayName: 'TEMPLATE FuncTests' + continueOnError: false + - script: $(BIN_DIR)/cpuFuncTests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml displayName: 'CPU FuncTests' continueOnError: false diff --git a/.ci/azure/linux_ngraph_onnx.yml b/.ci/azure/linux_ngraph_onnx.yml index f993670f98c95b..c6e363d7c99f19 100644 --- a/.ci/azure/linux_ngraph_onnx.yml +++ b/.ci/azure/linux_ngraph_onnx.yml @@ -64,7 +64,7 @@ jobs: - task: CMake@1 inputs: # CMake must get Python 3.x version by default - cmakeArgs: -GNinja -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_VPU=OFF -DENABLE_GNA=OFF -DENABLE_OPENCV=OFF -DENABLE_CPPLINT=OFF -DENABLE_TESTS=OFF -DENABLE_BEH_TESTS=OFF -DENABLE_FUNCTIONAL_TESTS=OFF -DENABLE_MKL_DNN=ON -DENABLE_CLDNN=OFF -DENABLE_PROFILING_ITT=OFF -DENABLE_SAMPLES=OFF -DENABLE_SPEECH_DEMO=OFF -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=/usr/bin/python3.6 -DNGRAPH_ONNX_IMPORT_ENABLE=ON -DNGRAPH_INTERPRETER_ENABLE=ON -DNGRAPH_DEBUG_ENABLE=OFF -DNGRAPH_DYNAMIC_COMPONENTS_ENABLE=ON -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) $(REPO_DIR) + cmakeArgs: -GNinja -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_VPU=OFF -DENABLE_GNA=OFF -DENABLE_OPENCV=OFF -DENABLE_CPPLINT=OFF -DENABLE_TESTS=OFF -DENABLE_MKL_DNN=ON -DENABLE_CLDNN=OFF -DENABLE_PROFILING_ITT=OFF -DENABLE_SAMPLES=OFF -DENABLE_SPEECH_DEMO=OFF -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=/usr/bin/python3.6 -DNGRAPH_ONNX_IMPORT_ENABLE=ON -DNGRAPH_INTERPRETER_ENABLE=ON -DNGRAPH_DEBUG_ENABLE=OFF -DNGRAPH_DYNAMIC_COMPONENTS_ENABLE=ON -DCMAKE_INSTALL_PREFIX=$(INSTALL_DIR) $(REPO_DIR) workingDirectory: $(BUILD_DIR) enabled: false diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml index efd5afba0a0a9d..14ba507980da83 100644 --- a/.ci/azure/windows.yml +++ b/.ci/azure/windows.yml @@ -32,6 +32,7 @@ jobs: BIN_DIR: $(REPO_DIR)\bin\intel64 MSVS_VARS_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat MSVC_COMPILER_PATH: C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.24.28314\bin\Hostx64\x64\cl.exe + TEST_ENV_PATH: $(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.1\opencv\bin;%PATH% steps: - script: | @@ -78,19 +79,14 @@ jobs: displayName: 'Install dependencies' - script: | - 
certutil -urlcache -split -f https://incredibuilddiag1wu2.blob.core.windows.net/incredibuild/IBSetupConsole_9_5_0.exe IBSetupConsole_9_5_0.exe - call IBSetupConsole_9_5_0.exe /Install /Components=Agent,oneuse /Coordinator=11.1.0.4 /AGENT:OPENFIREWALL=ON /AGENT:AUTOSELECTPORTS=ON /ADDTOPATH=ON /AGENT:INSTALLADDINS=OFF + certutil -urlcache -split -f https://incredibuilddiag1wu2.blob.core.windows.net/incredibuild/install_ib_console.bat install_ib_console.bat + call install_ib_console.bat workingDirectory: $(WORK_DIR) displayName: 'Install IncrediBuild' - - script: | - echo Stop IncrediBuild_Agent && net stop IncrediBuild_Agent - reg add HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Xoreax\IncrediBuild\Builder /f /v LastEnabled /d 0 && echo Start IncrediBuild_Agent && net start IncrediBuild_Agent - displayName: 'Start IncrediBuild' - - script: | set PATH=$(WORK_DIR)\ninja-win;%PATH% - call "$(MSVS_VARS_PATH)" && cmake -GNinja -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)\modules -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR) + call "$(MSVS_VARS_PATH)" && cmake -GNinja -DENABLE_FASTER_BUILD=ON -DENABLE_TEMPLATE_PLUGIN=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_TESTS=ON -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)\modules -DCMAKE_C_COMPILER:PATH="$(MSVC_COMPILER_PATH)" -DCMAKE_CXX_COMPILER:PATH="$(MSVC_COMPILER_PATH)" $(REPO_DIR) workingDirectory: $(BUILD_DIR) displayName: 'CMake' @@ -103,65 +99,72 @@ jobs: - script: echo Stop IncrediBuild_Agent && net stop IncrediBuild_Agent displayName: Stop IncrediBuild continueOnError: true + - script: dir $(REPO_DIR)\bin\ /s displayName: 'List files' - script: | - set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;%PATH% + set PATH=$(TEST_ENV_PATH) $(BIN_DIR)\unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml displayName: 'nGraph UT' continueOnError: false - script: | - set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;%PATH% + set PATH=$(TEST_ENV_PATH) $(BIN_DIR)\InferenceEngineUnitTests --gtest_print_time=1 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml displayName: 'IE UT old' continueOnError: false - script: | - set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;%PATH% + set PATH=$(TEST_ENV_PATH) $(BIN_DIR)\ieUnitTests --gtest_output=xml:TEST-ieUnitTests.xml displayName: 'IE UT' continueOnError: false - script: | - set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;%PATH% + set PATH=$(TEST_ENV_PATH) $(BIN_DIR)\cpuUnitTests --gtest_output=xml:TEST-cpuUnitTests.xml displayName: 'CPU UT' continueOnError: false - script: | - set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;%PATH% + set PATH=$(TEST_ENV_PATH) $(BIN_DIR)\gnaUnitTests --gtest_output=xml:TEST-gnaUnitTests.xml displayName: 'GNA UT' continueOnError: false - script: | - set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;%PATH% + set PATH=$(TEST_ENV_PATH) $(BIN_DIR)\vpuUnitTests --gtest_output=xml:TEST-vpuUnitTests.xml displayName: 'VPU UT' continueOnError: false - script: | - set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;%PATH% + set PATH=$(TEST_ENV_PATH) $(BIN_DIR)\onnxImporterUnitTests --gtest_output=xml:TEST-onnxImporterUnitTests.xml displayName: 'ONNX Importer UT' continueOnError: false - script: | - set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;%PATH% + set PATH=$(TEST_ENV_PATH) $(BIN_DIR)\ieFuncTests --gtest_output=xml:TEST-ieFuncTests.xml displayName: 'IE FuncTests' continueOnError: false 
- script: | - set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;%PATH% + set PATH=$(TEST_ENV_PATH) + $(BIN_DIR)\templateFuncTests --gtest_output=xml:TEST-templateFuncTests.xml + displayName: 'TEMPLATE FuncTests' + continueOnError: false + + - script: | + set PATH=$(TEST_ENV_PATH) $(BIN_DIR)\cpuFuncTests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:TEST-cpuFuncTests.xml displayName: 'CPU FuncTests' continueOnError: false - script: | - set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;%PATH% + set PATH=$(TEST_ENV_PATH) $(BIN_DIR)\MklDnnBehaviorTests --gtest_output=xml:TEST-MklDnnBehaviorTests.xml displayName: 'MklDnnBehaviorTests' continueOnError: false @@ -169,7 +172,7 @@ jobs: # Add for gtest-parallel, it hangs now (CVS-33386) #python $(BUILD_DIR)\gtest-parallel\gtest-parallel $(BIN_DIR)\MklDnnFunctionalTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=MklDnnFunctionalTests.json --gtest_filter=*smoke* -- --gtest_print_time=1 - script: | - set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.0\opencv\bin;%PATH% + set PATH=$(TEST_ENV_PATH) set DATA_PATH=$(MODELS_PATH) set MODELS_PATH=$(MODELS_PATH) $(BIN_DIR)\MklDnnFunctionalTests --gtest_filter=*smoke* --gtest_print_time=1 --gtest_output=xml:TEST-MklDnnFunctionalTests.xml @@ -177,7 +180,7 @@ jobs: continueOnError: false - script: | - set PATH=$(REPO_DIR)\inference-engine\temp\tbb\bin;$(REPO_DIR)\inference-engine\temp\opencv_4.5.0\opencv\bin;%PATH% + set PATH=$(TEST_ENV_PATH) set DATA_PATH=$(MODELS_PATH) set MODELS_PATH=$(MODELS_PATH) $(BIN_DIR)\InferenceEngineCAPITests --gtest_output=xml:TEST-InferenceEngineCAPITests.xml diff --git a/.ci/openvino-onnx/Dockerfile b/.ci/openvino-onnx/Dockerfile index 954b1634ed2a23..5fe36e46219061 100644 --- a/.ci/openvino-onnx/Dockerfile +++ b/.ci/openvino-onnx/Dockerfile @@ -57,8 +57,6 @@ RUN cmake .. 
\ -DENABLE_OPENCV=OFF \ -DENABLE_CPPLINT=OFF \ -DENABLE_TESTS=OFF \ - -DENABLE_BEH_TESTS=OFF \ - -DENABLE_FUNCTIONAL_TESTS=OFF \ -DENABLE_MKL_DNN=ON \ -DENABLE_CLDNN=OFF \ -DENABLE_PROFILING_ITT=OFF \ diff --git a/.ci/openvino-onnx/Jenkinsfile b/.ci/openvino-onnx/Jenkinsfile index 147b73afdccb5e..b16a134553a35d 100644 --- a/.ci/openvino-onnx/Jenkinsfile +++ b/.ci/openvino-onnx/Jenkinsfile @@ -77,7 +77,14 @@ def gitSubmoduleUpdate(String repository_name) { } } +def updateModels() { + sh """ + ./ngraph/python/tests/test_onnx/model_zoo_preprocess.sh -d ${HOME}/ONNX_CI/data -o + """ +} + def buildDockerImage() { + updateModels() sh """ docker build --tag=${DOCKER_IMAGE_TAG} --file=.ci/openvino-onnx/Dockerfile \ --build-arg http_proxy=http://proxy-chain.intel.com:911/ \ @@ -88,12 +95,12 @@ def buildDockerImage() { def runTests() { sh """ docker run --name ${DOCKER_CONTAINER_NAME} \ - --volume ${HOME}/ONNX_CI/onnx-models-28-Oct/.onnx/model_zoo:/root/.onnx/model_zoo \ - --volume ${HOME}/ONNX_CI/onnx-models/.onnx/model_zoo/MSFT:/root/.onnx/model_zoo/MSFT \ + --volume ${HOME}/ONNX_CI/data/model_zoo:/root/.onnx/model_zoo \ ${DOCKER_IMAGE_TAG} """ } + pipeline { agent { label "OpenVino" @@ -104,7 +111,7 @@ pipeline { } options { skipDefaultCheckout true - timeout(activity: true, time: 10, unit: 'MINUTES') + timeout(activity: true, time: 60, unit: 'MINUTES') } stages { stage("Clone repository") { @@ -118,14 +125,14 @@ pipeline { } stage("Prepare Docker environment") { steps{ - dir("${WORKDIR}") { + dir("${WORKDIR}") { buildDockerImage() } } } stage("Run tests") { options { - timeout(time: 15, unit: 'MINUTES') + timeout(time: 60, unit: 'MINUTES') } steps{ runTests() diff --git a/CMakeLists.txt b/CMakeLists.txt index d6bf93044b9ce5..fa0b9ac9119e84 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,23 +2,23 @@ # SPDX-License-Identifier: Apache-2.0 # -cmake_minimum_required(VERSION 3.13 FATAL_ERROR) +cmake_minimum_required(VERSION 3.13) project(OpenVINO) set(OpenVINO_MAIN_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) set(IE_MAIN_SOURCE_DIR ${OpenVINO_MAIN_SOURCE_DIR}/inference-engine) -list(APPEND CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake") -include(CTest) -include(features) +find_package(IEDevScripts REQUIRED + PATHS "${OpenVINO_MAIN_SOURCE_DIR}/cmake/developer_package" + NO_CMAKE_FIND_ROOT_PATH + NO_DEFAULT_PATH) -# include developer package -include(developer_package) +include(CTest) +include(cmake/features.cmake) # These options are shared with 3rdparty plugins by means of developer package -include(check_features) -include(dependencies) +include(cmake/dependencies.cmake) # resolving dependencies for the project message (STATUS "PROJECT ............................... " ${PROJECT_NAME}) @@ -30,8 +30,11 @@ message (STATUS "CMAKE_C_COMPILER_ID ................... " ${CMAKE_C_COMPILER_ID message (STATUS "CMAKE_BUILD_TYPE ...................... 
" ${CMAKE_BUILD_TYPE}) # remove file with exported developer targets to force its regeneration -file(REMOVE "${CMAKE_BINARY_DIR}/targets_developer.cmake") -file(REMOVE "${CMAKE_BINARY_DIR}/targets.cmake") +file(REMOVE "${CMAKE_BINARY_DIR}/inference_engine_targets.cmake") +foreach(component IN LISTS openvino_export_components) + file(REMOVE "${CMAKE_BINARY_DIR}/${component}_dev_targets.cmake") + unset(${component} CACHE) +endforeach() # # Build @@ -44,28 +47,25 @@ function(build_ngraph) endif() endfunction() - set(NGRAPH_BUILD_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY} CACHE STRING "" FORCE) - set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${OpenVINO_MAIN_SOURCE_DIR}/ngraph/cmake/Modules/") - if (ENABLE_SANITIZER) - ngraph_set(NGRAPH_ADDRESS_SANITIZER TRUE) + ngraph_set(NGRAPH_ADDRESS_SANITIZER ON) else () - ngraph_set(NGRAPH_ADDRESS_SANITIZER FALSE) + ngraph_set(NGRAPH_ADDRESS_SANITIZER OFF) endif () - ngraph_set(NGRAPH_PYTHON_BUILD_ENABLE FALSE) + ngraph_set(NGRAPH_PYTHON_BUILD_ENABLE OFF) if(ENABLE_TESTS AND NOT ANDROID) - ngraph_set(NGRAPH_UNIT_TEST_ENABLE TRUE) + ngraph_set(NGRAPH_UNIT_TEST_ENABLE ON) else() - ngraph_set(NGRAPH_UNIT_TEST_ENABLE FALSE) + ngraph_set(NGRAPH_UNIT_TEST_ENABLE OFF) endif() if(NOT (ANDROID OR WINDOWS_STORE OR (MSVC AND (ARM OR AARCH64)) )) - ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE TRUE) + ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE ON) else() - ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE FALSE) + ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE OFF) endif() - ngraph_set(NGRAPH_INTERPRETER_ENABLE TRUE) + ngraph_set(NGRAPH_INTERPRETER_ENABLE ON) if(TREAT_WARNING_AS_ERROR) ngraph_set(NGRAPH_WARNINGS_AS_ERRORS ON) @@ -73,12 +73,6 @@ function(build_ngraph) ngraph_set(NGRAPH_WARNINGS_AS_ERRORS OFF) endif() - if(COVERAGE) - ngraph_set(NGRAPH_CODE_COVERAGE_ENABLE ON) - else() - ngraph_set(NGRAPH_CODE_COVERAGE_ENABLE OFF) - endif() - if(ENABLE_SANITIZER) ngraph_set(NGRAPH_ADDRESS_SANITIZER_ENABLE ON) else() @@ -94,10 +88,7 @@ function(build_ngraph) if(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$") ie_add_compiler_flags(-Wno-error=uninitialized -Wno-error=literal-conversion) elseif(UNIX) - ie_add_compiler_flags(-Wno-error=maybe-uninitialized -Wno-error=return-type -fPIC) - endif() - if(ANDROID) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=defaulted-function-deleted -Wno-error=unused-command-line-argument") + ie_add_compiler_flags(-Wno-error=maybe-uninitialized -Wno-error=return-type) endif() # WA for GCC 7.0 @@ -119,37 +110,41 @@ function(build_ngraph) set(NGRAPH_LIBRARIES ngraph PARENT_SCOPE) endfunction() -file(REMOVE "${CMAKE_BINARY_DIR}/openvino_targets_developer.cmake") +function(openvino_developer_export_targets) + cmake_parse_arguments(EXPORT "" "COMPONENT" "TARGETS" ${ARGN}) -unset(OpenVINODeveloperPackageTargets CACHE) + if(EXPORT_UNPARSED_ARGUMENTS) + message(FATAL_ERROR "openvino_developer_export_targets has unparsed arguments: ${EXPORT_UNPARSED_ARGUMENTS}") + endif() -function(openvino_developer_export_targets) - set(OpenVINODeveloperPackageTargets "${OpenVINODeveloperPackageTargets};${ARGV}") + set(${EXPORT_COMPONENT} "${${EXPORT_COMPONENT}};${EXPORT_TARGETS}") # to allow exporting of aliased targets with the original names - foreach(target_name ${OpenVINODeveloperPackageTargets}) + foreach(target_name IN LISTS ${EXPORT_COMPONENT}) if(TARGET "${target_name}") get_target_property(original_name ${target_name} ALIASED_TARGET) if(TARGET "${original_name}") message(STATUS "The name ${target_name} is an ALIAS for ${original_name}. 
" "It will be exported to the InferenceEngineDeveloperPackage with the original name.") - list(REMOVE_ITEM OpenVINODeveloperPackageTargets ${target_name}) - list(APPEND OpenVINODeveloperPackageTargets ${original_name}) + list(REMOVE_ITEM ${EXPORT_COMPONENT} ${target_name}) + list(APPEND ${EXPORT_COMPONENT} ${original_name}) endif() endif() endforeach() - list(REMOVE_DUPLICATES OpenVINODeveloperPackageTargets) - set(OpenVINODeveloperPackageTargets "${OpenVINODeveloperPackageTargets}" CACHE INTERNAL - "Paths to extra Inference Engine plugins" FORCE) + list(REMOVE_DUPLICATES ${EXPORT_COMPONENT}) + set(${EXPORT_COMPONENT} "${${EXPORT_COMPONENT}}" CACHE INTERNAL + "A list of OpenVINO ${EXPORT_COMPONENT} exported targets" FORCE) + + list(APPEND openvino_export_components ${EXPORT_COMPONENT}) + list(REMOVE_DUPLICATES openvino_export_components) + set(openvino_export_components "${openvino_export_components}" CACHE INTERNAL + "A list of OpenVINO exported components" FORCE) endfunction() add_subdirectory(openvino) - build_ngraph() - add_subdirectory(inference-engine) - add_subdirectory(model-optimizer) add_subdirectory(docs) diff --git a/Jenkinsfile b/Jenkinsfile index 49a10fd921a5b7..739f5cd99909ad 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -2,10 +2,10 @@ properties([ parameters([ - booleanParam(defaultValue: true, + booleanParam(defaultValue: false, description: 'Cancel the rest of parallel stages if one of them fails and return status immediately', name: 'failFast'), - booleanParam(defaultValue: false, + booleanParam(defaultValue: true, description: 'Whether to propagate commit status to GitHub', name: 'propagateStatus'), string(defaultValue: '', diff --git a/README.md b/README.md index 3fa6a27fc9ce46..d8346171b7de3f 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # [OpenVINO™ Toolkit](https://01.org/openvinotoolkit) - Deep Learning Deployment Toolkit repository -[![Stable release](https://img.shields.io/badge/version-2021.1-green.svg)](https://github.com/openvinotoolkit/openvino/releases/tag/2021.1) +[![Stable release](https://img.shields.io/badge/version-2021.2-green.svg)](https://github.com/openvinotoolkit/openvino/releases/tag/2021.2) [![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE) ![Azure DevOps builds (branch)](https://img.shields.io/azure-devops/build/openvinoci/b2bab62f-ab2f-4871-a538-86ea1be7d20f/9/master?label=Public%20CI) diff --git a/cmake/check_features.cmake b/cmake/check_features.cmake deleted file mode 100644 index 693227097eaa52..00000000000000 --- a/cmake/check_features.cmake +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2018-2020 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -if (VERBOSE_BUILD) - set(CMAKE_VERBOSE_MAKEFILE ON CACHE BOOL "" FORCE) -endif() - -#64 bits platform -if (CMAKE_SIZEOF_VOID_P EQUAL 8) - message(STATUS "Detected 64 bit architecture") - SET(ARCH_64 ON) -else() - message(STATUS "Detected 32 bit architecture") - SET(ARCH_64 OFF) -endif() - -if(ENABLE_AVX512F) - if ((CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") AND (MSVC_VERSION VERSION_LESS 1920)) - # 1920 version of MSVC 2019. 
In MSVC 2017 AVX512F not work - set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE) - endif() - if ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6)) - set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE) - endif() - if ((CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10)) - # TBD: clarify which AppleClang version supports avx512 - set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE) - endif() - if ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9)) - set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE) - endif() -endif() - -print_enabled_features() diff --git a/cmake/dependencies.cmake b/cmake/dependencies.cmake index 56f935789c0491..aed76147342d02 100644 --- a/cmake/dependencies.cmake +++ b/cmake/dependencies.cmake @@ -4,8 +4,6 @@ set_temp_directory(TEMP "${IE_MAIN_SOURCE_DIR}") -include(dependency_solver) - if(CMAKE_CROSSCOMPILING AND CMAKE_HOST_SYSTEM_NAME MATCHES Linux AND CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*") set(protoc_version "3.7.1") diff --git a/inference-engine/cmake/FindTBB.cmake b/cmake/developer_package/FindTBB.cmake similarity index 93% rename from inference-engine/cmake/FindTBB.cmake rename to cmake/developer_package/FindTBB.cmake index 688e6fb46dc3ca..765b12e69eb3bb 100644 --- a/inference-engine/cmake/FindTBB.cmake +++ b/cmake/developer_package/FindTBB.cmake @@ -25,8 +25,9 @@ endif() find_package(TBB CONFIG - NO_DEFAULT_PATH PATHS ${TBBROOT}/cmake - ${CMAKE_CURRENT_LIST_DIR}/${IE_OWN_TBB_CONFIG} + ${IEDevScripts_DIR}/${IE_OWN_TBB_CONFIG} + NO_DEFAULT_PATH ) + find_package_handle_standard_args(TBB CONFIG_MODE) diff --git a/cmake/developer_package.cmake b/cmake/developer_package/IEDevScriptsConfig.cmake similarity index 57% rename from cmake/developer_package.cmake rename to cmake/developer_package/IEDevScriptsConfig.cmake index b9ea3e3d3b78fd..a28f77099b6ba8 100644 --- a/cmake/developer_package.cmake +++ b/cmake/developer_package/IEDevScriptsConfig.cmake @@ -4,7 +4,28 @@ cmake_minimum_required(VERSION 3.13) +if(NOT DEFINED IEDevScripts_DIR) + message(FATAL_ERROR "IEDevScripts_DIR is not defined") +endif() + +set(OLD_CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}) +set(CMAKE_MODULE_PATH "${IEDevScripts_DIR}") + +function(set_ci_build_number) + set(repo_root "${CMAKE_SOURCE_DIR}") + include(version) + set(CI_BUILD_NUMBER "${CI_BUILD_NUMBER}" PARENT_SCOPE) +endfunction() + +set_ci_build_number() + +include(features) +include(message) + +# # Detect target +# + include(target_flags) string(TOLOWER ${CMAKE_SYSTEM_PROCESSOR} ARCH_FOLDER) @@ -18,84 +39,10 @@ elseif(MSVC AND AARCH64) set(ARCH_FOLDER arm64) endif() -list(APPEND CMAKE_MODULE_PATH - "${OpenVINO_MAIN_SOURCE_DIR}/cmake/download" - "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cross_compile") - -# -# CPack -# - -include(CPackComponent) -unset(IE_CPACK_COMPONENTS_ALL CACHE) - -set(IE_CPACK_IE_DIR deployment_tools/inference_engine) - -# Search packages for the host system instead of packages for the target system -# in case of cross compilation these macros should be defined by the toolchain file -if(NOT COMMAND find_host_package) - macro(find_host_package) - find_package(${ARGN}) - endmacro() -endif() -if(NOT COMMAND find_host_program) - macro(find_host_program) - find_program(${ARGN}) - endmacro() -endif() - -# -# ie_cpack_set_library_dir() -# -# Set library directory for cpack -# -function(ie_cpack_set_library_dir) - if(WIN32) - set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/${CMAKE_BUILD_TYPE} PARENT_SCOPE) - 
set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${ARCH_FOLDER}/${CMAKE_BUILD_TYPE} PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/${CMAKE_BUILD_TYPE} PARENT_SCOPE) - else() - set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE) - set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE) - set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE) - endif() -endfunction() - -ie_cpack_set_library_dir() - # -# ie_cpack_add_component(NAME ...) +# Prepare temporary folder # -# Wraps original `cpack_add_component` and adds component to internal IE list -# -macro(ie_cpack_add_component NAME) - list(APPEND IE_CPACK_COMPONENTS_ALL ${NAME}) - set(IE_CPACK_COMPONENTS_ALL "${IE_CPACK_COMPONENTS_ALL}" CACHE STRING "" FORCE) - cpack_add_component(${NAME} ${ARGN}) -endmacro() - -macro(ie_cpack) - set(CPACK_GENERATOR "TGZ") - string(REPLACE "/" "_" CPACK_PACKAGE_VERSION "${CI_BUILD_NUMBER}") - if(WIN32) - set(CPACK_PACKAGE_NAME inference-engine_${CMAKE_BUILD_TYPE}) - else() - set(CPACK_PACKAGE_NAME inference-engine) - endif() - set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY OFF) - set(CPACK_ARCHIVE_COMPONENT_INSTALL ON) - set(CPACK_PACKAGE_VENDOR "Intel") - set(CPACK_COMPONENTS_ALL ${ARGN}) - set(CPACK_STRIP_FILES ON) - - if(OS_FOLDER) - set(CPACK_SYSTEM_NAME "${OS_FOLDER}") - endif() - - include(CPack) -endmacro() -# prepare temporary folder function(set_temp_directory temp_variable source_tree_dir) if (DEFINED ENV{DL_SDK_TEMP} AND NOT $ENV{DL_SDK_TEMP} STREQUAL "") message(STATUS "DL_SDK_TEMP environment is set : $ENV{DL_SDK_TEMP}") @@ -119,22 +66,37 @@ function(set_temp_directory temp_variable source_tree_dir) endif() endfunction() +# +# For cross-compilation +# + +# Search packages for the host system instead of packages for the target system +# in case of cross compilation these macros should be defined by the toolchain file +if(NOT COMMAND find_host_package) + macro(find_host_package) + find_package(${ARGN}) + endmacro() +endif() +if(NOT COMMAND find_host_program) + macro(find_host_program) + find_program(${ARGN}) + endmacro() +endif() + # # Common scripts # +include(packaging) include(coverage/coverage) include(shellcheck/shellcheck) -# External dependencies -find_package(Threads) - # printing debug messages include(debug) if(OS_FOLDER) message ("**** OS FOLDER IS: [${OS_FOLDER}]") - if("${OS_FOLDER}" STREQUAL "ON") + if(OS_FOLDER STREQUAL "ON") message ("**** USING OS FOLDER: [${CMAKE_SYSTEM_NAME}]") set(BIN_FOLDER "bin/${CMAKE_SYSTEM_NAME}/${ARCH_FOLDER}") else() @@ -144,13 +106,27 @@ else() set(BIN_FOLDER "bin/${ARCH_FOLDER}") endif() -if("${CMAKE_BUILD_TYPE}" STREQUAL "") - debug_message(STATUS "CMAKE_BUILD_TYPE not defined, 'Release' will be used") +if(NOT DEFINED CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "") + message(STATUS "CMAKE_BUILD_TYPE not defined, 'Release' will be used") set(CMAKE_BUILD_TYPE "Release") +else() + set(RELEASE_TYPES "Debug" "Release" "RelWithDebInfo" "MinSizeRel") + list(FIND RELEASE_TYPES ${CMAKE_BUILD_TYPE} INDEX_FOUND) + if (INDEX_FOUND EQUAL -1) + message(FATAL_ERROR "CMAKE_BUILD_TYPE must be one of Debug, Release, RelWithDebInfo, or MinSizeRel") + endif() +endif() +message(STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}") + +if(USE_BUILD_TYPE_SUBFOLDER) + set(BIN_FOLDER "${BIN_FOLDER}/${CMAKE_BUILD_TYPE}") endif() # allow to override default OUTPUT_ROOT root if(NOT DEFINED OUTPUT_ROOT) + if(NOT DEFINED OpenVINO_MAIN_SOURCE_DIR) + message(FATAL_ERROR "OpenVINO_MAIN_SOURCE_DIR 
is not defined") + endif() set(OUTPUT_ROOT ${OpenVINO_MAIN_SOURCE_DIR}) endif() @@ -176,81 +152,86 @@ endif() set(CMAKE_DEBUG_POSTFIX ${IE_DEBUG_POSTFIX}) set(CMAKE_RELEASE_POSTFIX ${IE_RELEASE_POSTFIX}) -if (WIN32 OR CMAKE_GENERATOR STREQUAL "Xcode") +if (MSVC OR CMAKE_GENERATOR STREQUAL "Xcode") # Support CMake multiconfiguration for Visual Studio or Xcode build set(IE_BUILD_POSTFIX $<$:${IE_DEBUG_POSTFIX}>$<$:${IE_RELEASE_POSTFIX}>) else () - if (${CMAKE_BUILD_TYPE} STREQUAL "Debug" ) + if (CMAKE_BUILD_TYPE STREQUAL "Debug") set(IE_BUILD_POSTFIX ${IE_DEBUG_POSTFIX}) else() set(IE_BUILD_POSTFIX ${IE_RELEASE_POSTFIX}) endif() endif() -message(STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}") add_definitions(-DIE_BUILD_POSTFIX=\"${IE_BUILD_POSTFIX}\") if(NOT UNIX) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}) set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}) - set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}) - set(CMAKE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}) - set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}) else() - set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib) - set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}/lib) - set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}) - set(CMAKE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}) - set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/${CMAKE_BUILD_TYPE}) + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/lib) + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}/lib) endif() +set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}) +set(CMAKE_PDB_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}) +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_ROOT}/${BIN_FOLDER}) if(APPLE) + set(CMAKE_MACOSX_RPATH ON) # WA for Xcode generator + object libraries issue: # https://gitlab.kitware.com/cmake/cmake/issues/20260 # http://cmake.3232098.n2.nabble.com/XCODE-DEPEND-HELPER-make-Deletes-Targets-Before-and-While-They-re-Built-td7598277.html set(CMAKE_XCODE_GENERATE_TOP_LEVEL_PROJECT_ONLY ON) - set(CMAKE_MACOSX_RPATH ON) endif() # Use solution folders set_property(GLOBAL PROPERTY USE_FOLDERS ON) -set(CMAKE_POLICY_DEFAULT_CMP0054 NEW) +# Enable CMAKE__COMPILER_ID AppleClang +set(CMAKE_POLICY_DEFAULT_CMP0025 NEW) # LTO -set(CMAKE_POLICY_DEFAULT_CMP0069 NEW) -include(CheckIPOSupported) +if(ENABLE_LTO) + set(CMAKE_POLICY_DEFAULT_CMP0069 NEW) + include(CheckIPOSupported) -check_ipo_supported(RESULT IPO_SUPPORTED - OUTPUT OUTPUT_MESSAGE - LANGUAGES C CXX) + check_ipo_supported(RESULT IPO_SUPPORTED + OUTPUT OUTPUT_MESSAGE + LANGUAGES C CXX) -if(NOT IPO_SUPPORTED) - set(ENABLE_LTO "OFF" CACHE STRING "Enable Link Time Optmization" FORCE) - message(WARNING "IPO / LTO is not supported: ${OUTPUT_MESSAGE}") + if(NOT IPO_SUPPORTED) + set(ENABLE_LTO "OFF" CACHE STRING "Enable Link Time Optmization" FORCE) + message(WARNING "IPO / LTO is not supported: ${OUTPUT_MESSAGE}") + endif() endif() # General flags -include(sdl) -include(os_flags) -include(sanitizer) -include(cross_compiled_func) +include(compile_flags/sdl) +include(compile_flags/os_flags) +include(compile_flags/sanitizer) +include(compile_flags/fuzzing) +include(download/dependency_solver) +include(cross_compile/cross_compiled_func) include(faster_build) include(whole_archive) +include(linux_name) +include(models) include(api_validator/api_validator) 
-function(set_ci_build_number) - set(OpenVINO_MAIN_SOURCE_DIR "${CMAKE_SOURCE_DIR}") - include(version) - set(CI_BUILD_NUMBER "${CI_BUILD_NUMBER}" PARENT_SCOPE) -endfunction() -set_ci_build_number() - include(vs_version/vs_version) +include(plugins/plugins) +include(add_ie_target) + +if(ENABLE_FUZZING) + enable_fuzzing() +endif() # Code style utils include(cpplint/cpplint) include(clang_format/clang_format) + +# Restore state +set(CMAKE_MODULE_PATH ${OLD_CMAKE_MODULE_PATH}) diff --git a/inference-engine/cmake/add_ie_target.cmake b/cmake/developer_package/add_ie_target.cmake similarity index 96% rename from inference-engine/cmake/add_ie_target.cmake rename to cmake/developer_package/add_ie_target.cmake index f6d4dd19ca6a5f..b081a69459da1d 100644 --- a/inference-engine/cmake/add_ie_target.cmake +++ b/cmake/developer_package/add_ie_target.cmake @@ -8,7 +8,7 @@ Example: addIeTarget( NAME core_lib ADD_CPPLINT - DEVELOPER_PACKAGE + DEVELOPER_PACKAGE TYPE ROOT ${CMAKE_CURRENT_SOURCE_DIR} ADDITIONAL_SOURCE_DIRS @@ -31,7 +31,6 @@ addIeTarget( function(addIeTarget) set(options ADD_CPPLINT # Enables code style checks for the target - DEVELOPER_PACKAGE # Enables exporting of the target through the developer package ) set(oneValueRequiredArgs TYPE # type of target, SHARED|STATIC|EXECUTABLE. SHARED and STATIC correspond to add_library, EXECUTABLE to add_executable @@ -39,6 +38,7 @@ function(addIeTarget) ROOT # root directory to be used for recursive search of source files ) set(oneValueOptionalArgs + DEVELOPER_PACKAGE # Enables exporting of the target through the developer package ) set(multiValueArgs INCLUDES # Extra include directories @@ -121,10 +121,8 @@ function(addIeTarget) endif() if (ARG_DEVELOPER_PACKAGE) # developer package - ie_developer_export_targets(${ARG_NAME}) - if (ARG_EXPORT_DEPENDENCIES) - ie_developer_export_targets(${ARG_NAME} ${ARG_EXPORT_DEPENDENCIES}) - endif() + openvino_developer_export_targets(COMPONENT ${ARG_DEVELOPER_PACKAGE} + TARGETS ${ARG_NAME} ${ARG_EXPORT_DEPENDENCIES}) endif() if(WIN32) # Provide default compile pdb name equal to target name diff --git a/cmake/api_validator/api_validator.cmake b/cmake/developer_package/api_validator/api_validator.cmake similarity index 97% rename from cmake/api_validator/api_validator.cmake rename to cmake/developer_package/api_validator/api_validator.cmake index d165256d3e4183..6b0222a03d1f0d 100644 --- a/cmake/api_validator/api_validator.cmake +++ b/cmake/developer_package/api_validator/api_validator.cmake @@ -108,7 +108,7 @@ function(_ie_add_api_validator_post_build_step) -D UWP_API_VALIDATOR_EXCLUSION=${UWP_API_VALIDATOR_EXCLUSION} -D UWP_API_VALIDATOR_OUTPUT=${output_file} -D CMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE} - -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/api_validator/api_validator_run.cmake" + -P "${IEDevScripts_DIR}/api_validator/api_validator_run.cmake" BYPRODUCTS ${output_file} COMMENT "[apiValidator] Check ${target_name} for OneCore compliance" VERBATIM) diff --git a/cmake/api_validator/api_validator_run.cmake b/cmake/developer_package/api_validator/api_validator_run.cmake similarity index 100% rename from cmake/api_validator/api_validator_run.cmake rename to cmake/developer_package/api_validator/api_validator_run.cmake diff --git a/cmake/clang_format/clang_format.cmake b/cmake/developer_package/clang_format/clang_format.cmake similarity index 92% rename from cmake/clang_format/clang_format.cmake rename to cmake/developer_package/clang_format/clang_format.cmake index ae37ae134e3f4f..6e35f387c72c10 100644 --- 
a/cmake/clang_format/clang_format.cmake
+++ b/cmake/developer_package/clang_format/clang_format.cmake
@@ -76,10 +76,10 @@ function(add_clang_format_target TARGET_NAME)
                -D "CLANG_FORMAT=${CLANG_FORMAT}"
                -D "INPUT_FILE=${source_file}"
                -D "OUTPUT_FILE=${output_file}"
-               -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/clang_format/clang_format_check.cmake"
+               -P "${IEDevScripts_DIR}/clang_format/clang_format_check.cmake"
            DEPENDS
                "${source_file}"
-               "${OpenVINO_MAIN_SOURCE_DIR}/cmake/clang_format/clang_format_check.cmake"
+               "${IEDevScripts_DIR}/clang_format/clang_format_check.cmake"
            COMMENT
                "[clang-format] ${source_file}"
            VERBATIM)
@@ -102,10 +102,10 @@ function(add_clang_format_target TARGET_NAME)
            -D "CLANG_FORMAT=${CLANG_FORMAT}"
            -D "INPUT_FILES=${CLANG_FORMAT_FOR_SOURCES}"
            -D "EXCLUDE_PATTERNS=${CLANG_FORMAT_EXCLUDE_PATTERNS}"
-           -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/clang_format/clang_format_fix.cmake"
+           -P "${IEDevScripts_DIR}/clang_format/clang_format_fix.cmake"
        DEPENDS
            "${CLANG_FORMAT_FOR_SOURCES}"
-           "${OpenVINO_MAIN_SOURCE_DIR}/cmake/clang_format/clang_format_fix.cmake"
+           "${IEDevScripts_DIR}/clang_format/clang_format_fix.cmake"
        COMMENT
            "[clang-format] ${TARGET_NAME}_fix"
        VERBATIM)
diff --git a/cmake/clang_format/clang_format_check.cmake b/cmake/developer_package/clang_format/clang_format_check.cmake
similarity index 100%
rename from cmake/clang_format/clang_format_check.cmake
rename to cmake/developer_package/clang_format/clang_format_check.cmake
diff --git a/cmake/clang_format/clang_format_fix.cmake b/cmake/developer_package/clang_format/clang_format_fix.cmake
similarity index 100%
rename from cmake/clang_format/clang_format_fix.cmake
rename to cmake/developer_package/clang_format/clang_format_fix.cmake
diff --git a/cmake/developer_package/compile_flags/fuzzing.cmake b/cmake/developer_package/compile_flags/fuzzing.cmake
new file mode 100644
index 00000000000000..cb60203fb462f1
--- /dev/null
+++ b/cmake/developer_package/compile_flags/fuzzing.cmake
@@ -0,0 +1,25 @@
+# Copyright (C) 2018-2020 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+macro(enable_fuzzing)
+    # Enable [libFuzzer](https://llvm.org/docs/LibFuzzer.html) if supported.
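+    # -fsanitize=fuzzer-no-link instruments the code for fuzzing without linking
+    # libFuzzer's driver; the driver itself is linked per binary by add_fuzzer()
+    # below via -fsanitize=fuzzer. The -fprofile-instr-generate/-fcoverage-mapping
+    # pair enables Clang source-based coverage for the fuzzing runs.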
+    set(FUZZING_COMPILER_FLAGS "-fsanitize=fuzzer-no-link -fprofile-instr-generate -fcoverage-mapping")
+    set(FUZZING_LINKER_FLAGS "-fsanitize-coverage=trace-pc-guard -fprofile-instr-generate")
+
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FUZZING_COMPILER_FLAGS}")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FUZZING_COMPILER_FLAGS}")
+    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FUZZING_LINKER_FLAGS}")
+    set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${FUZZING_LINKER_FLAGS}")
+
+    unset(FUZZING_COMPILER_FLAGS)
+    unset(FUZZING_LINKER_FLAGS)
+endmacro()
+
+function(add_fuzzer FUZZER_EXE_NAME FUZZER_SOURCES)
+    add_executable(${FUZZER_EXE_NAME} ${FUZZER_SOURCES})
+    target_link_libraries(${FUZZER_EXE_NAME} PRIVATE fuzz-testhelper)
+    if(ENABLE_FUZZING)
+        set_target_properties(${FUZZER_EXE_NAME} PROPERTIES LINK_FLAGS "-fsanitize=fuzzer")
+    endif()
+endfunction(add_fuzzer)
diff --git a/cmake/os_flags.cmake b/cmake/developer_package/compile_flags/os_flags.cmake
similarity index 99%
rename from cmake/os_flags.cmake
rename to cmake/developer_package/compile_flags/os_flags.cmake
index 9803e7439fd271..8e3a5606ab78c5 100644
--- a/cmake/os_flags.cmake
+++ b/cmake/developer_package/compile_flags/os_flags.cmake
@@ -205,6 +205,7 @@ if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
     ie_add_compiler_flags(-fsigned-char)
 endif()
 
+# Honor visibility properties for all target types
 set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
 set(CMAKE_CXX_VISIBILITY_PRESET hidden)
 set(CMAKE_C_VISIBILITY_PRESET hidden)
diff --git a/cmake/sanitizer.cmake b/cmake/developer_package/compile_flags/sanitizer.cmake
similarity index 85%
rename from cmake/sanitizer.cmake
rename to cmake/developer_package/compile_flags/sanitizer.cmake
index e303b203100f7a..a9b8a47c72a171 100644
--- a/cmake/sanitizer.cmake
+++ b/cmake/developer_package/compile_flags/sanitizer.cmake
@@ -4,6 +4,14 @@
 
 include(CheckCXXCompilerFlag)
 
+if (ENABLE_SANITIZER OR ENABLE_THREAD_SANITIZER)
+    # This is a workaround for https://gitlab.kitware.com/cmake/cmake/-/issues/16609.
+    # It ensures pthread is searched without ASAN linking.
+    # The line below must come before adding -fsanitize=address or -fsanitize=thread to
+    # build options for the trick to work.
+ find_package(Threads REQUIRED) +endif() + if (ENABLE_SANITIZER) set(SANITIZER_COMPILER_FLAGS "-g -fsanitize=address -fno-omit-frame-pointer") CHECK_CXX_COMPILER_FLAG("-fsanitize-recover=address" SANITIZE_RECOVER_SUPPORTED) diff --git a/cmake/sdl.cmake b/cmake/developer_package/compile_flags/sdl.cmake similarity index 100% rename from cmake/sdl.cmake rename to cmake/developer_package/compile_flags/sdl.cmake diff --git a/cmake/coverage/coverage.cmake b/cmake/developer_package/coverage/coverage.cmake similarity index 99% rename from cmake/coverage/coverage.cmake rename to cmake/developer_package/coverage/coverage.cmake index e2fa3b57edee79..71c24fcd9ddab3 100644 --- a/cmake/coverage/coverage.cmake +++ b/cmake/developer_package/coverage/coverage.cmake @@ -18,7 +18,7 @@ if(NOT TARGET ie_coverage) endif() set(IE_COVERAGE_REPORTS "${CMAKE_BINARY_DIR}/coverage") -set(IE_COVERAGE_SCRIPT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/cmake/coverage") +set(IE_COVERAGE_SCRIPT_DIR "${IEDevScripts_DIR}/coverage") include(CMakeParseArguments) diff --git a/cmake/coverage/coverage_clean.cmake b/cmake/developer_package/coverage/coverage_clean.cmake similarity index 100% rename from cmake/coverage/coverage_clean.cmake rename to cmake/developer_package/coverage/coverage_clean.cmake diff --git a/cmake/coverage/coverage_merge.cmake b/cmake/developer_package/coverage/coverage_merge.cmake similarity index 100% rename from cmake/coverage/coverage_merge.cmake rename to cmake/developer_package/coverage/coverage_merge.cmake diff --git a/cmake/cpplint/cpplint.cmake b/cmake/developer_package/cpplint/cpplint.cmake similarity index 52% rename from cmake/cpplint/cpplint.cmake rename to cmake/developer_package/cpplint/cpplint.cmake index 23e022d6a514ad..ccd97f8df8c8bd 100644 --- a/cmake/cpplint/cpplint.cmake +++ b/cmake/developer_package/cpplint/cpplint.cmake @@ -68,17 +68,17 @@ function(add_cpplint_target TARGET_NAME) "${output_file}" COMMAND "${CMAKE_COMMAND}" - -D "CPPLINT_SCRIPT=${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint.py" + -D "CPPLINT_SCRIPT=${IEDevScripts_DIR}/cpplint/cpplint.py" -D "INPUT_FILE=${source_file}" -D "OUTPUT_FILE=${output_file}" -D "WORKING_DIRECTORY=${CMAKE_CURRENT_SOURCE_DIR}" -D "SKIP_RETURN_CODE=${ENABLE_CPPLINT_REPORT}" -D "CUSTOM_FILTER=${custom_filter}" - -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_run.cmake" + -P "${IEDevScripts_DIR}/cpplint/cpplint_run.cmake" DEPENDS "${source_file}" - "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint.py" - "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_run.cmake" + "${IEDevScripts_DIR}/cpplint/cpplint.py" + "${IEDevScripts_DIR}/cpplint/cpplint_run.cmake" COMMENT "[cpplint] ${source_file}" VERBATIM) @@ -104,71 +104,3 @@ function(add_cpplint_target TARGET_NAME) add_dependencies(cpplint_all ${TARGET_NAME}) endfunction() - -function(add_cpplint_report_target) - if(NOT ENABLE_CPPLINT OR NOT ENABLE_CPPLINT_REPORT) - return() - endif() - - set(cpplint_output_file "${CMAKE_BINARY_DIR}/cpplint/final_output.cpplint") - add_custom_command( - OUTPUT - "${cpplint_output_file}" - COMMAND - "${CMAKE_COMMAND}" - -D "FINAL_OUTPUT_FILE=${cpplint_output_file}" - -D "OUTPUT_FILES=${CPPLINT_ALL_OUTPUT_FILES}" - -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_merge.cmake" - DEPENDS - ${CPPLINT_ALL_OUTPUT_FILES} - "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_merge.cmake" - COMMENT - "[cpplint] Merge all output files" - VERBATIM) - - set(cppcheck_output_file "${CMAKE_BINARY_DIR}/cpplint/cpplint-cppcheck-result.xml") - add_custom_command( - OUTPUT - 
"${cppcheck_output_file}" - COMMAND - "${CMAKE_COMMAND}" - -D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}" - -D "CONVERT_SCRIPT=${OpenVINO_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py" - -D "INPUT_FILE=${cpplint_output_file}" - -D "OUTPUT_FILE=${cppcheck_output_file}" - -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_to_cppcheck_xml.cmake" - DEPENDS - "${cpplint_output_file}" - "${OpenVINO_MAIN_SOURCE_DIR}/scripts/cpplint_to_cppcheckxml.py" - "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_to_cppcheck_xml.cmake" - COMMENT - "[cpplint] Convert to cppcheck XML format" - VERBATIM) - - set(report_dir "${OpenVINO_MAIN_SOURCE_DIR}/report/cpplint") - set(html_output_file "${report_dir}/index.html") - add_custom_command( - OUTPUT - "${html_output_file}" - COMMAND - "${CMAKE_COMMAND}" - -D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}" - -D "CONVERT_SCRIPT=${OpenVINO_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py" - -D "INPUT_FILE=${cppcheck_output_file}" - -D "REPORT_DIR=${report_dir}" - -D "SOURCE_DIR=${OpenVINO_MAIN_SOURCE_DIR}" - -D "TITLE=${CMAKE_PROJECT_NAME}" - -P "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_html.cmake" - DEPENDS - "${cppcheck_output_file}" - "${OpenVINO_MAIN_SOURCE_DIR}/scripts/cppcheck-htmlreport.py" - "${OpenVINO_MAIN_SOURCE_DIR}/cmake/cpplint/cpplint_html.cmake" - COMMENT - "[cpplint] Generate HTML report" - VERBATIM) - - add_custom_target(cpplint_report - DEPENDS "${html_output_file}" - COMMENT "[cpplint] Generate report") - set_target_properties(cpplint_report PROPERTIES FOLDER cpplint) -endfunction() diff --git a/cmake/cpplint/cpplint.py b/cmake/developer_package/cpplint/cpplint.py similarity index 100% rename from cmake/cpplint/cpplint.py rename to cmake/developer_package/cpplint/cpplint.py diff --git a/cmake/cpplint/cpplint_html.cmake b/cmake/developer_package/cpplint/cpplint_html.cmake similarity index 100% rename from cmake/cpplint/cpplint_html.cmake rename to cmake/developer_package/cpplint/cpplint_html.cmake diff --git a/cmake/cpplint/cpplint_merge.cmake b/cmake/developer_package/cpplint/cpplint_merge.cmake similarity index 100% rename from cmake/cpplint/cpplint_merge.cmake rename to cmake/developer_package/cpplint/cpplint_merge.cmake diff --git a/cmake/cpplint/cpplint_run.cmake b/cmake/developer_package/cpplint/cpplint_run.cmake similarity index 100% rename from cmake/cpplint/cpplint_run.cmake rename to cmake/developer_package/cpplint/cpplint_run.cmake diff --git a/cmake/cpplint/cpplint_to_cppcheck_xml.cmake b/cmake/developer_package/cpplint/cpplint_to_cppcheck_xml.cmake similarity index 100% rename from cmake/cpplint/cpplint_to_cppcheck_xml.cmake rename to cmake/developer_package/cpplint/cpplint_to_cppcheck_xml.cmake diff --git a/cmake/cross_compile/cross_compiled_disp_gen.cmake b/cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake similarity index 100% rename from cmake/cross_compile/cross_compiled_disp_gen.cmake rename to cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake diff --git a/cmake/cross_compile/cross_compiled_disp_gen_options.in b/cmake/developer_package/cross_compile/cross_compiled_disp_gen_options.in similarity index 100% rename from cmake/cross_compile/cross_compiled_disp_gen_options.in rename to cmake/developer_package/cross_compile/cross_compiled_disp_gen_options.in diff --git a/cmake/cross_compile/cross_compiled_func.cmake b/cmake/developer_package/cross_compile/cross_compiled_func.cmake similarity index 100% rename from cmake/cross_compile/cross_compiled_func.cmake rename to 
cmake/developer_package/cross_compile/cross_compiled_func.cmake diff --git a/cmake/debug.cmake b/cmake/developer_package/debug.cmake similarity index 100% rename from cmake/debug.cmake rename to cmake/developer_package/debug.cmake diff --git a/cmake/download/dependency_solver.cmake b/cmake/developer_package/download/dependency_solver.cmake similarity index 99% rename from cmake/download/dependency_solver.cmake rename to cmake/developer_package/download/dependency_solver.cmake index a089fd80b626bd..3d181a45b21a29 100644 --- a/cmake/download/dependency_solver.cmake +++ b/cmake/developer_package/download/dependency_solver.cmake @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -include ("download") +include (download/download) function (resolve_archive_dependency VAR COMPONENT ARCHIVE ARCHIVE_UNIFIED ARCHIVE_WIN ARCHIVE_LIN ARCHIVE_MAC ARCHIVE_ANDROID TARGET_PATH FOLDER ENVIRONMENT SHA256) if (ENVIRONMENT AND (DEFINED ${ENVIRONMENT} OR DEFINED ENV{${ENVIRONMENT}})) diff --git a/cmake/download/download.cmake b/cmake/developer_package/download/download.cmake similarity index 87% rename from cmake/download/download.cmake rename to cmake/developer_package/download/download.cmake index 80d3d24f05b571..566109cb6fa93e 100644 --- a/cmake/download/download.cmake +++ b/cmake/developer_package/download/download.cmake @@ -21,5 +21,5 @@ function (Download from to fatal result output sha256) endfunction(Download) -include ("download_and_apply") -include ("download_and_extract") +include(download/download_and_apply) +include(download/download_and_extract) diff --git a/cmake/download/download_and_apply.cmake b/cmake/developer_package/download/download_and_apply.cmake similarity index 100% rename from cmake/download/download_and_apply.cmake rename to cmake/developer_package/download/download_and_apply.cmake diff --git a/cmake/download/download_and_check.cmake b/cmake/developer_package/download/download_and_check.cmake similarity index 71% rename from cmake/download/download_and_check.cmake rename to cmake/developer_package/download/download_and_check.cmake index 6847435cd60b7d..9395814ef9e724 100644 --- a/cmake/download/download_and_check.cmake +++ b/cmake/developer_package/download/download_and_check.cmake @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -include (FindWget) +find_package(Wget QUIET) function (DownloadAndCheck from to fatal result sha256) set(status_res "ON") @@ -18,18 +18,23 @@ function (DownloadAndCheck from to fatal result sha256) message(STATUS "Downloading from ${from} to ${to} ...") find_program(aria2c "aria2c") if (${aria2c} STREQUAL "aria2c-NOTFOUND") - if (NOT ${WGET_FOUND}) + if (NOT WGET_FOUND) Download(${from} ${to} ${fatal} ${result} output ${sha256}) list(GET output 0 status_code) else() - message(STATUS "${WGET_EXECUTABLE} --no-cache --no-check-certificate - --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries=5 ${from}") - execute_process(COMMAND ${WGET_EXECUTABLE} "--no-cache" "--no-check-certificate" - "--retry-connrefused" "--waitretry=1" "--read-timeout=20" "--timeout=15" "--tries=5" - "${from}" "-O" "${to}" - TIMEOUT 2000 - RESULT_VARIABLE status_code) - file(SHA256 ${to} CHECKSUM) + foreach(index RANGE 5) + message(STATUS "${WGET_EXECUTABLE} --no-cache --no-check-certificate + --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries=5 ${from}") + execute_process(COMMAND ${WGET_EXECUTABLE} "--no-cache" "--no-check-certificate" + "--retry-connrefused" "--waitretry=1" "--read-timeout=20" "--timeout=15" "--tries=5" + "${from}" 
"-O" "${to}" + TIMEOUT 2000 + RESULT_VARIABLE status_code) + file(SHA256 ${to} CHECKSUM) + if (${CHECKSUM} STREQUAL ${sha256}) + break() + endif() + endforeach() if (NOT ${CHECKSUM} STREQUAL ${sha256}) message(FATAL_ERROR "Hash mismatch:\n" "expected: ${sha256}\n" diff --git a/cmake/download/download_and_extract.cmake b/cmake/developer_package/download/download_and_extract.cmake similarity index 99% rename from cmake/download/download_and_extract.cmake rename to cmake/developer_package/download/download_and_extract.cmake index cd51c9b263d01b..cf4da01b743fa2 100644 --- a/cmake/download/download_and_extract.cmake +++ b/cmake/developer_package/download/download_and_extract.cmake @@ -2,8 +2,8 @@ # SPDX-License-Identifier: Apache-2.0 # -include ("extract") -include ("download_and_check") +include(download/extract) +include(download/download_and_check) function (GetNameAndUrlToDownload name url archive_name_unified archive_name_win archive_name_lin archive_name_mac archive_name_android) if (archive_name_unified) diff --git a/cmake/download/extract.cmake b/cmake/developer_package/download/extract.cmake similarity index 100% rename from cmake/download/extract.cmake rename to cmake/developer_package/download/extract.cmake diff --git a/cmake/faster_build.cmake b/cmake/developer_package/faster_build.cmake similarity index 100% rename from cmake/faster_build.cmake rename to cmake/developer_package/faster_build.cmake diff --git a/cmake/developer_package/features.cmake b/cmake/developer_package/features.cmake new file mode 100644 index 00000000000000..3c1cb6febb8fad --- /dev/null +++ b/cmake/developer_package/features.cmake @@ -0,0 +1,82 @@ +# Copyright (C) 2018-2020 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +include(options) +include(target_flags) + +# FIXME: there are compiler failures with LTO and Cross-Compile toolchains. 
Disabling for now, but
+# this must be addressed in a proper way
+ie_dependent_option (ENABLE_LTO "Enable Link Time Optimization" OFF "LINUX;NOT CMAKE_CROSSCOMPILING; CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9" OFF)
+
+ie_option (OS_FOLDER "create OS dedicated folder in output" OFF)
+
+if(UNIX)
+    ie_option(USE_BUILD_TYPE_SUBFOLDER "Create dedicated sub-folder per build type for output binaries" ON)
+else()
+    ie_option(USE_BUILD_TYPE_SUBFOLDER "Create dedicated sub-folder per build type for output binaries" OFF)
+endif()
+
+# FIXME: ARM cross-compiler generates several "false positive" warnings regarding __builtin_memcpy buffer overflow
+ie_dependent_option (TREAT_WARNING_AS_ERROR "Treat build warnings as errors" ON "X86 OR X86_64" OFF)
+
+ie_option (ENABLE_INTEGRITYCHECK "build DLLs with /INTEGRITYCHECK flag" OFF)
+
+ie_option (ENABLE_SANITIZER "enable checking memory errors via AddressSanitizer" OFF)
+
+ie_option (ENABLE_THREAD_SANITIZER "enable checking data races via ThreadSanitizer" OFF)
+
+ie_dependent_option (COVERAGE "enable code coverage" OFF "CMAKE_CXX_COMPILER_ID STREQUAL GNU" OFF)
+
+# Defines CPU capabilities
+
+ie_dependent_option (ENABLE_SSE42 "Enable SSE4.2 optimizations" ON "X86_64 OR X86" OFF)
+
+ie_dependent_option (ENABLE_AVX2 "Enable AVX2 optimizations" ON "X86_64 OR X86" OFF)
+
+ie_dependent_option (ENABLE_AVX512F "Enable AVX512 optimizations" ON "X86_64 OR X86" OFF)
+
+# Type of build; we add this as an explicit option to default it to ON
+# FIXME: At this moment, setting this to OFF will only build ngraph as a static library
+ie_option (BUILD_SHARED_LIBS "Build as a shared library" ON)
+
+ie_dependent_option (ENABLE_FASTER_BUILD "Enable build features (PCH, UNITY) to speed up build time" OFF "CMAKE_VERSION VERSION_GREATER_EQUAL 3.16" OFF)
+
+ie_dependent_option (ENABLE_CPPLINT "Enable cpplint checks during the build" ON "UNIX;NOT ANDROID" OFF)
+
+ie_dependent_option (ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF "ENABLE_CPPLINT" OFF)
+
+ie_option (ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" ON)
+
+ie_option (VERBOSE_BUILD "shows extra information about build" OFF)
+
+ie_option (ENABLE_UNSAFE_LOCATIONS "skip check for MD5 for dependency" OFF)
+
+ie_option (ENABLE_ALTERNATIVE_TEMP "in case of dependency conflict, to avoid modification in master, use local copy of dependency" ON)
+
+ie_dependent_option (ENABLE_FUZZING "instrument build for fuzzing" OFF "CMAKE_CXX_COMPILER_ID MATCHES ^(Apple)?Clang$; NOT WIN32" OFF)
+
+#
+# Check features
+#
+
+if(ENABLE_AVX512F)
+    if ((CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") AND (MSVC_VERSION VERSION_LESS 1920))
+        # 1920 version of MSVC 2019.
In MSVC 2017 AVX512F not work + set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE) + endif() + if ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6)) + set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE) + endif() + if ((CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10)) + # TBD: clarify which AppleClang version supports avx512 + set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE) + endif() + if ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") AND (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9)) + set(ENABLE_AVX512F OFF CACHE BOOL "" FORCE) + endif() +endif() + +if (VERBOSE_BUILD) + set(CMAKE_VERBOSE_MAKEFILE ON CACHE BOOL "" FORCE) +endif() diff --git a/inference-engine/cmake/linux_name.cmake b/cmake/developer_package/linux_name.cmake similarity index 93% rename from inference-engine/cmake/linux_name.cmake rename to cmake/developer_package/linux_name.cmake index e87a61ba1dd53b..28afd53746a454 100644 --- a/inference-engine/cmake/linux_name.cmake +++ b/cmake/developer_package/linux_name.cmake @@ -2,6 +2,8 @@ # SPDX-License-Identifier: Apache-2.0 # +include(target_flags) + if (LINUX) function(get_linux_name res_var) if (NOT EXISTS "/etc/lsb-release") @@ -11,7 +13,7 @@ if (LINUX) set(name_regex "NAME=\"([^ \"\n]*).*\"\n") set(version_regex "VERSION=\"([0-9]+(\\.[0-9]+)?)[^\n]*\"") else () - #linux version detection using cat /etc/lsb-release + # linux version detection using cat /etc/lsb-release file(READ "/etc/lsb-release" release_data) set(name_regex "DISTRIB_ID=([^ \n]*)\n") set(version_regex "DISTRIB_RELEASE=([0-9]+(\\.[0-9]+)?)") @@ -28,6 +30,5 @@ if (LINUX) else () set(${res_var} NOTFOUND PARENT_SCOPE) endif () - endfunction() endif () diff --git a/cmake/developer_package/message.cmake b/cmake/developer_package/message.cmake new file mode 100644 index 00000000000000..eb6a1af60035ad --- /dev/null +++ b/cmake/developer_package/message.cmake @@ -0,0 +1,22 @@ +# Copyright (C) 2018-2020 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +if(UNIX AND ENABLE_ERROR_HIGHLIGHT) + function(message) + string(ASCII 27 ESC) + set(RESET "${ESC}[m") + set(RED "${ESC}[31;1m") + set(YELLOW "${ESC}[33;1m") + + list(GET ARGV 0 MessageType) + list(REMOVE_AT ARGV 0) + if(MessageType STREQUAL FATAL_ERROR OR MessageType STREQUAL SEND_ERROR) + _message(${MessageType} "${RED}${ARGV}${RESET}") + elseif(MessageType STREQUAL WARNING) + _message(${MessageType} "${YELLOW}${ARGV}${RESET}") + else() + _message(${MessageType} "${ARGV}") + endif() + endfunction() +endif() diff --git a/inference-engine/cmake/models.cmake b/cmake/developer_package/models.cmake similarity index 98% rename from inference-engine/cmake/models.cmake rename to cmake/developer_package/models.cmake index 69dc569048f244..031112044b36f0 100644 --- a/inference-engine/cmake/models.cmake +++ b/cmake/developer_package/models.cmake @@ -2,8 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -cmake_policy(SET CMP0054 NEW) - find_package(Git REQUIRED) set(MODELS_LST "") diff --git a/cmake/options.cmake b/cmake/developer_package/options.cmake similarity index 92% rename from cmake/options.cmake rename to cmake/developer_package/options.cmake index b4bb86b1d6e23f..cedbd099962029 100644 --- a/cmake/options.cmake +++ b/cmake/developer_package/options.cmake @@ -4,7 +4,6 @@ # Usage: ie_option( "description" [IF ]) include (CMakeDependentOption) -include (version) macro (ie_option variable description value) option(${variable} "${description}" ${value}) @@ -32,6 +31,10 @@ macro (ie_option_enum variable 
description value) endmacro() function (print_enabled_features) + if(NOT COMMAND set_ci_build_number) + message(FATAL_ERROR "CI_BUILD_NUMBER is not set yet") + endif() + message(STATUS "Inference Engine enabled features: ") message(STATUS "") message(STATUS " CI_BUILD_NUMBER: ${CI_BUILD_NUMBER}") diff --git a/cmake/developer_package/packaging.cmake b/cmake/developer_package/packaging.cmake new file mode 100644 index 00000000000000..b846bf732dcb1a --- /dev/null +++ b/cmake/developer_package/packaging.cmake @@ -0,0 +1,58 @@ +# Copyright (C) 2018-2020 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +include(CPackComponent) +unset(IE_CPACK_COMPONENTS_ALL CACHE) + +set(IE_CPACK_IE_DIR deployment_tools/inference_engine) + +# +# ie_cpack_set_library_dir() +# +# Set library directory for cpack +# +function(ie_cpack_set_library_dir) + if(WIN32) + set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/${CMAKE_BUILD_TYPE} PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${ARCH_FOLDER}/${CMAKE_BUILD_TYPE} PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER}/${CMAKE_BUILD_TYPE} PARENT_SCOPE) + else() + set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH_FOLDER} PARENT_SCOPE) + endif() +endfunction() + +ie_cpack_set_library_dir() + +# +# ie_cpack_add_component(NAME ...) +# +# Wraps original `cpack_add_component` and adds component to internal IE list +# +macro(ie_cpack_add_component NAME) + list(APPEND IE_CPACK_COMPONENTS_ALL ${NAME}) + set(IE_CPACK_COMPONENTS_ALL "${IE_CPACK_COMPONENTS_ALL}" CACHE STRING "" FORCE) + cpack_add_component(${NAME} ${ARGN}) +endmacro() + +macro(ie_cpack) + set(CPACK_GENERATOR "TGZ") + string(REPLACE "/" "_" CPACK_PACKAGE_VERSION "${CI_BUILD_NUMBER}") + if(WIN32) + set(CPACK_PACKAGE_NAME inference-engine_${CMAKE_BUILD_TYPE}) + else() + set(CPACK_PACKAGE_NAME inference-engine) + endif() + set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY OFF) + set(CPACK_ARCHIVE_COMPONENT_INSTALL ON) + set(CPACK_PACKAGE_VENDOR "Intel") + set(CPACK_COMPONENTS_ALL ${ARGN}) + set(CPACK_STRIP_FILES ON) + + if(OS_FOLDER) + set(CPACK_SYSTEM_NAME "${OS_FOLDER}") + endif() + + include(CPack) +endmacro() diff --git a/inference-engine/cmake/plugins/create_plugin_file.cmake b/cmake/developer_package/plugins/create_plugin_file.cmake similarity index 100% rename from inference-engine/cmake/plugins/create_plugin_file.cmake rename to cmake/developer_package/plugins/create_plugin_file.cmake diff --git a/inference-engine/cmake/plugins/plugins.cmake b/cmake/developer_package/plugins/plugins.cmake similarity index 89% rename from inference-engine/cmake/plugins/plugins.cmake rename to cmake/developer_package/plugins/plugins.cmake index 683f02ff0a8fe4..a66d2568d98cc1 100644 --- a/inference-engine/cmake/plugins/plugins.cmake +++ b/cmake/developer_package/plugins/plugins.cmake @@ -6,9 +6,9 @@ include(CMakeParseArguments) set(PLUGIN_FILES "" CACHE INTERNAL "") -function(get_shared_library_name target_name library_name) - set(LIB_PREFIX "${CMAKE_SHARED_LIBRARY_PREFIX}") - set(LIB_SUFFIX "${IE_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}") +function(ie_plugin_get_file_name target_name library_name) + set(LIB_PREFIX "${CMAKE_SHARED_MODULE_PREFIX}") + set(LIB_SUFFIX "${IE_BUILD_POSTFIX}${CMAKE_SHARED_MODULE_SUFFIX}") set("${library_name}" "${LIB_PREFIX}${target_name}${LIB_SUFFIX}" PARENT_SCOPE) endfunction() 
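
The helper rename above goes together with the SHARED-to-MODULE switch in the hunks below: an Inference Engine plugin is only ever opened at runtime (dlopen/LoadLibrary), never linked against, so a MODULE library, named with CMAKE_SHARED_MODULE_PREFIX/SUFFIX, is the better fit, and the install rule now places it under IE_CPACK_RUNTIME_PATH. A minimal sketch of the resulting pattern (plugin and source names are made up for illustration):

    # Built as MODULE: loadable by the plugin loader, not linkable by consumers.
    add_library(myDevicePlugin MODULE my_plugin.cpp)
    target_compile_definitions(myDevicePlugin PRIVATE IMPLEMENT_INFERENCE_ENGINE_PLUGIN)
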
@@ -52,7 +52,7 @@ function(ie_add_plugin)
add_cpplint_target(${obj_lib}_cpplint FOR_TARGETS ${obj_lib})
endforeach()
- add_library(${IE_PLUGIN_NAME} SHARED ${input_files})
+ add_library(${IE_PLUGIN_NAME} MODULE ${input_files})
target_compile_definitions(${IE_PLUGIN_NAME} PRIVATE IMPLEMENT_INFERENCE_ENGINE_PLUGIN)
ie_add_vs_version_file(NAME ${TARGET_NAME}
@@ -106,7 +106,7 @@ function(ie_add_plugin)
install(TARGETS ${IE_PLUGIN_NAME}
RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ${install_component}
- LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT ${install_component})
+ LIBRARY DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT ${install_component})
endif()
endfunction()
@@ -135,7 +135,7 @@ macro(ie_register_plugins)
-D "IE_CONFIG_OUTPUT_FILE=${config_output_file}"
-D "IE_PLUGIN_NAME=${plugin}"
-D "IE_CONFIGS_DIR=${CMAKE_BINARY_DIR}/plugins"
- -P "${IE_MAIN_SOURCE_DIR}/cmake/plugins/unregister_plugin_cmake.cmake"
+ -P "${IEDevScripts_DIR}/plugins/unregister_plugin_cmake.cmake"
COMMENT "Remove ${plugin} from the plugins.xml file"
VERBATIM)
@@ -152,7 +152,7 @@ macro(ie_register_plugins)
# create plugin file
set(config_file_name "${CMAKE_BINARY_DIR}/plugins/${name}.xml")
- get_shared_library_name(${name} library_name)
+ ie_plugin_get_file_name(${name} library_name)
add_custom_command(TARGET ${IE_REGISTER_MAIN_TARGET} POST_BUILD
COMMAND
@@ -160,7 +160,7 @@ macro(ie_register_plugins)
-D "IE_CONFIG_OUTPUT_FILE=${config_file_name}"
-D "IE_DEVICE_NAME=${device_name}"
-D "IE_PLUGIN_LIBRARY_NAME=${library_name}"
- -P "${IE_MAIN_SOURCE_DIR}/cmake/plugins/create_plugin_file.cmake"
+ -P "${IEDevScripts_DIR}/plugins/create_plugin_file.cmake"
COMMENT "Register ${name} plugin"
VERBATIM)
@@ -170,10 +170,10 @@ macro(ie_register_plugins)
add_custom_command(TARGET ${IE_REGISTER_MAIN_TARGET} POST_BUILD
COMMAND
"${CMAKE_COMMAND}"
- -D "CMAKE_SHARED_LIBRARY_PREFIX=${CMAKE_SHARED_LIBRARY_PREFIX}"
+ -D "CMAKE_SHARED_MODULE_PREFIX=${CMAKE_SHARED_MODULE_PREFIX}"
-D "IE_CONFIG_OUTPUT_FILE=${config_output_file}"
-D "IE_CONFIGS_DIR=${CMAKE_BINARY_DIR}/plugins"
- -P "${IE_MAIN_SOURCE_DIR}/cmake/plugins/register_plugin_cmake.cmake"
+ -P "${IEDevScripts_DIR}/plugins/register_plugin_cmake.cmake"
COMMENT "Registering plugins to plugins.xml config file"
VERBATIM)
diff --git a/cmake/developer_package/plugins/register_plugin_cmake.cmake b/cmake/developer_package/plugins/register_plugin_cmake.cmake
new file mode 100644
index 00000000000000..39a9657944756b
--- /dev/null
+++ b/cmake/developer_package/plugins/register_plugin_cmake.cmake
@@ -0,0 +1,65 @@
+# Copyright (C) 2018-2020 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(file_content
+"<ie>
+    <plugins>
+    </plugins>
+</ie>")
+
+if(NOT EXISTS "${IE_CONFIG_OUTPUT_FILE}")
+ file(WRITE "${IE_CONFIG_OUTPUT_FILE}" "${file_content}")
+endif()
+
+# get list of plugin files
+file(GLOB plugin_files "${IE_CONFIGS_DIR}/*.xml")
+
+function(check_plugin_exists plugin_name outvar)
+ set(${outvar} OFF PARENT_SCOPE)
+
+ # check if config file already has this plugin
+ file(STRINGS "${IE_CONFIG_OUTPUT_FILE}" content REGEX "<plugin .*=\"")
+
+ foreach(line IN LISTS content)
+ string(REGEX MATCH "location=\"([^\"]*)\"" location "${line}")
+ get_filename_component(location "${CMAKE_MATCH_1}" NAME_WE)
+
+ if("${CMAKE_SHARED_MODULE_PREFIX}${plugin_name}" MATCHES "${location}")
+ # plugin has already been registered
+ set(${outvar} ON PARENT_SCOPE)
+ endif()
+ endforeach()
+endfunction()
+
+set(plugin_files_to_add)
+foreach(plugin_file IN LISTS plugin_files)
+ get_filename_component(plugin_name 
"${plugin_file}" NAME_WE) + check_plugin_exists("${plugin_name}" exists) + + if(NOT exists) + list(APPEND plugin_files_to_add "${plugin_file}") + endif() +endforeach() + +# add plugin +set(newContent "") +file(STRINGS "${IE_CONFIG_OUTPUT_FILE}" content) + +foreach(line IN LISTS content) + if("${line}" MATCHES "") + foreach(plugin_file IN LISTS plugin_files_to_add) + file(READ "${plugin_file}" content) + set(newContent "${newContent} +${content}") + endforeach() + endif() + + if(newContent) + set(newContent "${newContent}\n${line}") + else() + set(newContent "${line}") + endif() +endforeach() + +file(WRITE "${IE_CONFIG_OUTPUT_FILE}" "${newContent}") diff --git a/inference-engine/cmake/plugins/unregister_plugin_cmake.cmake b/cmake/developer_package/plugins/unregister_plugin_cmake.cmake similarity index 72% rename from inference-engine/cmake/plugins/unregister_plugin_cmake.cmake rename to cmake/developer_package/plugins/unregister_plugin_cmake.cmake index db66332ccae99c..c28aeed12b700b 100644 --- a/inference-engine/cmake/plugins/unregister_plugin_cmake.cmake +++ b/cmake/developer_package/plugins/unregister_plugin_cmake.cmake @@ -3,7 +3,7 @@ # if(NOT EXISTS "${IE_CONFIG_OUTPUT_FILE}") - return() + return() endif() # remove plugin file @@ -16,19 +16,19 @@ file(STRINGS "${IE_CONFIG_OUTPUT_FILE}" content) set(skip_plugin OFF) foreach(line IN LISTS content) if("${line}" MATCHES "${IE_PLUGIN_NAME}") - set(skip_plugin ON) + set(skip_plugin ON) endif() if(NOT skip_plugin) - if(newContent) - set(newContent "${newContent}\n${line}") - else() - set(newContent "${line}") - endif() + if(newContent) + set(newContent "${newContent}\n${line}") + else() + set(newContent "${line}") + endif() endif() if("${line}" MATCHES "") - set(skip_plugin OFF) + set(skip_plugin OFF) endif() endforeach() diff --git a/cmake/shellcheck/shellcheck.cmake b/cmake/developer_package/shellcheck/shellcheck.cmake similarity index 94% rename from cmake/shellcheck/shellcheck.cmake rename to cmake/developer_package/shellcheck/shellcheck.cmake index c2e1186d1b122b..df7a310792ba37 100644 --- a/cmake/shellcheck/shellcheck.cmake +++ b/cmake/developer_package/shellcheck/shellcheck.cmake @@ -14,7 +14,7 @@ function(ie_shellcheck_process) cmake_parse_arguments(IE_SHELLCHECK "" "DIRECTORY" "SKIP" ${ARGN}) - set(IE_SHELLCHECK_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/cmake/shellcheck/shellcheck_process.cmake") + set(IE_SHELLCHECK_SCRIPT "${IEDevScripts_DIR}/shellcheck/shellcheck_process.cmake") file(GLOB_RECURSE scripts "${IE_SHELLCHECK_DIRECTORY}/*.sh") foreach(script IN LISTS scripts) # check if we need to skip scripts diff --git a/cmake/shellcheck/shellcheck_process.cmake b/cmake/developer_package/shellcheck/shellcheck_process.cmake similarity index 100% rename from cmake/shellcheck/shellcheck_process.cmake rename to cmake/developer_package/shellcheck/shellcheck_process.cmake diff --git a/cmake/target_flags.cmake b/cmake/developer_package/target_flags.cmake similarity index 100% rename from cmake/target_flags.cmake rename to cmake/developer_package/target_flags.cmake diff --git a/inference-engine/cmake/tbb/lnx/TBBConfig.cmake b/cmake/developer_package/tbb/lnx/TBBConfig.cmake similarity index 100% rename from inference-engine/cmake/tbb/lnx/TBBConfig.cmake rename to cmake/developer_package/tbb/lnx/TBBConfig.cmake diff --git a/inference-engine/cmake/tbb/mac/TBBConfig.cmake b/cmake/developer_package/tbb/mac/TBBConfig.cmake similarity index 100% rename from inference-engine/cmake/tbb/mac/TBBConfig.cmake rename to 
cmake/developer_package/tbb/mac/TBBConfig.cmake diff --git a/inference-engine/cmake/tbb/win/TBBConfig.cmake b/cmake/developer_package/tbb/win/TBBConfig.cmake similarity index 100% rename from inference-engine/cmake/tbb/win/TBBConfig.cmake rename to cmake/developer_package/tbb/win/TBBConfig.cmake diff --git a/cmake/version.cmake b/cmake/developer_package/version.cmake similarity index 80% rename from cmake/version.cmake rename to cmake/developer_package/version.cmake index db0fe2bb79da7e..9dd1ecbc923780 100644 --- a/cmake/version.cmake +++ b/cmake/developer_package/version.cmake @@ -3,18 +3,24 @@ # function (branchName VAR) + if(NOT DEFINED repo_root) + message(FATAL_ERROR "repo_root is not defined") + endif() execute_process( COMMAND git rev-parse --abbrev-ref HEAD - WORKING_DIRECTORY ${OpenVINO_MAIN_SOURCE_DIR} + WORKING_DIRECTORY ${repo_root} OUTPUT_VARIABLE GIT_BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE) set (${VAR} ${GIT_BRANCH} PARENT_SCOPE) endfunction() function (commitHash VAR) + if(NOT DEFINED repo_root) + message(FATAL_ERROR "repo_root is not defined") + endif() execute_process( COMMAND git rev-parse HEAD - WORKING_DIRECTORY ${OpenVINO_MAIN_SOURCE_DIR} + WORKING_DIRECTORY ${repo_root} OUTPUT_VARIABLE GIT_COMMIT_HASH OUTPUT_STRIP_TRAILING_WHITESPACE) set (${VAR} ${GIT_COMMIT_HASH} PARENT_SCOPE) diff --git a/cmake/vs_version/vs_version.cmake b/cmake/developer_package/vs_version/vs_version.cmake similarity index 87% rename from cmake/vs_version/vs_version.cmake rename to cmake/developer_package/vs_version/vs_version.cmake index d857e2e4bcc4db..12f11a9f1d74e9 100644 --- a/cmake/vs_version/vs_version.cmake +++ b/cmake/developer_package/vs_version/vs_version.cmake @@ -21,6 +21,7 @@ if(IE_VS_VER_HAS_VERSION) set(IE_VS_VER_FILEVERSION_STR "${IE_VERSION_MAJOR}.${IE_VERSION_MINOR}.${IE_VERSION_PATCH}.0") endif() +set(IE_VS_VER_COMPANY_NAME_STR "Intel Corporation") set(IE_VS_VER_PRODUCTVERSION_STR "${CI_BUILD_NUMBER}") set(IE_VS_VER_PRODUCTNAME_STR "OpenVINO toolkit") set(IE_VS_VER_COPYRIGHT_STR "Copyright (C) 2018-2020, Intel Corporation") @@ -29,6 +30,7 @@ set(IE_VS_VER_COMMENTS_STR "https://docs.openvinotoolkit.org/") # # ie_add_vs_version_file(NAME # FILEDESCRIPTION +# [COMPANY_NAME ] # [FILEVERSION ] # [INTERNALNAME ] # [COPYRIGHT ] @@ -43,7 +45,7 @@ function(ie_add_vs_version_file) return() endif() - cmake_parse_arguments(VS_VER "" "NAME;FILEDESCRIPTION;FILEVERSION;INTERNALNAME;COPYRIGHT;PRODUCTNAME;PRODUCTVERSION;COMMENTS;FILEVERSION_QUAD;PRODUCTVERSION_QUAD" "" ${ARGN}) + cmake_parse_arguments(VS_VER "" "COMPANY_NAME;NAME;FILEDESCRIPTION;FILEVERSION;INTERNALNAME;COPYRIGHT;PRODUCTNAME;PRODUCTVERSION;COMMENTS;FILEVERSION_QUAD;PRODUCTVERSION_QUAD" "" ${ARGN}) if(NOT TARGET ${VS_VER_NAME}) message(FATAL_ERROR "${VS_VER_NAME} must define a target") @@ -68,6 +70,7 @@ function(ie_add_vs_version_file) endif() endmacro() + _vs_ver_update_str_variable(COMPANY_NAME) _vs_ver_update_str_variable(FILEDESCRIPTION) _vs_ver_update_str_variable(FILEVERSION) _vs_ver_update_str_variable(INTERNALNAME) @@ -80,7 +83,7 @@ function(ie_add_vs_version_file) set(IE_VS_VER_INTERNALNAME_STR ${VS_VER_NAME}) set(vs_version_output "${CMAKE_CURRENT_BINARY_DIR}/vs_version.rc") - configure_file("${OpenVINO_MAIN_SOURCE_DIR}/cmake/vs_version/vs_version.rc.in" "${vs_version_output}" @ONLY) + configure_file("${IEDevScripts_DIR}/vs_version/vs_version.rc.in" "${vs_version_output}" @ONLY) source_group("src" FILES ${vs_version_output}) target_sources(${VS_VER_NAME} PRIVATE ${vs_version_output}) diff --git 
a/cmake/vs_version/vs_version.rc.in b/cmake/developer_package/vs_version/vs_version.rc.in similarity index 94% rename from cmake/vs_version/vs_version.rc.in rename to cmake/developer_package/vs_version/vs_version.rc.in index 037247e0061722..b515b3118832e5 100644 --- a/cmake/vs_version/vs_version.rc.in +++ b/cmake/developer_package/vs_version/vs_version.rc.in @@ -19,6 +19,7 @@ BEGIN BEGIN BLOCK "040904E4" BEGIN + VALUE "CompanyName", "@IE_VS_VER_COMPANY_NAME_STR@\0" VALUE "FileDescription", "@IE_VS_VER_FILEDESCRIPTION_STR@\0" #if @IE_VS_VER_HAS_VERSION@ VALUE "FileVersion", "@IE_VS_VER_FILEVERSION_STR@\0" diff --git a/cmake/whole_archive.cmake b/cmake/developer_package/whole_archive.cmake similarity index 100% rename from cmake/whole_archive.cmake rename to cmake/developer_package/whole_archive.cmake diff --git a/cmake/features.cmake b/cmake/features.cmake index a99de90445a92f..49b29562abe3b1 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -2,67 +2,33 @@ # SPDX-License-Identifier: Apache-2.0 # -include (target_flags) -include (options) - -# these options are aimed to optimize build time on development system - if(X86_64) set(ENABLE_MKL_DNN_DEFAULT ON) else() set(ENABLE_MKL_DNN_DEFAULT OFF) endif() -ie_option (ENABLE_TESTS "unit, behavior and functional tests" OFF) - ie_option (ENABLE_MKL_DNN "MKL-DNN plugin for inference engine" ${ENABLE_MKL_DNN_DEFAULT}) -ie_dependent_option (ENABLE_CLDNN "clDnn based plugin for inference engine" ON "X86_64;NOT APPLE;NOT MINGW;NOT WINDOWS_STORE;NOT WINDOWS_PHONE" OFF) - -# FIXME: there are compiler failures with LTO and Cross-Compile toolchains. Disabling for now, but -# this must be addressed in a proper way -ie_dependent_option (ENABLE_LTO "Enable Link Time Optimization" OFF "LINUX;NOT CMAKE_CROSSCOMPILING; CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9" OFF) - -ie_option (OS_FOLDER "create OS dedicated folder in output" OFF) - -# FIXME: ARM cross-compiler generates several "false positive" warnings regarding __builtin_memcpy buffer overflow -ie_dependent_option (TREAT_WARNING_AS_ERROR "Treat build warnings as errors" ON "X86 OR X86_64" OFF) - -ie_option (ENABLE_INTEGRITYCHECK "build DLLs with /INTEGRITYCHECK flag" OFF) - -ie_option (ENABLE_SANITIZER "enable checking memory errors via AddressSanitizer" OFF) - -ie_option (ENABLE_THREAD_SANITIZER "enable checking data races via ThreadSanitizer" OFF) - -ie_dependent_option (COVERAGE "enable code coverage" OFF "CMAKE_CXX_COMPILER_ID STREQUAL GNU" OFF) - -# Define CPU capabilities - -ie_dependent_option (ENABLE_SSE42 "Enable SSE4.2 optimizations" ON "X86_64 OR X86" OFF) - -ie_dependent_option (ENABLE_AVX2 "Enable AVX2 optimizations" ON "X86_64 OR X86" OFF) +ie_option (ENABLE_TESTS "unit, behavior and functional tests" OFF) -ie_dependent_option (ENABLE_AVX512F "Enable AVX512 optimizations" ON "X86_64 OR X86" OFF) +ie_dependent_option (ENABLE_CLDNN "clDnn based plugin for inference engine" ON "X86_64;NOT APPLE;NOT MINGW;NOT WINDOWS_STORE;NOT WINDOWS_PHONE" OFF) ie_option (ENABLE_PROFILING_ITT "Build with ITT tracing. Optionally configure pre-built ittnotify library though INTEL_VTUNE_DIR variable." 
OFF) ie_option (ENABLE_DOCS "Build docs using Doxygen" OFF) -ie_dependent_option (ENABLE_FASTER_BUILD "Enable build features (PCH, UNITY) to speed up build time" OFF "CMAKE_VERSION VERSION_GREATER_EQUAL 3.16" OFF) - -# Type of build, we add this as an explicit option to default it to ON -# FIXME: Ah this moment setting this to OFF will only build ngraph a static library -ie_option (BUILD_SHARED_LIBS "Build as a shared library" ON) - -ie_dependent_option(ENABLE_CPPLINT "Enable cpplint checks during the build" ON "UNIX;NOT ANDROID" OFF) - -ie_dependent_option(ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF "ENABLE_CPPLINT" OFF) - -ie_option(ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" ON) +ie_option(ENABLE_TEMPLATE_PLUGIN "Register template plugin into plugins.xml" OFF) ie_option_enum(SELECTIVE_BUILD "Enable OpenVINO conditional compilation or statistics collection. \ In case SELECTIVE_BUILD is enabled, the SELECTIVE_BUILD_STAT variable should contain the path to the collected InelSEAPI statistics. \ Usage: -DSELECTIVE_BUILD=ON -DSELECTIVE_BUILD_STAT=/path/*.csv" OFF ALLOWED_VALUES ON OFF COLLECT) -set(LINKCHECKER_PY "" CACHE FILEPATH "Path to linkchecker.py for documentation check") +ie_option(ENABLE_ERROR_HIGHLIGHT "Highlight errors and warnings during compile time" OFF) + +# +# Process options +# + +print_enabled_features() diff --git a/cmake/fuzzing.cmake b/cmake/fuzzing.cmake deleted file mode 100644 index 4e62429f9a604a..00000000000000 --- a/cmake/fuzzing.cmake +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2020 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -function(enable_fuzzing) - # Enable (libFuzzer)[https://llvm.org/docs/LibFuzzer.html] if supported. - if(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$" AND NOT WIN32) - # Communicate libfuzzer is enabled - set(WITH_LIBFUZZER ON PARENT_SCOPE) - add_compile_definitions(WITH_LIBFUZZER) - - # Enable libfuzzer and code coverage - set(FUZZING_COMPILER_FLAGS "-fsanitize=fuzzer-no-link -fprofile-instr-generate -fcoverage-mapping") - set(FUZZING_LINKER_FLAGS "-fsanitize-coverage=trace-pc-guard -fprofile-instr-generate") - - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FUZZING_COMPILER_FLAGS}" PARENT_SCOPE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FUZZING_COMPILER_FLAGS}" PARENT_SCOPE) - set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FUZZING_LINKER_FLAGS}" PARENT_SCOPE) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FUZZING_LINKER_FLAGS}") - endif() -endfunction(enable_fuzzing) - - -function(add_fuzzer FUZZER_EXE_NAME FUZZER_SOURCES) - add_executable(${FUZZER_EXE_NAME} ${FUZZER_SOURCES}) - if(WITH_LIBFUZZER) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=fuzzer" PARENT_SCOPE) - endif() - target_link_libraries(${FUZZER_EXE_NAME} PRIVATE fuzz-testhelper) -endfunction(add_fuzzer) diff --git a/cmake/onecoreuap.toolchain.cmake b/cmake/toolchains/onecoreuap.toolchain.cmake similarity index 97% rename from cmake/onecoreuap.toolchain.cmake rename to cmake/toolchains/onecoreuap.toolchain.cmake index 3c1ad25c606532..8902d6b9acca58 100644 --- a/cmake/onecoreuap.toolchain.cmake +++ b/cmake/toolchains/onecoreuap.toolchain.cmake @@ -63,6 +63,7 @@ foreach(lib kernel32 user32 advapi32 ole32 mscoree combase) endforeach() set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${linker_flags}") +set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${linker_flags}") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} 
${linker_flags}") unset(linker_flags) diff --git a/cmake/uwp.toolchain.cmake b/cmake/toolchains/uwp.toolchain.cmake similarity index 100% rename from cmake/uwp.toolchain.cmake rename to cmake/toolchains/uwp.toolchain.cmake diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 5d14fd7e16bc5c..a4ee2f62aa5851 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -40,6 +40,12 @@ if(NOT ENABLE_DOCKER) endforeach() endif() +set(LINKCHECKER_PY "" CACHE FILEPATH "Path to linkchecker.py for documentation check") +set(OMZ_DOCS_DIR "" CACHE PATH "Path to open_model_zoo documentation") +set(WORKBENCH_DOCS_DIR "" CACHE PATH "Path to workbench documentation") +set(POT_DOCS_DIR "" CACHE PATH "Path to post-training-compression-tool documentation") +set(GST_DOCS_DIR "" CACHE PATH "Path to gst-video-analytics documentation") + function(build_docs) find_package(Doxygen REQUIRED dot) find_package(Python3 COMPONENTS Interpreter) @@ -53,6 +59,16 @@ function(build_docs) message(FATAL_ERROR "Python3 is required to build the documentation") endif() + execute_process( + COMMAND ${Python3_EXECUTABLE} -m pip show lxml + RESULT_VARIABLE PIP_EXIT_CODE + OUTPUT_QUIET + ) + + if (NOT ${PIP_EXIT_CODE} EQUAL 0) + message(FATAL_ERROR "lxml package is not installed. Please use \"pip install lxml\".") + endif() + if(NOT LATEX_FOUND) message(FATAL_ERROR "LATEX is required to build the documentation") endif() @@ -70,18 +86,34 @@ function(build_docs) # Preprocessing scripts set(DOXY_MD_FILTER "${DOXYGEN_DIR}/doxy_md_filter.py") + set(DOXY_LAYOUT_SCRIPT "${DOXYGEN_DIR}/build_main_layout.py") + set(DOXY_LOG_SCRIPT "${DOXYGEN_DIR}/log.py") set(PYX_FILTER "${DOXYGEN_DIR}/pyx_filter.py") + # assets dir + set(ASSETS_DIR "${DOXYGEN_DIR}/assets") + + # header and footer + set(HEADER_SOURCE "${DOXYGEN_DIR}/header.html.in") + set(FOOTER_SOURCE "${DOXYGEN_DIR}/footer.html.in") + set(HEADER_BUILD "${DOCS_BUILD_DIR}/header.html") + set(FOOTER_BUILD "${DOCS_BUILD_DIR}/footer.html") + + configure_file(${HEADER_SOURCE} ${HEADER_BUILD} @ONLY) + configure_file(${FOOTER_SOURCE} ${FOOTER_BUILD} @ONLY) + file(GLOB_RECURSE doc_source_files LIST_DIRECTORIES true RELATIVE ${OpenVINO_MAIN_SOURCE_DIR} "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.md" "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.png" "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.gif" "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.jpg" + "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.svg" "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.md" "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.png" "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.gif" - "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.jpg") + "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.jpg" + "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.svg") configure_file(${PYTHON_API_IN} ${PYTHON_API_OUT} @ONLY) @@ -102,6 +134,7 @@ function(build_docs) set(NGRAPH_CPP_LAYOUT_SOURCE "${DOXYGEN_DIR}/ngraph_cpp_api.xml") set(NGRAPH_PY_LAYOUT_SOURCE "${DOXYGEN_DIR}/ngraph_py_api.xml") set(IE_LAYOUT_SOURCE "${DOXYGEN_DIR}/ie_docs.xml") + set(OPENVINO_LAYOUT_SOURCE "${DOXYGEN_DIR}/openvino_docs.xml") set(C_LAYOUT_SOURCE "${DOXYGEN_DIR}/ie_c_api.xml") set(PY_LAYOUT_SOURCE "${DOXYGEN_DIR}/ie_py_api.xml") set(PLUGIN_LAYOUT_SOURCE "${DOXYGEN_DIR}/ie_plugin_api.xml") @@ -109,14 +142,25 @@ function(build_docs) set(NGRAPH_CPP_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ngraph_cpp_api.xml") set(NGRAPH_PY_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ngraph_py_api.xml") set(IE_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ie_docs.xml") + set(OPENVINO_LAYOUT_BUILD "${DOCS_BUILD_DIR}/openvino_docs.xml") set(C_LAYOUT_BUILD 
"${DOCS_BUILD_DIR}/ie_c_api.xml") set(PY_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ie_py_api.xml") set(PLUGIN_LAYOUT_BUILD "${DOCS_BUILD_DIR}/ie_plugin_api.xml") + # out dirs + set(OUTPUT_DIRECTORY "${DOCS_BUILD_DIR}/html") + set(IE_OUTPUT "${OUTPUT_DIRECTORY}") + set(C_OUTPUT "${OUTPUT_DIRECTORY}/ie_c_api") + set(PY_OUTPUT "${OUTPUT_DIRECTORY}/ie_python_api") + set(PLUGIN_OUTPUT "${OUTPUT_DIRECTORY}/ie_plugin_api") + set(NGRAPH_CPP_OUTPUT "${OUTPUT_DIRECTORY}/ngraph_cpp_api") + set(NGRAPH_PY_OUTPUT "${OUTPUT_DIRECTORY}/ngraph_python_api") + # Tables of contents configure_file(${NGRAPH_CPP_LAYOUT_SOURCE} ${NGRAPH_CPP_LAYOUT_BUILD} @ONLY) configure_file(${NGRAPH_PY_LAYOUT_SOURCE} ${NGRAPH_PY_LAYOUT_BUILD} @ONLY) configure_file(${IE_LAYOUT_SOURCE} ${IE_LAYOUT_BUILD} @ONLY) + configure_file(${OPENVINO_LAYOUT_SOURCE} ${OPENVINO_LAYOUT_BUILD} @ONLY) configure_file(${C_LAYOUT_SOURCE} ${C_LAYOUT_BUILD} @ONLY) configure_file(${PY_LAYOUT_SOURCE} ${PY_LAYOUT_BUILD} @ONLY) configure_file(${PLUGIN_LAYOUT_SOURCE} ${PLUGIN_LAYOUT_BUILD} @ONLY) @@ -136,6 +180,7 @@ function(build_docs) # nGraph C++ API add_custom_target(ngraph_cpp_api + COMMAND ${CMAKE_COMMAND} -E copy_directory ${ASSETS_DIR} ${NGRAPH_CPP_OUTPUT}/assets COMMAND ${DOXYGEN_EXECUTABLE} ${NGRAPH_CPP_CONFIG_BUILD} WORKING_DIRECTORY ${DOCS_BUILD_DIR} VERBATIM) @@ -143,6 +188,7 @@ function(build_docs) # nGraph Python API add_custom_target(ngraph_py_api + COMMAND ${CMAKE_COMMAND} -E copy_directory ${ASSETS_DIR} ${NGRAPH_PY_OUTPUT}/assets COMMAND ${DOXYGEN_EXECUTABLE} ${NGRAPH_PY_CONFIG_BUILD} WORKING_DIRECTORY ${DOCS_BUILD_DIR} VERBATIM) @@ -150,6 +196,7 @@ function(build_docs) # C API add_custom_target(c_api + COMMAND ${CMAKE_COMMAND} -E copy_directory ${ASSETS_DIR} ${C_OUTPUT}/assets COMMAND ${DOXYGEN_EXECUTABLE} ${C_CONFIG_BUILD} WORKING_DIRECTORY ${DOCS_BUILD_DIR} COMMENT "Generating C API Reference" @@ -158,6 +205,7 @@ function(build_docs) # Python API add_custom_target(py_api + COMMAND ${CMAKE_COMMAND} -E copy_directory ${ASSETS_DIR} ${PY_OUTPUT}/assets COMMAND ${DOXYGEN_EXECUTABLE} ${PY_CONFIG_BUILD} WORKING_DIRECTORY ${DOCS_BUILD_DIR} COMMENT "Generating Python API Reference" @@ -174,14 +222,98 @@ function(build_docs) COMMENT "Pre-process docs" VERBATIM) - foreach(source_file ${doc_source_files}) + # ovino doc files + file(GLOB_RECURSE ovino_doc_files + LIST_DIRECTORIES true RELATIVE ${OpenVINO_MAIN_SOURCE_DIR} + "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.md" + "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.png" + "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.gif" + "${OpenVINO_MAIN_SOURCE_DIR}/docs/*.jpg" + "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.md" + "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.png" + "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.gif" + "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/*.jpg") + + foreach(source_file ${ovino_doc_files}) list(APPEND commands COMMAND ${CMAKE_COMMAND} -E copy - "${OpenVINO_MAIN_SOURCE_DIR}/${source_file}" "${DOCS_BUILD_DIR}/${source_file}") + "${OpenVINO_MAIN_SOURCE_DIR}/${source_file}" "${DOCS_BUILD_DIR}/openvino/${source_file}") endforeach() + # omz doc files + if(EXISTS "${OMZ_DOCS_DIR}") + get_filename_component(OMZ_DOCS_DIR "${OMZ_DOCS_DIR}" ABSOLUTE) + + file(GLOB_RECURSE omz_doc_files + LIST_DIRECTORIES true RELATIVE ${OMZ_DOCS_DIR} + "${OMZ_DOCS_DIR}/*.md" + "${OMZ_DOCS_DIR}/*.png" + "${OMZ_DOCS_DIR}/*.gif" + "${OMZ_DOCS_DIR}/*.jpg") + + foreach(source_file ${omz_doc_files}) + list(APPEND commands COMMAND ${CMAKE_COMMAND} -E copy + "${OMZ_DOCS_DIR}/${source_file}" "${DOCS_BUILD_DIR}/omz/${source_file}") + 
endforeach() + configure_file("${OMZ_DOCS_DIR}/omz_docs.xml" "${DOCS_BUILD_DIR}/omz_docs.xml" @ONLY) + endif() + + # workbench doc files + if(EXISTS "${WORKBENCH_DOCS_DIR}") + get_filename_component(WORKBENCH_DOCS_DIR "${WORKBENCH_DOCS_DIR}" ABSOLUTE) + + file(GLOB_RECURSE workbench_doc_files + LIST_DIRECTORIES true RELATIVE ${WORKBENCH_DOCS_DIR} + "${WORKBENCH_DOCS_DIR}/*.md" + "${WORKBENCH_DOCS_DIR}/*.png" + "${WORKBENCH_DOCS_DIR}/*.gif" + "${WORKBENCH_DOCS_DIR}/*.jpg") + + foreach(source_file ${workbench_doc_files}) + list(APPEND commands COMMAND ${CMAKE_COMMAND} -E copy + "${WORKBENCH_DOCS_DIR}/${source_file}" "${DOCS_BUILD_DIR}/workbench/${source_file}") + endforeach() + configure_file("${WORKBENCH_DOCS_DIR}/docs/Workbench_DG/workbench_docs.xml" "${DOCS_BUILD_DIR}/workbench_docs.xml" @ONLY) + endif() + + # pot doc files + if(EXISTS "${POT_DOCS_DIR}") + get_filename_component(POT_DOCS_DIR "${POT_DOCS_DIR}" ABSOLUTE) + + file(GLOB_RECURSE pot_doc_files + LIST_DIRECTORIES true RELATIVE ${POT_DOCS_DIR} + "${POT_DOCS_DIR}/*.md" + "${POT_DOCS_DIR}/*.png" + "${POT_DOCS_DIR}/*.gif" + "${POT_DOCS_DIR}/*.jpg") + + foreach(source_file ${pot_doc_files}) + list(APPEND commands COMMAND ${CMAKE_COMMAND} -E copy + "${POT_DOCS_DIR}/${source_file}" "${DOCS_BUILD_DIR}/pot/${source_file}") + endforeach() + configure_file("${POT_DOCS_DIR}/docs/pot_docs.xml" "${DOCS_BUILD_DIR}/pot_docs.xml" @ONLY) + endif() + + # gst doc files + if(EXISTS "${GST_DOCS_DIR}") + get_filename_component(GST_DOCS_DIR "${GST_DOCS_DIR}" ABSOLUTE) + + file(GLOB_RECURSE gst_doc_files + LIST_DIRECTORIES true RELATIVE ${GST_DOCS_DIR} + "${GST_DOCS_DIR}/*.md" + "${GST_DOCS_DIR}/*.png" + "${GST_DOCS_DIR}/*.gif" + "${GST_DOCS_DIR}/*.jpg") + + foreach(source_file ${gst_doc_files}) + list(APPEND commands COMMAND ${CMAKE_COMMAND} -E copy + "${GST_DOCS_DIR}/${source_file}" "${DOCS_BUILD_DIR}/gst/${source_file}") + endforeach() + endif() + add_custom_command(TARGET preprocess_docs PRE_BUILD ${commands} + COMMAND ${Python3_EXECUTABLE} ${DOXY_LAYOUT_SCRIPT} --openvino ${OPENVINO_LAYOUT_BUILD} COMMAND ${Python3_EXECUTABLE} ${DOXY_MD_FILTER} ${DOCS_BUILD_DIR} COMMENT "Pre-process markdown and image links") @@ -189,6 +321,7 @@ function(build_docs) add_custom_target(ie_docs DEPENDS ngraph_cpp_api preprocess_docs + COMMAND ${CMAKE_COMMAND} -E copy_directory ${ASSETS_DIR} ${IE_OUTPUT}/assets COMMAND ${DOXYGEN_EXECUTABLE} ${IE_CONFIG_BUILD} WORKING_DIRECTORY ${DOCS_BUILD_DIR} VERBATIM) @@ -197,6 +330,7 @@ function(build_docs) add_custom_target(plugin_api DEPENDS ngraph_cpp_api ie_docs + COMMAND ${CMAKE_COMMAND} -E copy_directory ${ASSETS_DIR} ${PLUGIN_OUTPUT}/assets COMMAND ${DOXYGEN_EXECUTABLE} ${PLUGIN_CONFIG_BUILD} WORKING_DIRECTORY ${DOCS_BUILD_DIR} COMMENT "Generating Plugin API Reference" @@ -213,13 +347,21 @@ function(build_docs) ngraph_py_api ngraph_cpp_api PROPERTIES FOLDER docs) + add_custom_command(TARGET openvino_docs + POST_BUILD + COMMAND ${Python3_EXECUTABLE} ${DOXY_LOG_SCRIPT} --log "${DOCS_BUILD_DIR}/ie_docs.log" + --include_omz $ + --include_wb $ + --include_pot $ + --include_gst $ + COMMENT "Parse doxygen log to find errors." 
+ VERBATIM)
+
+ # added linkchecker
if(EXISTS "${LINKCHECKER_PY}")
add_custom_target(docs_check
- COMMAND ${Python3_EXECUTABLE} "${LINKCHECKER_PY}"
- "${DOCS_BUILD_DIR}/html/" -f "${DOXYGEN_DIR}/linkchecker_filter.yaml"
- --no_recursive -l "${DOCS_BUILD_DIR}"
+ COMMAND ${Python3_EXECUTABLE} "${LINKCHECKER_PY}" -v "${DOCS_BUILD_DIR}/html/"
COMMENT "Check links in generated documentation"
WORKING_DIRECTORY "${DOCS_BUILD_DIR}"
VERBATIM)
diff --git a/docs/HOWTO/Custom_Layers_Guide.md b/docs/HOWTO/Custom_Layers_Guide.md
index 23437de247aabb..0cacca13451ad7 100644
--- a/docs/HOWTO/Custom_Layers_Guide.md
+++ b/docs/HOWTO/Custom_Layers_Guide.md
@@ -1,200 +1,371 @@
-# Custom Layers Guide {#openvino_docs_HOWTO_Custom_Layers_Guide}
+# Custom Operations Guide {#openvino_docs_HOWTO_Custom_Layers_Guide}
+
+The Intel® Distribution of OpenVINO™ toolkit supports neural network models trained with multiple frameworks, including
+TensorFlow*, Caffe*, MXNet*, Kaldi*, and the ONNX* file format. The list of supported operations (layers) is different for
+each of the supported frameworks. To see the operations supported by your framework, refer to
+[Supported Framework Layers](../MO_DG/prepare_model/Supported_Frameworks_Layers.md).
+
+Custom operations are operations that are not included in the list of known operations. If your model contains any
+operation that is not in the list of known operations, the Model Optimizer is not able to generate an Intermediate
+Representation (IR) for this model.
+
+This guide illustrates the workflow for running inference on topologies featuring custom operations, allowing you to
+plug in your own implementation for an existing or completely new operation.
+
+> **NOTE:** *Layer* is the legacy term for an *operation*, which came from the Caffe\* framework. It is no longer used.
+> Refer to [Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™](../MO_DG/IR_and_opsets.md)
+> for more information on the topic.
+
+## Terms Used in This Guide
+
+- *Intermediate Representation (IR)* — The neural network representation used only by the Inference Engine in OpenVINO,
+  abstracting the different frameworks and describing the model topology, operation parameters, and weights.
+
+- *Operation* — The abstract concept of a math function that is selected for a specific purpose. Operations supported by
+  OpenVINO™ are listed in the supported operation set provided in the [Available Operations Sets](../ops/opset.md).
+  Examples of operations are: [ReLU](../ops/activation/ReLU_1.md), [Convolution](../ops/convolution/Convolution_1.md),
+  [Add](../ops/arithmetic/Add_1.md), etc.
+
+- *Kernel* — The implementation of an operation function in the OpenVINO™ plugin; in this case, the math programmed (in
+  C++ and OpenCL) to perform the operation for target hardware (CPU or GPU).
+
+- *Inference Engine Extension* — A device-specific module implementing custom operations (a set of kernels).
+
+## Custom Operation Support Overview
+
+There are three steps to support inference of a model with custom operation(s):
+1. Add support for a custom operation in the [Model Optimizer](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) so
+the Model Optimizer can generate the IR with the operation.
+2. Create an operation set and implement a custom nGraph operation in it as described in
+[Custom nGraph Operation](../IE_DG/Extensibility_DG/AddingNGraphOps.md).
+3. 
Implement a custom operation in one of the [Inference Engine](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md)
+plugins to support inference of this operation on particular target hardware (CPU, GPU, or VPU).
+
+To see the operations that are supported by each device plugin for the Inference Engine, refer to
+[Supported Devices](../IE_DG/supported_plugins/Supported_Devices.md).
+
+> **NOTE:** If a device doesn't support a particular operation, an alternative to creating a new operation is to target
+> an additional device using the HETERO plugin. The [Heterogeneous Plugin](../IE_DG/supported_plugins/HETERO.md) may be
+> used to run an inference model on multiple devices, allowing the unsupported operations on one device to "fall back" to
+> run on another device (e.g., CPU) that does support those operations.
+
+### Custom Operation Support for the Model Optimizer
+
+The Model Optimizer conversion pipeline is described in detail in the "Model Conversion Pipeline" section of
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md).
+It is recommended to read that article first for a better understanding of the following material.
+
+The Model Optimizer provides an extension mechanism to support new operations and implement custom model transformations to
+generate an optimized IR. This mechanism is described in the "Model Optimizer Extensions" section of
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md).
+
+At a minimum, two types of Model Optimizer extensions should be implemented to support a custom operation:
+1. An operation class for the new operation. This class stores information about the operation, its attributes, the shape
+inference function, the attributes to be saved to the IR, and some other internally used attributes. Refer to the
+"Model Optimizer Operation" section of
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for
+detailed instructions on how to implement it.
+2. An operation attributes extractor. The extractor is responsible for parsing the framework-specific representation of the
+operation and uses the corresponding operation class to update the graph node attributes with the necessary attributes of the
+operation. Refer to the "Operation Extractor" section of
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for
+detailed instructions on how to implement it.
+
+> **NOTE:** In some cases you may also need to implement a transformation to support the operation. This topic is covered
+> in the "Graph Transformation Extensions" section of
+> [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md).
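+
+For illustration only, a minimal sketch of such a pair of extensions is shown below. This is a hedged example, not code
+from this repository: the names `MyCustomOp` and `my_attribute` are hypothetical, and the TensorFlow-style attribute
+access in the extractor would differ for other frameworks.
+
+```py
+# Hypothetical sketch of the two minimal Model Optimizer extensions.
+# `MyCustomOp` and `my_attribute` are illustrative names only.
+from mo.front.extractor import FrontExtractorOp
+from mo.graph.graph import Graph, Node
+from mo.ops.op import Op
+
+
+class MyCustomOp(Op):
+    op = 'MyCustomOp'
+
+    def __init__(self, graph: Graph, attrs: dict):
+        super().__init__(graph, {
+            'type': self.op,
+            'op': self.op,
+            'version': 'extension',
+            'in_ports_count': 1,
+            'out_ports_count': 1,
+            'infer': self.infer,
+        }, attrs)
+
+    def backend_attrs(self):
+        return ['my_attribute']  # attributes serialized to the generated IR
+
+    @staticmethod
+    def infer(node: Node):
+        # this sketch simply copies the input shape to the output
+        node.out_port(0).data.set_shape(node.in_port(0).data.get_shape())
+
+
+class MyCustomOpExtractor(FrontExtractorOp):
+    op = 'MyCustomOp'  # framework operation type to match
+    enabled = True
+
+    @classmethod
+    def extract(cls, node):
+        # TensorFlow-style attribute access; other frameworks differ
+        MyCustomOp.update_node_stat(node, {'my_attribute': node.pb.attr['my_attribute'].i})
+        return cls.enabled
+```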
+
+## Custom Operations Extensions for the Inference Engine
+
+The Inference Engine provides an extension mechanism to support new operations. This mechanism is described in the
+[Inference Engine Extensibility Mechanism](../IE_DG/Extensibility_DG/Intro.md).
+
+Each device plugin includes a library of optimized implementations to execute known operations, which must be extended to
+execute a custom operation. The custom operation extension is implemented according to the target device:
+
+- Custom Operation CPU Extension
+ - A compiled shared library (`.so`, `.dylib` or `.dll`) needed by the CPU Plugin for executing the custom operation
+ on a CPU. Refer to [How to Implement Custom CPU Operations](../IE_DG/Extensibility_DG/CPU_Kernel.md) for more
+ details.
+- Custom Operation GPU Extension
+ - OpenCL source code (.cl) for the custom operation kernel that will be compiled to execute on the GPU, along with an
+ operation description file (.xml) needed by the GPU Plugin for the custom operation kernel. Refer to
+ [How to Implement Custom GPU Operations](../IE_DG/Extensibility_DG/GPU_Kernel.md) for more details.
+- Custom Operation VPU Extension
+ - OpenCL source code (.cl) for the custom operation kernel that will be compiled to execute on the VPU, along with an
+ operation description file (.xml) needed by the VPU Plugin for the custom operation kernel. Refer to
+ [How to Implement Custom Operations for VPU](../IE_DG/Extensibility_DG/VPU_Kernel.md) for more details.
+
+Also, it is necessary to implement an nGraph custom operation according to
+[Custom nGraph Operation](../IE_DG/Extensibility_DG/AddingNGraphOps.md) so the Inference Engine can read an IR with this
+operation and correctly infer output tensor shapes and types.
+
+## Enabling Magnetic Resonance Image Reconstruction Model
+This chapter provides step-by-step instructions on how to enable the magnetic resonance image reconstruction model
+implemented in the [repository](https://github.com/rmsouza01/Hybrid-CS-Model-MRI/) using a custom operation on the CPU. The
+example is prepared for a model generated from the repository with hash `2ede2f96161ce70dcdc922371fe6b6b254aafcc8`.
+
+### Download and Convert the Model to a Frozen TensorFlow\* Model Format
+The original pre-trained model is provided in the HDF5 format, which is not supported by OpenVINO directly and needs to
+be converted to the TensorFlow\* frozen model format first.
+
+1. Download the repository `https://github.com/rmsouza01/Hybrid-CS-Model-MRI`:
+```py + import keras as K + import numpy as np + import Modules.frequency_spatial_network as fsnet + import tensorflow as tf -Custom layers are layers that are not included in the list of known layers. If your topology contains any layers that are not in the list of known layers, the Model Optimizer classifies them as custom. + under_rate = '20' -This guide illustrates the workflow for running inference on topologies featuring custom layers, allowing you to plug in your own implementation for existing or completely new layers. -For a step-by-step example of creating and executing a custom layer, see the [Custom Layer Implementation Tutorials for Linux and Windows.](https://github.com/david-drew/OpenVINO-Custom-Layers/tree/master/2019.r2.0) + stats = np.load("Data/stats_fs_unet_norm_" + under_rate + ".npy") + var_sampling_mask = np.load("Data/sampling_mask_" + under_rate + "perc.npy") -## Terms used in this guide + model = fsnet.wnet(stats[0], stats[1], stats[2], stats[3], kshape = (5,5), kshape2=(3,3)) + model_name = "Models/wnet_" + under_rate + ".hdf5" + model.load_weights(model_name) -- *Layer* — The abstract concept of a math function that is selected for a specific purpose (relu, sigmoid, tanh, convolutional). This is one of a sequential series of building blocks within the neural network. -- *Kernel* — The implementation of a layer function, in this case, the math programmed (in C++ and Python) to perform the layer operation for target hardware (CPU or GPU). -- *Intermediate Representation (IR)* — Neural Network used only by the Inference Engine in OpenVINO abstracting the different frameworks and describing topology, layer parameters and weights. -The original format will be a supported framework such as TensorFlow, Caffe, or MXNet. + inp = np.random.standard_normal([1, 256, 256, 2]).astype(np.float32) + np.save('inp', inp) -- *Model Extension Generator* — Generates template source code files for each of the extensions needed by the Model Optimizer and the Inference Engine. + sess = K.backend.get_session() + sess.as_default() + graph_def = sess.graph.as_graph_def() + graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, ['conv2d_44/BiasAdd']) + with tf.gfile.FastGFile('wnet_20.pb', 'wb') as f: + f.write(graph_def.SerializeToString()) +``` -- *Inference Engine Extension* — Device-specific module implementing custom layers (a set of kernels). - - -## Custom Layer Overview - -The [Model Optimizer](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) searches the list of known layers for each layer contained in the input model topology before building the model's internal representation, optimizing the model, and producing the Intermediate Representation files. - -The [Inference Engine](../IE_DG/Deep_Learning_Inference_Engine_DevGuide.md) loads the layers from the input model IR files into the specified device plugin, which will search a list of known layer implementations for the device. If your topology contains layers that are not in the list of known layers for the device, the Inference Engine considers the layer to be unsupported and reports an error. To see the layers that are supported by each device plugin for the Inference Engine, refer to the [Supported Devices](../IE_DG/supported_plugins/Supported_Devices.md) documentation. -
-> **NOTE:** If a device doesn't support a particular layer, an alternative to creating a new custom layer is to target an additional device using the HETERO plugin. The [Heterogeneous Plugin](../IE_DG/supported_plugins/HETERO.md) may be used to run an inference model on multiple devices allowing the unsupported layers on one device to "fallback" to run on another device (e.g., CPU) that does support those layers. - -## Custom Layer Implementation Workflow - -When implementing a custom layer for your pre-trained model in the Intel® Distribution of OpenVINO™ toolkit, you will need to add extensions to both the Model Optimizer and the Inference Engine. - -## Custom Layer Extensions for the Model Optimizer - -The following figure shows the basic processing steps for the Model Optimizer highlighting the two necessary custom layer extensions, the Custom Layer Extractor and the Custom Layer Operation. - -![](img/MO_extensions_flow.png) - - -The Model Optimizer first extracts information from the input model which includes the topology of the model layers along with parameters, input and output format, etc., for each layer. The model is then optimized from the various known characteristics of the layers, interconnects, and data flow which partly comes from the layer operation providing details including the shape of the output for each layer. Finally, the optimized model is output to the model IR files needed by the Inference Engine to run the model. - -The Model Optimizer starts with a library of known extractors and operations for each [supported model framework](../MO_DG/prepare_model/Supported_Frameworks_Layers.md) which must be extended to use each unknown custom layer. The custom layer extensions needed by the Model Optimizer are: - -- Custom Layer Extractor - - Responsible for identifying the custom layer operation and extracting the parameters for each instance of the custom layer. The layer parameters are stored per instance and used by the layer operation before finally appearing in the output IR. Typically the input layer parameters are unchanged, which is the case covered by this tutorial. -- Custom Layer Operation - - Responsible for specifying the attributes that are supported by the custom layer and computing the output shape for each instance of the custom layer from its parameters.
The `--mo-op` command-line argument shown in the examples below generates a custom layer operation for the Model Optimizer.
-
-## Custom Layer Extensions for the Inference Engine
-
-The following figure shows the basic flow for the Inference Engine highlighting two custom layer extensions for the CPU and GPU Plugins, the Custom Layer CPU extension and the Custom Layer GPU Extension.
-
-![](img/IE_extensions_flow.png)
-
-Each device plugin includes a library of optimized implementations to execute known layer operations which must be extended to execute a custom layer. The custom layer extension is implemented according to the target device:
-
-- Custom Layer CPU Extension
- - A compiled shared library (.so or .dll binary) needed by the CPU Plugin for executing the custom layer on the CPU.
-- Custom Layer GPU Extension
- - OpenCL source code (.cl) for the custom layer kernel that will be compiled to execute on the GPU along with a layer description file (.xml) needed by the GPU Plugin for the custom layer kernel.
-
-## Model Extension Generator
+As a result, the TensorFlow\* frozen model file "wnet_20.pb" is generated.
-Using answers to interactive questions or a *.json* configuration file, the Model Extension Generator tool generates template source code files for each of the extensions needed by the Model Optimizer and the Inference Engine. To complete the implementation of each extension, the template functions may need to be edited to fill-in details specific to the custom layer or the actual custom layer functionality itself.
+### Convert the Frozen TensorFlow\* Model to Intermediate Representation
-### Command-line
-
-The Model Extension Generator is included in the Intel® Distribution of OpenVINO™ toolkit installation and is run using the command (here with the "--help" option):
+First, open the model in TensorBoard or another TensorFlow* model visualization tool. The model supports a dynamic
+batch dimension because the value for the batch dimension is not hardcoded in the model. The Model Optimizer needs to set all
+dynamic dimensions to specific values to create the IR; therefore, specify the command line parameter `-b 1` to set
+the batch dimension equal to 1. The actual batch size dimension can be changed at runtime using the Inference Engine API
+described in [Using Shape Inference](../IE_DG/ShapeInference.md). Also refer to
+[Converting a Model Using General Conversion Parameters](../MO_DG/prepare_model/convert_model/Converting_Model_General.md)
+and [Convert Your TensorFlow* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md)
+for more details and command line parameters used for the model conversion.
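+
+As an aside, once the model has been successfully converted (see the rest of this chapter), the batch dimension can be
+changed at runtime along the following lines. This is a hedged sketch: the IR file names assume the converted model is
+called "wnet_20", the batch size of 4 is arbitrary, and loading this particular model would additionally require the
+custom extension implemented later in this guide.
+
+```py
+# Hedged sketch: changing the batch dimension of a converted IR at runtime.
+from openvino.inference_engine import IECore
+
+ie = IECore()
+net = ie.read_network(model='wnet_20.xml', weights='wnet_20.bin')
+
+input_name = next(iter(net.input_info))
+shape = net.input_info[input_name].input_data.shape  # e.g. [1, 2, 256, 256] after conversion
+shape[0] = 4  # new batch size
+net.reshape({input_name: shape})
+
+exec_net = ie.load_network(network=net, device_name='CPU')
+```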
```bash
-python3 /opt/intel/openvino/deployment_tools/tools/extension_generator/extgen.py new --help
+.//mo.py --input_model /wnet_20.pb -b 1
```
-where the output will appear similar to:
-
-```
-usage: You can use any combination of the following arguments:
-
-Arguments to configure extension generation in the interactive mode:
-
-optional arguments:
- -h, --help show this help message and exit
- --mo-caffe-ext generate a Model Optimizer Caffe* extractor
- --mo-mxnet-ext generate a Model Optimizer MXNet* extractor
- --mo-tf-ext generate a Model Optimizer TensorFlow* extractor
- --mo-op generate a Model Optimizer operation
- --ie-cpu-ext generate an Inference Engine CPU extension
- --ie-gpu-ext generate an Inference Engine GPU extension
- --output_dir OUTPUT_DIR
- set an output directory. If not specified, the current
- directory is used by default.
+Model Optimizer produces the following error:
+```bash
+[ ERROR ] List of operations that cannot be converted to Inference Engine IR:
+[ ERROR ] Complex (1)
+[ ERROR ] lambda_2/Complex
+[ ERROR ] IFFT2D (1)
+[ ERROR ] lambda_2/IFFT2D
+[ ERROR ] ComplexAbs (1)
+[ ERROR ] lambda_2/Abs
+[ ERROR ] Part of the nodes was not converted to IR. Stopped.
```
-The available command-line arguments are used to specify which extension(s) to generate templates for the Model Optimizer or Inference Engine. The generated extension files for each argument will appear starting from the top of the output directory as follows:
-
-Command-line Argument | Output Directory Location |
---------------------- | ------------------------------ |
-`--mo-caffe-ext` | user_mo_extensions/front/caffe |
-`--mo-mxnet-ext` | user_mo_extensions/front/mxnet |
-`--mo-tf-ext` | user_mo_extensions/front/tf |
-`--mo-op` | user_mo_extensions/ops |
-`--ie-cpu-ext` | user_ie_extensions/cpu |
-`--ie-gpu-ext` | user_ie_extensions/gpu |
-
-### Extension Workflow
-
-The workflow for each generated extension follows the same basic steps:
-
-![](img/MEG_generic_flow.png)
-
-**Step 1: Generate:** Use the Model Extension Generator to generate the Custom Layer Template Files.
-
-**Step 2: Edit:** Edit the Custom Layer Template Files as necessary to create the specialized Custom Layer Extension Source Code.
-
-**Step 3: Specify:** Specify the custom layer extension locations to be used by the Model Optimizer or Inference Engine.
+The error means that the Model Optimizer doesn't know how to handle three types of TensorFlow\* operations: "Complex",
+"IFFT2D" and "ComplexAbs". To see more details about the conversion process, run the model conversion with the
+additional parameter `--log_level DEBUG`. It is worth mentioning the following lines from the detailed output:
-## Caffe\* Models with Custom Layers
```bash
+[ INFO ] Called "tf_native_tf_node_infer" for node "lambda_2/Complex"
+[ ] [ DEBUG ] [ tf:228 ] Added placeholder with name 'lambda_2/lambda_3/strided_slice_port_0_ie_placeholder'
+[ ] [ DEBUG ] [ tf:228 ] Added placeholder with name 'lambda_2/lambda_4/strided_slice_port_0_ie_placeholder'
+[ ] [ DEBUG ] [ tf:241 ] update_input_in_pbs: replace input 'lambda_2/lambda_3/strided_slice' with input 'lambda_2/lambda_3/strided_slice_port_0_ie_placeholder'
+[ ] [ DEBUG ] [ tf:249 ] Replacing input '0' of the node 'lambda_2/Complex' with placeholder 'lambda_2/lambda_3/strided_slice_port_0_ie_placeholder'
+[ ] [ DEBUG ] [ tf:241 ] update_input_in_pbs: replace input 'lambda_2/lambda_4/strided_slice' with input 'lambda_2/lambda_4/strided_slice_port_0_ie_placeholder'
+[ ] [ DEBUG ] [ tf:249 ] Replacing input '1' of the node 'lambda_2/Complex' with placeholder 'lambda_2/lambda_4/strided_slice_port_0_ie_placeholder'
+[ ] [ DEBUG ] [ tf:148 ] Inferred shape of the output tensor with index '0' of the node 'lambda_2/Complex': '[ 1 256 256]'
+[ ] [ DEBUG ] [ infer:145 ] Outputs:
+[ ] [ DEBUG ] [ infer:32 ] output[0]: shape = [ 1 256 256], value =
+[ ] [ DEBUG ] [ infer:129 ] --------------------
+[ ] [ DEBUG ] [ infer:130 ] Partial infer for lambda_2/IFFT2D
+[ ] [ DEBUG ] [ infer:131 ] Op: IFFT2D
+[ ] [ DEBUG ] [ infer:132 ] Inputs:
+[ ] [ DEBUG ] [ infer:32 ] input[0]: shape = [ 1 256 256], value =
+```

This is part of the log of the partial inference phase of the model conversion. 
See the "Partial Inference" section on +the [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for +more information about this phase. Model Optimizer inferred output shape for the unknown operation of type "Complex" +using a "fallback" to TensorFlow\*. However, it is not enough to generate the IR because Model Optimizer doesn't know +which attributes of the operation should be saved to IR. So it is necessary to implement Model Optimizer extensions to +support these operations. + +Before going into the extension development it is necessary to understand what these unsupported operations do according +to the TensorFlow\* framework specification. + +* "Complex" - returns a tensor of complex type constructed from two real input tensors specifying real and imaginary +part of a complex number. +* "IFFT2D" - returns a tensor with inverse 2-dimensional discrete Fourier transform over the inner-most 2 dimensions of + an input. +* "ComplexAbs" - returns a tensor with absolute values of input tensor with complex numbers. + +The part of the model with all three unsupported operations is depicted below: + +![Unsupported sub-graph](img/unsupported_subgraph.png) + +This model uses complex numbers during the inference but Inference Engine does not support tensors of this data type. So +it is necessary to find a way how to avoid using tensors of such a type in the model. Fortunately, the complex tensor +appear as a result of "Complex" operation, is used as input in the "IFFT2D" operation then is passed to "ComplexAbs" +which produces real value tensor as output. So there are just 3 operations consuming/producing complex tensors in the +model. + +Let's design an OpenVINO operation "FFT" which get a single real number tensor describing the complex number and +produces a single real number tensor describing output complex tensor. This way the fact that the model uses complex +numbers is hidden inside the "FFT" operation implementation. The operation gets a tensor of shape `[N, H, W, 2]` and +produces the output tensor with the same shape, where the innermost dimension contains pairs of real numbers describing +the complex number (its real and imaginary part). As we will see further this operation will allow us to support the +model. The implementation of the Model Optimizer operation should be saved to `mo_extensions/ops/FFT.py` file: + +@snippet FFT.py fft:operation + +The attribute `inverse` is a flag specifying type of the FFT to apply: forward or inverse. + +See the "Model Optimizer Operation" section on the +[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for the +detailed instruction on how to implement the operation. + +Now it is necessary to implement extractor for the "IFFT2D" operation according to the +"Operation Extractor" section on the +[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md). The +following snippet provides two extractors: one for "IFFT2D", another one for "FFT2D", however only on of them is used +in this example. The implementation should be saved to the file `mo_extensions/front/tf/FFT_ext.py`. 
+
+Now it is necessary to implement an extractor for the "IFFT2D" operation according to the
+"Operation Extractor" section on the
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md). The
+following snippet provides two extractors: one for "IFFT2D" and another for "FFT2D"; however, only one of them is used
+in this example. The implementation should be saved to the file `mo_extensions/front/tf/FFT_ext.py`.
+
+@snippet FFT_ext.py fft_ext:extractor
+
+> **NOTE:** The graph is in an inconsistent state after extracting node attributes because, according to the original
+> "IFFT2D" operation semantics, it should have an input consuming a tensor of complex numbers, but the extractor instantiated an
+> operation "FFT" which expects a real tensor with a specific layout. The inconsistency will be resolved when the
+> front phase transformations discussed below are applied.
+
+The output shape of the operation "AddV2" from the picture above is `[N, H, W, 2]`, where the innermost dimension
+contains pairs of real numbers describing the complex number (its real and imaginary parts). The following "StridedSlice"
+operations split the input tensor into two parts to get a tensor of real and a tensor of imaginary parts, which are then
+consumed by the "Complex" operation to produce a tensor of complex numbers. These "StridedSlice" and "Complex"
+operations can be removed so the "FFT" operation will get a real value tensor encoding complex numbers. To achieve this,
+we implement a front phase transformation which searches for a pattern of two "StridedSlice" operations with specific
+attributes feeding the "Complex" operation and removes it from the graph. Refer to the
+"Pattern-Defined Front Phase Transformations" section on the
+[Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) for more
+information on how this type of transformation works. The code snippet should be saved to the file
+`mo_extensions/front/tf/Complex.py`.
+
+@snippet Complex.py complex:transformation
+
+> **NOTE:** The graph is in an inconsistent state because the "ComplexAbs" operation consumes a complex value tensor but
+> "FFT" produces a real value tensor.
+
+Now let's implement a transformation which replaces the "ComplexAbs" operation with a sub-graph of primitive operations
+which calculate the result using the following formula: \f$module(z) = \sqrt{real(z) \cdot real(z) + imag(z) \cdot imag(z)}\f$.
+The original "IFFT2D" operation produces a tensor of complex values, but the "FFT" operation produces a real value tensor with
+the same format and shape as its input. So the input shape for "ComplexAbs" will be `[N, H, W, 2]`,
+with the innermost dimension containing a pair with the real and imaginary parts of a complex number. In order to calculate
+absolute values for the complex tensor, we do the following:
+1. Raise all elements to the power of 2.
+2. Calculate a reduced sum over the innermost dimension.
+3. Calculate a square root.
+
+The implementation should be saved to the file `mo_extensions/front/tf/ComplexAbs.py` and is provided below:
+
+@snippet ComplexAbs.py complex_abs:transformation
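+
+Again, since the snippet is not inlined, a sketch of what `mo_extensions/front/tf/ComplexAbs.py` can look like follows.
+It is illustrative only: the exact operation classes used for the power, sum, and square root may differ in the actual
+file, which remains the authoritative version:
+
+```py
+# Approximate content of mo_extensions/front/tf/ComplexAbs.py (see the snippet reference above).
+import numpy as np
+
+from extensions.ops.elementwise import Pow
+from extensions.ops.ReduceOps import ReduceSum
+from mo.front.common.replacement import FrontReplacementOp
+from mo.graph.graph import Graph, Node
+from mo.ops.const import Const
+
+
+class ComplexAbs(FrontReplacementOp):
+    op = 'ComplexAbs'
+    enabled = True
+
+    def replace_op(self, graph: Graph, node: Node):
+        # constants for the three steps: square, reduce over the innermost axis, square root
+        pow_2 = Const(graph, {'value': np.float32(2.0)}).create_node()
+        reduce_axis = Const(graph, {'value': np.int64(-1)}).create_node()
+        pow_0_5 = Const(graph, {'value': np.float32(0.5)}).create_node()
+
+        squared = Pow(graph, {'name': node.name + '/squared'}).create_node([node.in_node(0), pow_2])
+        summed = ReduceSum(graph, {'name': node.name + '/summed'}).create_node([squared, reduce_axis])
+        sqrt = Pow(graph, {'name': node.name + '/sqrt'}).create_node([summed, pow_0_5])
+        return [sqrt.id]
+```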
+
+Now it is possible to convert the model using the following command line:
+```bash
+.//mo.py --input_model /wnet_20.pb -b 1 --extensions mo_extensions/
+```
-**Register the custom layers as extensions to the Model Optimizer**. For instructions, see [Extending Model Optimizer with New Primitives](../MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md). When your custom layers are registered as extensions, the Model Optimizer generates a valid and optimized Intermediate Representation. You will need a bit of Python\* code that lets the Model Optimizer;
+The sub-graph corresponding to the originally non-supported one is depicted in the image below:
-- Generate a valid Intermediate Representation according to the rules you specified.
-- Be independent from the availability of Caffe on your computer.
-
-If your model contains Custom Layers, it is important to understand the internal workflow of the Model Optimizer. Consider the following example.
+![Converted sub-graph](img/converted_subgraph.png)
-**Example**:
+> **NOTE:** The Model Optimizer performed conversion of the model from the NHWC to the NCHW layout, which is why the dimension with
+> the value 2 moved to another position.
-The network has:
-
-* One input layer (#1)
-* One output Layer (#5)
-* Three internal layers (#2, 3, 4)
+### Inference Engine Extension Implementation
+Now it is necessary to implement the extension for the CPU plugin with the operation "FFT" introduced previously. The code
+below is based on the template extension described in the
+[Inference Engine Extensibility Mechanism](../IE_DG/Extensibility_DG/Intro.md).
-The custom and standard layer types are:
+#### CMake Build File
+The first step is to create a CMake configuration file which builds the extension. The content of the "CMakeLists.txt"
+file is the following:
-* Layers #2 and #5 are implemented as Model Optimizer extensions.
-* Layers #1 and #4 are supported in Model Optimizer out-of-the box.
-* Layer #3 is neither in the list of supported layers nor in extensions, but is specified in CustomLayersMapping.xml.
+@snippet ../template_extension/CMakeLists.txt cmake:extension
-> **NOTE**: If any of the layers are not in one of three categories described above, the Model Optimizer fails with an appropriate message and a link to the corresponding question in [Model Optimizer FAQ](../MO_DG/prepare_model/Model_Optimizer_FAQ.md).
+The CPU FFT kernel implementation uses OpenCV to perform the FFT, which is why the extension library is linked against
+"opencv_core", which comes with OpenVINO.
-The general process is as shown:
+#### Custom nGraph Operation "FFT" Implementation
+The next step is to create the nGraph operation FFT. The header file "fft_op.hpp" has the following content:
-![Example custom layer network](img/mo_caffe_priorities.png)
-
+The operation has just one boolean attribute `inverse`. Implementations of the necessary nGraph operation functions are +in the "fft_op.cpp" file with the following content: -**Step 1:** The example model is fed to the Model Optimizer that **loads the model** with the special parser built on top of the `caffe.proto` file. In case of failure, the Model Optimizer asks you to prepare the parser that can read the model. For more information, refer to the Model Optimizer, FAQ #1. +@snippet ../template_extension/fft_op.cpp fft_op:implementation -**Step 2:** The Model Optimizer **extracts the attributes of all layers** by going through the list of layers and attempting to find the appropriate extractor. In order of priority, the Model Optimizer checks if the layer is: - -* A. Registered as a Model Optimizer extension -* B. Registered as a standard Model Optimizer layer - -When the Model Optimizer finds a satisfying condition from the list above, it extracts the attributes according to the following rules: - -* For A. - takes only the parameters specified in the extension -* For B. - takes only the parameters specified in the standard extractor -
+Refer to [Custom nGraph Operation](../IE_DG/Extensibility_DG/AddingNGraphOps.md) for more details. -**Step 3:** The Model Optimizer **calculates the output shape of all layers**. The logic is the same as it is for the priorities. **Important:** the Model Optimizer always takes the first available option. +#### CPU FFT Kernel Implementation +The operation implementation for the CPU plugin uses OpenCV to perform the FFT. The header file "fft_kernel.hpp" has the +following content: -**Step 4:** The Model Optimizer **optimizes the original model and produces the two Intermediate Representation (IR) files in .xml and .bin**. -
+@snippet ../template_extension/fft_kernel.hpp fft_kernel:header -## TensorFlow\* Models with Custom Layers +The "fft_kernel.cpp" file with the CPU kernel implementation has the following content: -You have two options for TensorFlow\* models with custom layers: -
+@snippet ../template_extension/fft_kernel.cpp fft_kernel:implementation -* **Register those layers as extensions to the Model Optimizer.** In this case, the Model Optimizer generates a valid and optimized Intermediate Representation. -* **If you have sub-graphs that should not be expressed with the analogous sub-graph in the Intermediate Representation, but another sub-graph should appear in the model, the Model Optimizer provides such an option.** This feature is helpful for many TensorFlow models. To read more, see [Sub-graph Replacement in the Model Optimizer](../MO_DG/prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md). - -## MXNet\* Models with Custom Layers +Refer to [How to Implement Custom CPU Operations](../IE_DG/Extensibility_DG/CPU_Kernel.md) for more details. -There are two options to convert your MXNet* model that contains custom layers: +#### Extension Library Implementation +The last step is to create the extension library files "extension.cpp" and "extension.hpp", which include the FFT -1. Register the custom layers as extensions to the Model Optimizer. For instructions, see [Extending MXNet Model Optimizer with New Primitives](../MO_DG/prepare_model/customize_model_optimizer/Extending_MXNet_Model_Optimizer_with_New_Primitives.md). When your custom layers are registered as extensions, the Model Optimizer generates a valid and optimized Intermediate Representation. You can create Model Optimizer extensions for both MXNet layers with op `Custom` and layers which are not standard MXNet layers. +operation for the CPU plugin. The code of the library is described in [Extension Library](../IE_DG/Extensibility_DG/Extension.md). +### Building and Running the Custom Extension +In order to build the extension, run the following commands:
+```bash +mkdir build && cd build +source /opt/intel/openvino/bin/setupvars.sh +cmake .. -DCMAKE_BUILD_TYPE=Release +make --jobs=$(nproc) +``` -2. If you have sub-graphs that should not be expressed with the analogous sub-graph in the Intermediate Representation, but another sub-graph should appear in the model, the Model Optimizer provides such an option. In MXNet the function is actively used for ssd models provides an opportunity to for the necessary subgraph sequences and replace them. To read more, see [Sub-graph Replacement in the Model Optimizer](../MO_DG/prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md). +The result of these commands is a compiled shared library (`.so`, `.dylib`, or `.dll`). It should be loaded in the +application using the `Core` class instance method `AddExtension`, for example: +`core.AddExtension(make_so_pointer(compiled_library_file_name), "CPU");`. -## Kaldi\* Models with Custom Layers -For information on converting your Kaldi* model containing custom layers see [Converting a Kaldi Model in the Model Optimizer Developer Guide](../MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md). +To test that the extension is implemented correctly, we can run the "mri_reconstruction_demo.py" script with the following content: -## ONNX\* Models with Custom Layers -For information on converting your ONNX* model containing custom layers see [Converting an ONNX Model in the Model Optimizer Developer Guide](../MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md). +@snippet mri_reconstruction_demo.py mri_demo:demo -## Step-by-Step Custom Layers Tutorial -For a step-by-step walk-through creating and executing a custom layer, see [Custom Layer Implementation Tutorial for Linux and Windows.](https://github.com/david-drew/OpenVINO-Custom-Layers/tree/master/2019.r2.0) +The script can be executed using the following command line: +```bash +python3 mri_reconstruction_demo.py \ + -m /wnet_20.xml \ + -i .npy \ + -p /Data/sampling_mask_20perc.npy \ + -l /libtemplate_extension.so \ + -d CPU +``` ## Additional Resources - Intel® Distribution of OpenVINO™ toolkit home page: [https://software.intel.com/en-us/openvino-toolkit](https://software.intel.com/en-us/openvino-toolkit) - OpenVINO™ toolkit online documentation: [https://docs.openvinotoolkit.org](https://docs.openvinotoolkit.org) - [Model Optimizer Developer Guide](../MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) +- [Model Optimizer Extensibility](../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) - [Inference Engine Extensibility Mechanism](../IE_DG/Extensibility_DG/Intro.md) - [Inference Engine Samples Overview](../IE_DG/Samples_Overview.md) - [Overview of OpenVINO™ Toolkit Pre-Trained Models](@ref omz_models_intel_index) @@ -204,9 +375,7 @@ For a step-by-step walk-through creating and executing a custom layer, see [Cust ## Converting Models: - [Convert Your Caffe* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_Caffe.md) +- [Convert Your Kaldi* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md) - [Convert Your TensorFlow* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md) - [Convert Your MXNet* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_MxNet.md) - [Convert Your ONNX* Model](../MO_DG/prepare_model/convert_model/Convert_Model_From_ONNX.md) - - - diff --git a/docs/HOWTO/img/IE_extensions_flow.png b/docs/HOWTO/img/IE_extensions_flow.png deleted file mode 100644 index 
ca665ca3298bbb..00000000000000 --- a/docs/HOWTO/img/IE_extensions_flow.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c2f362a39ae6c2af080e4f055b6fdba4954f918f85731545d1df3d687d9213d5 -size 421056 diff --git a/docs/HOWTO/img/MEG_generic_flow.png b/docs/HOWTO/img/MEG_generic_flow.png deleted file mode 100644 index a492c3fff5026b..00000000000000 --- a/docs/HOWTO/img/MEG_generic_flow.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cb5c700d003936779455353bfa4ed9432410c0975c46e2dfd30c6a1abccd1727 -size 23320 diff --git a/docs/HOWTO/img/MO_extensions_flow.png b/docs/HOWTO/img/MO_extensions_flow.png deleted file mode 100644 index 5009c0ce2604ad..00000000000000 --- a/docs/HOWTO/img/MO_extensions_flow.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:99d6b5146be85fa408dc5432883c3e2745cffe890133854a97dcf22f5c5962d4 -size 47564 diff --git a/docs/HOWTO/img/converted_subgraph.png b/docs/HOWTO/img/converted_subgraph.png new file mode 100644 index 00000000000000..6a5b7220777d54 --- /dev/null +++ b/docs/HOWTO/img/converted_subgraph.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7c8ab4f15874d235968471bcf876c89c795d601e69891208107b8b72aa58eb1 +size 70014 diff --git a/docs/HOWTO/img/mo_caffe_priorities.png b/docs/HOWTO/img/mo_caffe_priorities.png deleted file mode 100644 index 665892316c17fc..00000000000000 --- a/docs/HOWTO/img/mo_caffe_priorities.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0a4de6e502cae7542f1f311bcdbea6bb145f960f0d27d86a03160d1a60133778 -size 301310 diff --git a/docs/HOWTO/img/unsupported_subgraph.png b/docs/HOWTO/img/unsupported_subgraph.png new file mode 100644 index 00000000000000..80f7084a78a859 --- /dev/null +++ b/docs/HOWTO/img/unsupported_subgraph.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d5ccf51fe1babb93d96d042494695a6a6e055d1f8ebf7eef5083d54d8987a23 +size 58789 diff --git a/docs/HOWTO/mo_extensions/front/tf/Complex.py b/docs/HOWTO/mo_extensions/front/tf/Complex.py new file mode 100644 index 00000000000000..465608dfaba644 --- /dev/null +++ b/docs/HOWTO/mo_extensions/front/tf/Complex.py @@ -0,0 +1,57 @@ +""" + Copyright (C) 2018-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +#! 
[complex:transformation] +import logging as log + +import numpy as np + +from mo.front.common.replacement import FrontReplacementSubgraph +from mo.graph.graph import Graph + + +class Complex(FrontReplacementSubgraph): + enabled = True + + def pattern(self): + return dict( + nodes=[ + ('strided_slice_real', dict(op='StridedSlice')), + ('strided_slice_imag', dict(op='StridedSlice')), + ('complex', dict(op='Complex')), + ], + edges=[ + ('strided_slice_real', 'complex', {'in': 0}), + ('strided_slice_imag', 'complex', {'in': 1}), + ]) + + @staticmethod + def replace_sub_graph(graph: Graph, match: dict): + strided_slice_real = match['strided_slice_real'] + strided_slice_imag = match['strided_slice_imag'] + complex_node = match['complex'] + + # make sure that both strided slice operations get the same data as input + assert strided_slice_real.in_port(0).get_source() == strided_slice_imag.in_port(0).get_source() + + # identify the output port of the operation producing data for the strided slice nodes + input_node_output_port = strided_slice_real.in_port(0).get_source() + input_node_output_port.disconnect() + + # change the connection so that all consumers of "complex_node" now get data from the input node of the strided slice nodes + complex_node.out_port(0).get_connection().set_source(input_node_output_port) +#! [complex:transformation] + diff --git a/docs/HOWTO/mo_extensions/front/tf/ComplexAbs.py b/docs/HOWTO/mo_extensions/front/tf/ComplexAbs.py new file mode 100644 index 00000000000000..bac4140d732f91 --- /dev/null +++ b/docs/HOWTO/mo_extensions/front/tf/ComplexAbs.py @@ -0,0 +1,40 @@ +""" + Copyright (C) 2018-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +#! [complex_abs:transformation] +import numpy as np + +from extensions.ops.elementwise import Pow +from extensions.ops.ReduceOps import ReduceSum +from mo.front.common.replacement import FrontReplacementOp +from mo.graph.graph import Graph, Node +from mo.ops.const import Const + + +class ComplexAbs(FrontReplacementOp): + op = "ComplexAbs" + enabled = True + + def replace_op(self, graph: Graph, node: Node): + pow_2 = Const(graph, {'value': np.float32(2.0)}).create_node() + reduce_axis = Const(graph, {'value': np.int32(-1)}).create_node() + pow_0_5 = Const(graph, {'value': np.float32(0.5)}).create_node() + + sq = Pow(graph, dict(name=node.in_node(0).name + '/sq', power=2.0)).create_node([node.in_node(0), pow_2]) + sum = ReduceSum(graph, dict(name=sq.name + '/sum')).create_node([sq, reduce_axis]) + sqrt = Pow(graph, dict(name=sum.name + '/sqrt', power=0.5)).create_node([sum, pow_0_5]) + return [sqrt.id] +#! 
[complex_abs:transformation] diff --git a/docs/HOWTO/mo_extensions/front/tf/FFT_ext.py b/docs/HOWTO/mo_extensions/front/tf/FFT_ext.py new file mode 100644 index 00000000000000..283c87ba838f80 --- /dev/null +++ b/docs/HOWTO/mo_extensions/front/tf/FFT_ext.py @@ -0,0 +1,47 @@ +""" + Copyright (C) 2018-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +# ! [fft_ext:extractor] +from ...ops.FFT import FFT +from mo.front.extractor import FrontExtractorOp +from mo.utils.error import Error + + +class FFT2DFrontExtractor(FrontExtractorOp): + op = 'FFT2D' + enabled = True + + @classmethod + def extract(cls, node): + attrs = { + 'inverse': 0 + } + FFT.update_node_stat(node, attrs) + return cls.enabled + + +class IFFT2DFrontExtractor(FrontExtractorOp): + op = 'IFFT2D' + enabled = True + + @classmethod + def extract(cls, node): + attrs = { + 'inverse': 1 + } + FFT.update_node_stat(node, attrs) + return cls.enabled +# ! [fft_ext:extractor] diff --git a/docs/HOWTO/mo_extensions/ops/FFT.py b/docs/HOWTO/mo_extensions/ops/FFT.py new file mode 100644 index 00000000000000..c3f37f7d6d6919 --- /dev/null +++ b/docs/HOWTO/mo_extensions/ops/FFT.py @@ -0,0 +1,40 @@ +""" + Copyright (C) 2018-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +#! [fft:operation] +from mo.front.common.partial_infer.elemental import copy_shape_infer +from mo.graph.graph import Node, Graph +from mo.ops.op import Op + + +class FFT(Op): + op = 'FFT' + enabled = False + + def __init__(self, graph: Graph, attrs: dict): + super().__init__(graph, { + 'type': self.op, + 'op': self.op, + 'version': 'custom_opset', + 'inverse': None, + 'in_ports_count': 1, + 'out_ports_count': 1, + 'infer': copy_shape_infer + }, attrs) + + def backend_attrs(self): + return ['inverse'] +#! [fft:operation] diff --git a/docs/HOWTO/mri_reconstruction_demo.py b/docs/HOWTO/mri_reconstruction_demo.py new file mode 100644 index 00000000000000..74ce15721fc68a --- /dev/null +++ b/docs/HOWTO/mri_reconstruction_demo.py @@ -0,0 +1,119 @@ +""" + Copyright (C) 2018-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +""" + +#! [mri_demo:demo] +import numpy as np +import cv2 as cv +import argparse +import time +from openvino.inference_engine import IECore + + +def kspace_to_image(kspace): + assert(len(kspace.shape) == 3 and kspace.shape[-1] == 2) + fft = cv.idft(kspace, flags=cv.DFT_SCALE) + img = cv.magnitude(fft[:,:,0], fft[:,:,1]) + return cv.normalize(img, dst=None, alpha=255, beta=0, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='MRI reconstruction demo for network from https://github.com/rmsouza01/Hybrid-CS-Model-MRI (https://arxiv.org/abs/1810.12473)') + parser.add_argument('-i', '--input', dest='input', help='Path to input .npy file with MRI scan data.') + parser.add_argument('-p', '--pattern', dest='pattern', help='Path to sampling mask in .npy format.') + parser.add_argument('-m', '--model', dest='model', help='Path to .xml file of OpenVINO IR.') + parser.add_argument('-l', '--cpu_extension', dest='cpu_extension', help='Path to extensions library with FFT implementation.') + parser.add_argument('-d', '--device', dest='device', default='CPU', + help='Optional. Specify the target device to infer on; CPU, ' + 'GPU, HDDL or MYRIAD is acceptable. For non-CPU targets, ' + 'HETERO plugin is used with CPU fallback to the FFT implementation. ' + 'Default value is CPU') + args = parser.parse_args() + + xml_path = args.model + assert(xml_path.endswith('.xml')) + bin_path = xml_path[:xml_path.rfind('.xml')] + '.bin' + + ie = IECore() + ie.add_extension(args.cpu_extension, "CPU") + + net = ie.read_network(xml_path, bin_path) + + device = 'CPU' if args.device == 'CPU' else ('HETERO:' + args.device + ',CPU') + exec_net = ie.load_network(net, device) + + # Hybrid-CS-Model-MRI/Data/stats_fs_unet_norm_20.npy + stats = np.array([2.20295299e-01, 1.11048916e+03, 4.16997984e+00, 4.71741395e+00], dtype=np.float32) + # Hybrid-CS-Model-MRI/Data/sampling_mask_20perc.npy + var_sampling_mask = np.load(args.pattern) # TODO: can we generate it in runtime? 
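+ # The boolean sampling mask marks k-space positions that were not acquired; zeroing the masked positions in the compute loop below simulates the undersampled MRI measurement.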
+ print('Sampling ratio:', 1.0 - var_sampling_mask.sum() / var_sampling_mask.size) + + data = np.load(args.input) + num_slices, height, width = data.shape[0], data.shape[1], data.shape[2] + pred = np.zeros((num_slices, height, width), dtype=np.uint8) + data /= np.sqrt(height * width) + + print('Compute...') + start = time.time() + for slice_id, kspace in enumerate(data): + kspace = kspace.copy() + + # Apply sampling + kspace[var_sampling_mask] = 0 + kspace = (kspace - stats[0]) / stats[1] + + # Forward through network + input = np.expand_dims(kspace.transpose(2, 0, 1), axis=0) + outputs = exec_net.infer(inputs={'input_1': input}) + output = next(iter(outputs.values())) + output = output.reshape(height, width) + + # Save predictions + pred[slice_id] = cv.normalize(output, dst=None, alpha=255, beta=0, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U) + + print('Elapsed time: %.1f seconds' % (time.time() - start)) + + WIN_NAME = 'MRI reconstruction with OpenVINO' + + slice_id = 0 + def callback(pos): + global slice_id + slice_id = pos + + kspace = data[slice_id] + img = kspace_to_image(kspace) + + kspace[var_sampling_mask] = 0 + masked = kspace_to_image(kspace) + + rec = pred[slice_id] + + # Add a header + border_size = 20 + render = cv.hconcat((img, masked, rec)) + render = cv.copyMakeBorder(render, border_size, 0, 0, 0, cv.BORDER_CONSTANT, value=255) + cv.putText(render, 'Original', (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0) + cv.putText(render, 'Sampled (PSNR %.1f)' % cv.PSNR(img, masked), (width, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0) + cv.putText(render, 'Reconstructed (PSNR %.1f)' % cv.PSNR(img, rec), (width*2, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0) + + cv.imshow(WIN_NAME, render) + cv.waitKey(1) + + cv.namedWindow(WIN_NAME, cv.WINDOW_NORMAL) + cv.createTrackbar('Slice', WIN_NAME, num_slices // 2, num_slices - 1, callback) + callback(num_slices // 2) # Trigger initial visualization + cv.waitKey() +#! [mri_demo:demo] diff --git a/docs/IE_DG/Bfloat16Inference.md b/docs/IE_DG/Bfloat16Inference.md index e814a8948c44bb..136607af8ad435 100644 --- a/docs/IE_DG/Bfloat16Inference.md +++ b/docs/IE_DG/Bfloat16Inference.md @@ -2,7 +2,8 @@ ## Disclaimer -Inference Engine with the bfloat16 inference implemented on CPU must support the `avx512_bf16` instruction and therefore the bfloat16 data format. +To run bfloat16 inference natively on CPU, the platform must support the native `avx512_bf16` instruction and therefore the bfloat16 data format. +It is possible to use bfloat16 inference in simulation mode on platforms with Intel® Advanced Vector Extensions 512 (Intel® AVX-512), but it leads to significant performance degradation in comparison with FP32 or native `avx512_bf16` instruction usage. ## Introduction @@ -12,7 +13,7 @@ Bfloat16 computations (referred to as BF16) is the Brain Floating-Point format w Preserving the exponent bits keeps BF16 to the same range as the FP32 (~1e-38 to ~3e38). This simplifies conversion between two data types: you just need to skip or flush to zero 16 low bits. Truncated mantissa leads to occasionally less precision, but according to [investigations](https://cloud.google.com/blog/products/ai-machine-learning/bfloat16-the-secret-to-high-performance-on-cloud-tpus), neural networks are more sensitive to the size of the exponent than the mantissa size. Also, in lots of models, precision is needed close to zero but not so much at the maximum range. 
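To make the FP32-to-BF16 conversion concrete, here is a small editorial NumPy sketch (not part of the patch) that emulates the truncation described above by flushing the 16 low mantissa bits to zero:

```python
import numpy as np

def fp32_to_bf16_truncate(x):
    # View the FP32 bits as uint32 and keep only the upper 16 bits: the sign,
    # the full 8-bit exponent, and the 7 most significant mantissa bits.
    bits = np.asarray(x, dtype=np.float32).view(np.uint32)
    return (bits & np.uint32(0xFFFF0000)).view(np.float32)

x = np.array([3.1415927, 1e-38, 3e38], dtype=np.float32)
print(fp32_to_bf16_truncate(x))  # values stay close and the FP32 range is preserved
```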
-Another useful feature of BF16 is possibility to encode an INT8 in BF16 without loss of accuracy, because INT8 range completely fits in BF16 mantissa field. It reduces data flow in conversion from INT8 input image data to BF16 directly without intermediate representation in FP32, or in combination of [INT8 inference](Int8Inference.md) and BF16 layers. +Another useful feature of BF16 is the possibility to encode INT8 in BF16 without loss of accuracy, because the INT8 range completely fits in the BF16 mantissa field. It reduces data flow in conversion from INT8 input image data to BF16 directly without intermediate representation in FP32, or in combination of [INT8 inference](Int8Inference.md) and BF16 layers. See the [Intel's site](https://software.intel.com/sites/default/files/managed/40/8b/bf16-hardware-numerics-definition-white-paper.pdf) for more bfloat16 format details. @@ -22,14 +23,7 @@ There are two ways to check if CPU device can support bfloat16 computations for @snippet snippets/Bfloat16Inference0.cpp part0 -Current Inference Engine solution for bfloat16 inference uses Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN) and supports inference of the following layers in BF16 computation mode: -* Convolution -* FullyConnected -* InnerProduct -* LRN -* Pooling - -This means that BF16 inference can only be performed with the CPU plugin on the layers listed above. All other layers are executed in FP32. +Current Inference Engine solution for bfloat16 inference uses Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN) and supports inference of a significant number of layers in BF16 computation mode. ## Lowering Inference Precision @@ -43,18 +37,36 @@ Bfloat16 data usage provides the following benefits that increase performance: 4. Reduced size of data in memory, as a result, larger models fit in the same memory bounds. 5. Reduced amount of data that must be transferred, as a result, reduced data transition time. -For default optimization on CPU, source model converts from FP32 or FP16 to BF16 and executes internally on platforms with native BF16 support. In that case, `KEY_ENFORCE_BF16` is set to `YES`. +For default optimization on CPU, the source model is converted from FP32 or FP16 to BF16 and executed internally on platforms with native BF16 support. In this case, `KEY_ENFORCE_BF16` is set to `YES`. The code below demonstrates how to check if the key is set: @snippet snippets/Bfloat16Inference1.cpp part1 -To disable BF16 internal transformations, set the `KEY_ENFORCE_BF16` to `NO`. In this case, the model infers AS IS without modifications with precisions that were set on each layer edge. +To disable BF16 internal transformations, set the `KEY_ENFORCE_BF16` to `NO`. In this case, the model infers as is without modifications with precisions that were set on each layer edge. @snippet snippets/Bfloat16Inference2.cpp part2 +To disable BF16 via the C API: + +``` +ie_config_t config = { "ENFORCE_BF16", "NO", NULL}; +ie_core_load_network(core, network, device_name, &config, &exe_network); +``` -An exception with message `Platform doesn't support BF16 format` is formed in case of setting `KEY_ENFORCE_BF16` to `YES` on CPU without native BF16 support. +An exception with the message `Platform doesn't support BF16 format` is raised if `KEY_ENFORCE_BF16` is set to `YES` on a CPU without native BF16 support or BF16 simulation mode. -Low-Precision 8-bit integer models do not convert to BF16, even if bfloat16 optimization is set by default. 
+Low-Precision 8-bit integer models cannot be converted to BF16, even if bfloat16 optimization is set by default. + +## Bfloat16 Simulation Mode + +Bfloat16 simulation mode is available on CPUs with Intel® Advanced Vector Extensions 512 (Intel® AVX-512) that do not support the native `avx512_bf16` instruction. The simulator does not guarantee adequate performance. +To enable the bfloat16 simulator: +* In [Benchmark App](../../inference-engine/samples/benchmark_app/README.md), add the `-enforcebf16=true` option +* In the C++ API, set `KEY_ENFORCE_BF16` to `YES` +* In the C API: +``` +ie_config_t config = { "ENFORCE_BF16", "YES", NULL}; +ie_core_load_network(core, network, device_name, &config, &exe_network); +``` ## Performance Counters @@ -77,4 +89,4 @@ prob EXECUTED layerType: SoftMax realT The `execType` column of the table includes inference primitives with specific suffixes. -[bf16_format]: img/bf16_format.png \ No newline at end of file +[bf16_format]: img/bf16_format.png diff --git a/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md b/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md index 42eda8f83c0fa4..9717b08f1c427d 100644 --- a/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md +++ b/docs/IE_DG/Extensibility_DG/AddingNGraphOps.md @@ -1,4 +1,4 @@ -# Add Custom nGraph Operations {#openvino_docs_IE_DG_Extensibility_DG_AddingNGraphOps} +# Custom nGraph Operation {#openvino_docs_IE_DG_Extensibility_DG_AddingNGraphOps} Inference Engine Extension API allows to register operation sets (opsets) with custom nGraph operations, it allows to support Networks with unknown operations. @@ -71,10 +71,9 @@ nGraph provides opsets mechanism for operation versioning. Different opsets dist When specifying opset names, follow the rules below: * Use unique opset names. -* Do not use the following built-in opset names: `extension`, `experimental`, `opset1`, `opest2`. +* Do not use the following built-in opset names: `extension`, `experimental`, `opset1`, `opset2`, `opset3`, ... , `opsetN`. * Make sure that the Model Optimizer and your extension use the same opset names. -* IR v10 layers have the mandatory `version` attribute specifying the opset. -* `opset1` is the name of default operations set. +* IR v10 operations have the mandatory `version` attribute specifying the opset. Operations from the default opset cannot be redefined. Use a custom opset to create a new operation or extend functionality of an existing operation from another opset. diff --git a/docs/IE_DG/Extensibility_DG/CPU_Kernel.md b/docs/IE_DG/Extensibility_DG/CPU_Kernel.md index 205ae64a6e1825..0e2adca76a8775 100644 --- a/docs/IE_DG/Extensibility_DG/CPU_Kernel.md +++ b/docs/IE_DG/Extensibility_DG/CPU_Kernel.md @@ -1,4 +1,4 @@ -# How to Implement Custom CPU Layers {#openvino_docs_IE_DG_Extensibility_DG_CPU_Kernel} +# How to Implement Custom CPU Operations {#openvino_docs_IE_DG_Extensibility_DG_CPU_Kernel} The primary vehicle for the performance of the CPU codepath in the Inference Engine is the Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN), and new CPU kernels extend the Inference Engine plugin for the Intel MKL-DNN. Implementing the InferenceEngine::ILayerExecImpl defines a general CPU-side extension. There are no Intel MKL-DNN specifics in the way you need to implement a kernel. 
diff --git a/docs/IE_DG/Extensibility_DG/Extension.md b/docs/IE_DG/Extensibility_DG/Extension.md index 6df3a1424ec0e4..69bb614e605681 100644 --- a/docs/IE_DG/Extensibility_DG/Extension.md +++ b/docs/IE_DG/Extensibility_DG/Extension.md @@ -1,7 +1,10 @@ # Extension Library {#openvino_docs_IE_DG_Extensibility_DG_Extension} Inference Engine provides an InferenceEngine::IExtension interface, which defines the interface for Inference Engine Extension libraries. -All extension libraries should be inherited from this interface. +All extension libraries should be inherited from this interface. The example below contains implementations of two operations: `Template`, +used as an example in this document, and `FFT`, used as a more complex example in the [Custom Operations Guide](../../HOWTO/Custom_Layers_Guide.md). + +> **NOTE**: The `FFT` operation is implemented using the OpenCV library functions `cv::dft` and `cv::idft`. Based on that, declaration of an extension class can look as follows: diff --git a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md index a918076e756112..59c0f070cf0693 100644 --- a/docs/IE_DG/Extensibility_DG/GPU_Kernel.md +++ b/docs/IE_DG/Extensibility_DG/GPU_Kernel.md @@ -1,16 +1,16 @@ -# How to Implement Custom GPU Layers {#openvino_docs_IE_DG_Extensibility_DG_GPU_Kernel} +# How to Implement Custom GPU Operations {#openvino_docs_IE_DG_Extensibility_DG_GPU_Kernel} -The GPU codepath abstracts many details about OpenCL™. You need to provide the kernel code in OpenCL C and the configuration file that connects the kernel and its parameters to the parameters of the layer. +The GPU codepath abstracts many details about OpenCL™. You need to provide the kernel code in OpenCL C and the configuration file that connects the kernel and its parameters to the parameters of the operation. -There are two options of using custom layer configuration file: +There are two options for using a custom operation configuration file: * Include a section with your kernels into the global automatically-loaded `cldnn_global_custom_kernels/cldnn_global_custom_kernels.xml` file, which is hosted in the `/deployment_tools/inference_engine/bin/intel64/{Debug/Release}` folder -* Call the `InferenceEngine::Core::SetConfig()` method from your application with the `InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE` key and the configuration file name as a value before loading the network that uses custom layers to the plugin: +* Call the `InferenceEngine::Core::SetConfig()` method from your application with the `InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE` key and the configuration file name as a value before loading the network that uses custom operations to the plugin: @snippet snippets/GPU_Kernel.cpp part0 All Inference Engine samples, except trivial `hello_classification`, -feature a dedicated command-line option `-c` to load custom kernels. For example, to load custom layers for the classification sample, run the command below: +feature a dedicated command-line option `-c` to load custom kernels. 
For example, to load custom operations for the classification sample, run the command below: ```sh $ ./classification_sample -m /bvlc_alexnet_fp16.xml -i ./validation_set/daily/227x227/apron.bmp -d GPU -c /custom_layer_example.xml @@ -19,7 +19,7 @@ $ ./classification_sample -m /bvlc_alexnet_fp16.xml -i ./validati ## Configuration File Format The configuration file is expected to follow the `.xml` file structure -with a node of the type `CustomLayer` for every custom layer you provide. +with a node of the type `CustomLayer` for every custom operation you provide. The definitions described in the sections below use the following notations: @@ -32,14 +32,13 @@ Notation | Description ### CustomLayer Node and Sub-node Structure -`CustomLayer` node contains the entire configuration for a single custom -layer. +`CustomLayer` node contains the entire configuration for a single custom operation. | Attribute Name |\# | Description | |-----|-----|-----| -| `name` | (1) | The name of the layer type to be used. This name should be identical to the type used in the IR.| -| `type` | (1) | Must be `SimpleGPU`. | -| `version` | (1) | Must be `1`. | +| `name` | (1) | The name of the operation type to be used. This name should be identical to the type used in the IR.| +| `type` | (1) | Must be `SimpleGPU`. | +| `version` | (1) | Must be `1`. | **Sub-nodes**: `Kernel` (1), `Buffers` (1), `CompilerOptions` (0+), `WorkSizes` (0/1) @@ -69,9 +68,9 @@ the sources during compilation (JIT). | Attribute Name | \# | Description | |------|-------|------| | `name` | (1) | The name of the defined JIT. For static constants, this can include the value as well (taken as a string). | -| `param` | (0/1) | This parameter value is used as the value of this JIT definition. | +| `param` | (0/1) | This parameter value is used as the value of this JIT definition. | | `type` | (0/1) | The parameter type. Accepted values: `int`, `float`, and `int[]`, `float[]` for arrays. | -| `default` | (0/1) | The default value to be used if the specified parameters is missing from the layer in the IR. | +| `default` | (0/1) | The default value to be used if the specified parameter is missing from the operation in the IR. | **Sub-nodes:** None @@ -92,7 +91,7 @@ weights or biases). | Attribute Name | \# | Description | |----|-----|------| -| `name` | (1) | Name of a blob attached to a layer in the IR | +| `name` | (1) | Name of a blob attached to an operation in the IR | | `arg-index` | (1) | 0-based index in the entry function arguments to be bound to | **Sub-nodes**: None @@ -105,7 +104,7 @@ weights or biases). |------|-------|-------| | `arg-index` | (1) | 0-based index in the entry function arguments to be bound to. | | `type` | (1) | `input` or `output` | -| `port-index` | (1) | 0-based index in the layer's input/output ports in the IR | +| `port-index` | (1) | 0-based index in the operation's input/output ports in the IR | | `format` | (0/1) | Data layout declaration for the tensor. Accepted values: `BFYX`, `BYXF`, `YXFB`, `FYXB` (also in all lowercase). Default value: `BFYX` | ### CompilerOptions Node and Sub-node Structure @@ -178,7 +177,7 @@ For an example, see [Example Kernel](#example-kernel). 
| `_PITCHES_SIZE`| The size of the `_PITCHES` array | | `_OFFSET`| The number of elements from the start of the tensor to the first valid element (bypassing the lower padding) | All `` values are automatically defined for every tensor -bound to this layer (`INPUT0`, `INPUT1`, `OUTPUT0`, and so on), as shown +bound to this operation (`INPUT0`, `INPUT1`, `OUTPUT0`, and so on), as shown in the following example: ```sh diff --git a/docs/IE_DG/Extensibility_DG/Intro.md b/docs/IE_DG/Extensibility_DG/Intro.md index b5d90cba061ad3..06d030fc710294 100644 --- a/docs/IE_DG/Extensibility_DG/Intro.md +++ b/docs/IE_DG/Extensibility_DG/Intro.md @@ -2,19 +2,22 @@ Inference Engine Extensibility API allows to add support of custom operations to the Inference Engine. Extension should contain operation sets with custom operations and execution kernels for custom operations. -Physically, an extension library can be represented as a dynamic library exporting the single `CreateExtension` function that allows to create a new extension instance. +Physically, an extension library can be represented as a dynamic library exporting the single `CreateExtension` function +that allows creating a new extension instance. -Extensibility library can be loaded to the InferenceEngine::Core object using the InferenceEngine::Core::AddExtension method. +An extensibility library can be loaded into the `InferenceEngine::Core` object using the +`InferenceEngine::Core::AddExtension` method. ## Inference Engine Extension Library -Inference Engine Extension dynamic library contains several main components: +Inference Engine Extension dynamic library contains several components: * [Extension class](Extension.md): + * [Extension Library](Extension.md): - Contains custom operation sets - Provides CPU implementations for custom operations - * [Custom operations](Intro.md): - - Allows to use InferenceEngine::Core::ReadNetwork to read Intermediate Representation (IR) with unsupported operations + * [Custom nGraph Operation](AddingNGraphOps.md): - - Allows to use `InferenceEngine::Core::ReadNetwork` to read Intermediate Representation (IR) with unsupported + operations - Allows to create `ngraph::Function` with unsupported operations - Provides shape inference mechanism for custom operations @@ -26,13 +29,13 @@ at `/docs/template_extension`. The Inference Engine workflow involves the creation of custom kernels and either custom or existing operations. -An _Operation_ is a Network building block implemented in the training framework, for example, `Convolution` in Caffe*. +An _Operation_ is a network building block implemented in the training framework, for example, `Convolution` in Caffe*. A _Kernel_ is defined as the corresponding implementation in the Inference Engine. -Refer to the [Custom Layers in the Model Optimizer](../../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) section for details on how -mapping between framework layers and Inference Engine kernels is registered. +Refer to the [Model Optimizer Extensibility](../../MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) +for details on how a mapping between framework operations and Inference Engine kernels is registered. -In short, you can plug your own kernel implementations into the Inference Engine and map them to the layers in the original framework. 
The following pages describe how to integrate custom _kernels_ into the Inference Engine: diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md index 224b235060e425..245fa68e900e80 100644 --- a/docs/IE_DG/Samples_Overview.md +++ b/docs/IE_DG/Samples_Overview.md @@ -35,6 +35,8 @@ Inference Engine sample applications include the following: - [Object Detection for SSD C Sample](../../inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/README.md) - [Object Detection for SSD Python* Sample](../../inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/README.md) +> **NOTE**: All samples support input paths containing only ASCII characters, except the Hello Classification Sample, that supports Unicode. + ## Media Files Available for Samples To run the sample applications, you can use images and videos from the media files collection available at https://github.com/intel-iot-devkit/sample-videos. @@ -125,6 +127,63 @@ You can also build a generated solution manually. For example, if you want to bu Microsoft Visual Studio and open the generated solution file from the `C:\Users\\Documents\Intel\OpenVINO\inference_engine_cpp_samples_build\Samples.sln` directory. +### Build the Sample Applications on macOS* + +The officially supported macOS* build environment is the following: + +* macOS* 10.15 64-bit +* Clang* compiler from Xcode* 10.1 or higher +* CMake* version 3.13 or higher + +> **NOTE**: For building samples from the open-source version of OpenVINO™ toolkit, see the [build instructions on GitHub](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode). + +To build the C or C++ sample applications for macOS, go to the `/inference_engine/samples/c` or `/inference_engine/samples/cpp` directory, respectively, and run the `build_samples.sh` script: +```sh +build_samples.sh +``` + +Once the build is completed, you can find sample binaries in the following folders: +* C samples: `~/inference_engine_c_samples_build/intel64/Release` +* C++ samples: `~/inference_engine_cpp_samples_build/intel64/Release` + +You can also build the sample applications manually: + +> **NOTE**: If you have installed the product as a root user, switch to root mode before you continue: `sudo -i` + +> **NOTE**: Before proceeding, make sure you have OpenVINO™ environment set correctly. This can be done manually by +```sh +cd /bin +source setupvars.sh +``` + +1. Navigate to a directory that you have write access to and create a samples build directory. This example uses a directory named `build`: +```sh +mkdir build +``` +> **NOTE**: If you ran the Image Classification verification script during the installation, the C++ samples build directory was already created in your home directory: `~/inference_engine_samples_build/` + +2. Go to the created directory: +```sh +cd build +``` + +3. Run CMake to generate the Make files for release or debug configuration. For example, for C++ samples: + - For release configuration: + ```sh + cmake -DCMAKE_BUILD_TYPE=Release /inference_engine/samples/cpp + ``` + - For debug configuration: + ```sh + cmake -DCMAKE_BUILD_TYPE=Debug /inference_engine/samples/cpp + ``` +4. Run `make` to build the samples: +```sh +make +``` + +For the release configuration, the sample application binaries are in `/intel64/Release/`; +for the debug configuration — in `/intel64/Debug/`. 
+ ## Get Ready for Running the Sample Applications ### Get Ready for Running the Sample Applications on Linux* diff --git a/docs/IE_DG/inference_engine_intro.md b/docs/IE_DG/inference_engine_intro.md index 41e1b1dd1b08e8..41e8711e366acb 100644 --- a/docs/IE_DG/inference_engine_intro.md +++ b/docs/IE_DG/inference_engine_intro.md @@ -62,13 +62,13 @@ The table below shows the plugin libraries and additional dependencies for Linux | Plugin | Library name for Linux | Dependency libraries for Linux | Library name for Windows | Dependency libraries for Windows | Library name for macOS | Dependency libraries for macOS | |--------|-----------------------------|-------------------------------------------------------------|--------------------------|--------------------------------------------------------------------------------------------------------|------------------------------|---------------------------------------------| -| CPU | `libMKLDNNPlugin.so` | `libinference_engine_lp_transformations.so` | `MKLDNNPlugin.dll` | `inference_engine_lp_transformations.dll` | `libMKLDNNPlugin.dylib` | `inference_engine_lp_transformations.dylib` | +| CPU | `libMKLDNNPlugin.so` | `libinference_engine_lp_transformations.so` | `MKLDNNPlugin.dll` | `inference_engine_lp_transformations.dll` | `libMKLDNNPlugin.so` | `inference_engine_lp_transformations.dylib` | | GPU | `libclDNNPlugin.so` | `libinference_engine_lp_transformations.so`, `libOpenCL.so` | `clDNNPlugin.dll` | `OpenCL.dll`, `inference_engine_lp_transformations.dll` | Is not supported | - | -| MYRIAD | `libmyriadPlugin.so` | `libusb.so`, | `myriadPlugin.dll` | `usb.dll` | `libmyriadPlugin.dylib` | `libusb.dylib` | +| MYRIAD | `libmyriadPlugin.so` | `libusb.so`, | `myriadPlugin.dll` | `usb.dll` | `libmyriadPlugin.so` | `libusb.dylib` | | HDDL | `libHDDLPlugin.so` | `libbsl.so`, `libhddlapi.so`, `libmvnc-hddl.so` | `HDDLPlugin.dll` | `bsl.dll`, `hddlapi.dll`, `json-c.dll`, `libcrypto-1_1-x64.dll`, `libssl-1_1-x64.dll`, `mvnc-hddl.dll` | Is not supported | - | | GNA | `libGNAPlugin.so` | `libgna.so`, | `GNAPlugin.dll` | `gna.dll` | Is not supported | - | -| HETERO | `libHeteroPlugin.so` | Same as for selected plugins | `HeteroPlugin.dll` | Same as for selected plugins | `libHeteroPlugin.dylib` | Same as for selected plugins | -| MULTI | `libMultiDevicePlugin.so` | Same as for selected plugins | `MultiDevicePlugin.dll` | Same as for selected plugins | `libMultiDevicePlugin.dylib` | Same as for selected plugins | +| HETERO | `libHeteroPlugin.so` | Same as for selected plugins | `HeteroPlugin.dll` | Same as for selected plugins | `libHeteroPlugin.so` | Same as for selected plugins | +| MULTI | `libMultiDevicePlugin.so` | Same as for selected plugins | `MultiDevicePlugin.dll` | Same as for selected plugins | `libMultiDevicePlugin.so` | Same as for selected plugins | > **NOTE**: All plugin libraries also depend on core Inference Engine libraries. diff --git a/docs/IE_DG/supported_plugins/GNA.md b/docs/IE_DG/supported_plugins/GNA.md index d40db457abc05d..3a1bada28ba68c 100644 --- a/docs/IE_DG/supported_plugins/GNA.md +++ b/docs/IE_DG/supported_plugins/GNA.md @@ -2,98 +2,98 @@ ## Introducing the GNA Plugin -Intel® Gaussian & Neural Accelerator is a low-power neural coprocessor for continuous inference at the edge. +Intel® Gaussian & Neural Accelerator is a low-power neural coprocessor for continuous inference at the edge. -Intel® GNA is not intended to replace classic inference devices such as -CPU, graphics processing unit (GPU), or vision processing unit (VPU) . 
It is designed for offloading +Intel® GNA is not intended to replace classic inference devices such as +CPU, graphics processing unit (GPU), or vision processing unit (VPU). It is designed for offloading continuous inference workloads including but not limited to noise reduction or speech recognition to save power and free CPU resources. -The GNA plugin provides a way to run inference on Intel® GNA, as well as in the software execution mode on CPU. +The GNA plugin provides a way to run inference on Intel® GNA, as well as in the software execution mode on CPU. -## Devices with Intel® GNA +## Devices with Intel® GNA -Devices with Intel® GNA support: +Devices with Intel® GNA support: -* [Intel® Speech Enabling Developer Kit](https://www.intel.com/content/www/us/en/support/articles/000026156/boards-and-kits/smart-home.html) +* [Intel® Speech Enabling Developer Kit](https://www.intel.com/content/www/us/en/support/articles/000026156/boards-and-kits/smart-home.html) -* [Amazon Alexa* Premium Far-Field Developer Kit](https://developer.amazon.com/en-US/alexa/alexa-voice-service/dev-kits/amazon-premium-voice) +* [Amazon Alexa\* Premium Far-Field Developer Kit](https://developer.amazon.com/en-US/alexa/alexa-voice-service/dev-kits/amazon-premium-voice) -* [Intel® Pentium® Silver Processors N5xxx, J5xxx and Intel® Celeron® Processors N4xxx, J4xxx](https://ark.intel.com/content/www/us/en/ark/products/codename/83915/gemini-lake.html): - - Intel® Pentium® Silver J5005 Processor - - Intel® Pentium® Silver N5000 Processor - - Intel® Celeron® J4005 Processor - - Intel® Celeron® J4105 Processor - - Intel® Celeron® Processor N4100 - - Intel® Celeron® Processor N4000 +* [Intel® Pentium® Silver Processors N5xxx, J5xxx and Intel® Celeron® Processors N4xxx, J4xxx](https://ark.intel.com/content/www/us/en/ark/products/codename/83915/gemini-lake.html): + - Intel® Pentium® Silver J5005 Processor + - Intel® Pentium® Silver N5000 Processor + - Intel® Celeron® J4005 Processor + - Intel® Celeron® J4105 Processor + - Intel® Celeron® Processor N4100 + - Intel® Celeron® Processor N4000 -* [Intel® Core™ Processors (formerly codenamed Cannon Lake)](https://ark.intel.com/content/www/us/en/ark/products/136863/intel-core-i3-8121u-processor-4m-cache-up-to-3-20-ghz.html): -Intel® Core™ i3-8121U Processor +* [Intel® Core™ Processors (formerly codenamed Cannon Lake)](https://ark.intel.com/content/www/us/en/ark/products/136863/intel-core-i3-8121u-processor-4m-cache-up-to-3-20-ghz.html): +Intel® Core™ i3-8121U Processor -* [10th Generation Intel® Core™ Processors (formerly codenamed Ice Lake)](https://ark.intel.com/content/www/us/en/ark/products/codename/74979/ice-lake.html): - - Intel® Core™ i7-1065G7 Processor - - Intel® Core™ i7-1060G7 Processor - - Intel® Core™ i5-1035G4 Processor - - Intel® Core™ i5-1035G7 Processor - - Intel® Core™ i5-1035G1 Processor - - Intel® Core™ i5-1030G7 Processor - - Intel® Core™ i5-1030G4 Processor - - Intel® Core™ i3-1005G1 Processor - - Intel® Core™ i3-1000G1 Processor - - Intel® Core™ i3-1000G4 Processor +* [10th Generation Intel® Core™ Processors (formerly codenamed Ice Lake)](https://ark.intel.com/content/www/us/en/ark/products/codename/74979/ice-lake.html): + - Intel® Core™ i7-1065G7 Processor + - Intel® Core™ i7-1060G7 Processor + - Intel® Core™ i5-1035G4 Processor + - Intel® Core™ i5-1035G7 Processor + - Intel® Core™ i5-1035G1 Processor + - Intel® Core™ i5-1030G7 Processor + - Intel® Core™ i5-1030G4 Processor + - Intel® Core™ i3-1005G1 Processor + - Intel® Core™ i3-1000G1 Processor + - Intel® 
Core™ i3-1000G4 Processor -* All [11th Generation Intel® Core™ Processors (formerly codenamed Tiger Lake)](https://ark.intel.com/content/www/us/en/ark/products/codename/88759/tiger-lake.html). +* All [11th Generation Intel® Core™ Processors (formerly codenamed Tiger Lake)](https://ark.intel.com/content/www/us/en/ark/products/codename/88759/tiger-lake.html). -> **NOTE**: On platforms where Intel® GNA is not enabled in the BIOS, the driver cannot be installed, so the GNA plugin uses the software emulation mode only. +> **NOTE**: On platforms where Intel® GNA is not enabled in the BIOS, the driver cannot be installed, so the GNA plugin uses the software emulation mode only. ## Drivers and Dependencies -Intel® GNA hardware requires a driver to be installed on the system. +Intel® GNA hardware requires a driver to be installed on the system. * Linux\* OS: -[Download Intel® GNA driver for Ubuntu Linux 18.04.3 LTS (with HWE Kernel version 5.0+)](https://download.01.org/opencv/drivers/gna/) +[Download Intel® GNA driver for Ubuntu Linux 18.04.3 LTS (with HWE Kernel version 5.0+)](https://download.01.org/opencv/drivers/gna/) * Windows\* OS: -Intel® GNA driver for Windows is available through Windows Update\* +Intel® GNA driver for Windows is available through Windows Update\* ## Models and Layers Limitations -Because of specifics of hardware architecture, Intel® GNA supports a limited set of layers, their kinds and combinations. -For example, you should not expect the GNA Plugin to be able to run computer vision models, except those specifically adapted for the GNA Plugin, because the plugin does not fully support -2D convolutions. +Because of specifics of hardware architecture, Intel® GNA supports a limited set of layers, their kinds and combinations. +For example, you should not expect the GNA Plugin to be able to run computer vision models, except those specifically adapted +for the GNA Plugin, because the plugin does not fully support 2D convolutions. + +For the list of supported layers, see the **GNA** column of the **Supported Layers** section in [Supported Devices](Supported_Devices.md). -The list of supported layers can be found -[here](Supported_Devices.md) (see the GNA column of Supported Layers section). Limitations include: - Only 1D convolutions are natively supported in the models converted from: - - [Kaldi](../../MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md) framework; - - [TensorFlow](../../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md) framework; note that for TensorFlow models, the option `--disable_nhwc_to_nchw` must be used when running the Model Optimizer. -- The number of output channels for convolutions must be a multiple of 4 -- Permute layer support is limited to the cases where no data reordering is needed, or when reordering is happening for 2 dimensions, at least one of which is not greater than 8 + - [Kaldi](../../MO_DG/prepare_model/convert_model/Convert_Model_From_Kaldi.md) framework + - [TensorFlow](../../MO_DG/prepare_model/convert_model/Convert_Model_From_TensorFlow.md) framework. For TensorFlow models, use the `--disable_nhwc_to_nchw` option when running the Model Optimizer. +- The number of output channels for convolutions must be a multiple of 4. +- Permute layer support is limited to the cases where no data reordering is needed or when reordering is happening for two dimensions, at least one of which is not greater than 8. 
#### Experimental Support for 2D Convolutions -The Intel® GNA hardware natively supports only 1D convolution. +The Intel® GNA hardware natively supports only 1D convolution. -However, 2D convolutions can be mapped to 1D when a convolution kernel moves in a single direction. Such a transformation is performed by the GNA Plugin for Kaldi `nnet1` convolution. From this perspective, the Intel® GNA hardware convolution operation accepts a `NHWC` input and produces `NHWC` output. Because OpenVINO™ only supports the `NCHW` layout, it may be necessary to insert `Permute` layers before or after convolutions. +However, 2D convolutions can be mapped to 1D when a convolution kernel moves in a single direction. GNA Plugin performs such a transformation for Kaldi `nnet1` convolution. From this perspective, the Intel® GNA hardware convolution operation accepts an `NHWC` input and produces an `NHWC` output. Because OpenVINO™ only supports the `NCHW` layout, you may need to insert `Permute` layers before or after convolutions. -For example, the Kaldi model optimizer inserts such a permute after convolution for the [rm_cnn4a network](https://download.01.org/openvinotoolkit/models_contrib/speech/kaldi/rm_cnn4a_smbr/). This `Permute` layer is automatically removed by the GNA Plugin, because the Intel® GNA hardware convolution layer already produces the required `NHWC` result. +For example, the Kaldi model optimizer inserts such a permute after convolution for the [rm_cnn4a network](https://download.01.org/openvinotoolkit/models_contrib/speech/kaldi/rm_cnn4a_smbr/). This `Permute` layer is automatically removed by the GNA Plugin, because the Intel® GNA hardware convolution layer already produces the required `NHWC` result. ## Operation Precision -Intel® GNA essentially operates in the low-precision mode, which represents a mix of 8-bit (`I8`), 16-bit (`I16`), and 32-bit (`I32`) integer computations, so compared to 32-bit floating point (`FP32`) results – for example, calculated on CPU using Inference Engine [CPU Plugin](CPU.md) – outputs calculated using reduced integer precision are different from the scores calculated using floating point. +Intel® GNA essentially operates in the low-precision mode, which represents a mix of 8-bit (`I8`), 16-bit (`I16`), and 32-bit (`I32`) integer computations. Outputs calculated using a reduced integer precision are different from the scores calculated using the floating point format, for example, `FP32` outputs calculated on CPU using the Inference Engine [CPU Plugin](CPU.md). -Unlike other plugins supporting low-precision execution, the GNA plugin calculates quantization factors at the model loading time, so a model can run without calibration. +Unlike other plugins supporting low-precision execution, the GNA plugin calculates quantization factors at the model loading time, so you can run a model without calibration. -## Execution Modes +## Execution Modes | Mode | Description | | :---------------------------------| :---------------------------------------------------------| -| `GNA_AUTO` | Uses Intel® GNA if available, otherwise uses software execution mode on CPU. | -| `GNA_HW` | Uses Intel® GNA if available, otherwise raises an error. | -| `GNA_SW` | *Deprecated*. Executes the GNA-compiled graph on CPU performing calculations in the same precision as the Intel® GNA, but not in the bit-exact mode. | -| `GNA_SW_EXACT` | Executes the GNA-compiled graph on CPU performing calculations in the same precision as the Intel® GNA in the bit-exact mode. 
| +| `GNA_AUTO` | Uses Intel® GNA if available, otherwise uses software execution mode on CPU. | +| `GNA_HW` | Uses Intel® GNA if available, otherwise raises an error. | +| `GNA_SW` | *Deprecated*. Executes the GNA-compiled graph on CPU performing calculations in the same precision as the Intel® GNA, but not in the bit-exact mode. | +| `GNA_SW_EXACT` | Executes the GNA-compiled graph on CPU performing calculations in the same precision as the Intel® GNA in the bit-exact mode. | | `GNA_SW_FP32` | Executes the GNA-compiled graph on CPU but substitutes parameters and calculations from low precision to floating point (`FP32`). | ## Supported Configuration Parameters @@ -101,42 +101,42 @@ Unlike other plugins supporting low-precision execution, the GNA plugin calculat The plugin supports the configuration parameters listed below. The parameters are passed as `std::map` on `InferenceEngine::Core::LoadNetwork` or `InferenceEngine::SetConfig`. -The parameter `KEY_GNA_DEVICE_MODE` can also be changed at run time using `InferenceEngine::ExecutableNetwork::SetConfig` (for any values excluding `GNA_SW_FP32`). This allows switching the +You can change the `KEY_GNA_DEVICE_MODE` parameter at run time using `InferenceEngine::ExecutableNetwork::SetConfig`, which works for any value excluding `GNA_SW_FP32`. This enables you to switch the execution between software emulation mode and hardware emulation mode after the model is loaded. The parameter names below correspond to their usage through API keys, such as `GNAConfigParams::KEY_GNA_DEVICE_MODE` or `PluginConfigParams::KEY_PERF_COUNT`. -When specifying key values as raw strings (that is, when using Python API), omit the `KEY_` prefix. +When specifying key values as raw strings, that is, when using Python API, omit the `KEY_` prefix. | Parameter Name | Parameter Values | Default Value | Description | | :---------------------------------| :---------------------------------------------------------| :-----------| :------------------------------------------------------------------------| -| `KEY_GNA_COMPACT_MODE` | `YES`/`NO` | `NO` | Reuse I/O buffers to save space (makes debugging harder) | -| `KEY_GNA_SCALE_FACTOR` | `FP32` number | 1.0 | Scale factor to use for input quantization | -| `KEY_GNA_DEVICE_MODE` | `GNA_AUTO`/`GNA_HW`/`GNA_SW_EXACT`/`GNA_SW_FP32` | `GNA_AUTO` | One of the modes described Execution Models | -| `KEY_GNA_FIRMWARE_MODEL_IMAGE` | `std::string` | `""` | Name for embedded model binary dump file | -| `KEY_GNA_PRECISION` | `I16`/`I8` | `I16` | Hint to GNA plugin: preferred integer weight resolution for quantization | -| `KEY_PERF_COUNT` | `YES`/`NO` | `NO` | Turn on performance counters reporting | -| `KEY_GNA_LIB_N_THREADS` | 1-127 integer number | 1 | Sets the number of GNA accelerator library worker threads used for inference computation in software modes +| `KEY_GNA_COMPACT_MODE` | `YES`/`NO` | `NO` | Enables I/O buffers reuse to save space. Makes debugging harder. | +| `KEY_GNA_SCALE_FACTOR` | `FP32` number | 1.0 | Sets the scale factor to use for input quantization. | +| `KEY_GNA_DEVICE_MODE` | `GNA_AUTO`/`GNA_HW`/`GNA_SW_EXACT`/`GNA_SW_FP32` | `GNA_AUTO` | One of the modes described in Execution Modes | +| `KEY_GNA_FIRMWARE_MODEL_IMAGE` | `std::string` | `""` | Sets the name for the embedded model binary dump file. | +| `KEY_GNA_PRECISION` | `I16`/`I8` | `I16` | Sets the preferred integer weight resolution for quantization. | +| `KEY_PERF_COUNT` | `YES`/`NO` | `NO` | Turns on performance counters reporting. 
| +| `KEY_GNA_LIB_N_THREADS` | 1-127 integer number | 1 | Sets the number of GNA accelerator library worker threads used for inference computation in software modes. ## How to Interpret Performance Counters As a result of collecting performance counters using `InferenceEngine::InferRequest::GetPerformanceCounts`, you can find various performance data about execution on GNA. -Returned map stores a counter description as a key, counter value is stored in the `realTime_uSec` field of the `InferenceEngineProfileInfo` structure. Current GNA implementation calculates counters for the whole utterance scoring and does not provide per-layer information. API allows to retrieve counter units in cycles, but they can be converted to seconds as follows: +The returned map stores a counter description as a key, and a counter value in the `realTime_uSec` field of the `InferenceEngineProfileInfo` structure. The current GNA implementation calculates counters for the whole utterance scoring and does not provide per-layer information. The API enables you to retrieve counter units in cycles, which you can convert to seconds as follows: ``` seconds = cycles / frequency ``` -Refer to the table below to learn about the frequency of Intel® GNA inside a particular processor. -Processor | Frequency of Intel® GNA +Refer to the table below to learn about the frequency of Intel® GNA inside a particular processor. +Processor | Frequency of Intel® GNA ---|--- -Intel® Ice Lake processors| 400MHz -Intel® Core™ i3-8121U processor| 400MHz -Intel® Gemini Lake processors | 200MHz +Intel® Ice Lake processors| 400MHz +Intel® Core™ i3-8121U processor| 400MHz +Intel® Gemini Lake processors | 200MHz Performance counters provided for the time being: * Scoring request performance results - * Number of total cycles spent on scoring in hardware (including compute and memory stall cycles) + * Number of total cycles spent on scoring in hardware including compute and memory stall cycles * Number of stall cycles spent in hardware ## Multithreading Support in GNA Plugin @@ -151,40 +151,40 @@ The GNA plugin supports the following configuration parameters for multithreadin ## Network Batch Size -Intel® GNA plugin supports the processing of context-windowed speech frames in batches of 1-8 frames in one +The Intel® GNA plugin supports the processing of context-windowed speech frames in batches of 1-8 frames in one input blob using `InferenceEngine::ICNNNetwork::setBatchSize`. Increasing batch size only improves efficiency of `Fully Connected` layers. > **NOTE**: For networks with `Convolutional`, `LSTM`, or `Memory` layers, the only supported batch size is 1. ## Compatibility with Heterogeneous Plugin -Heterogeneous plugin was tested with the Intel® GNA as a primary device and CPU as a secondary device. To run inference of networks with layers unsupported by the GNA plugin (for example, Softmax), use the Heterogeneous plugin with the `HETERO:GNA,CPU` configuration. For the list of supported networks, see the [Supported Frameworks](#supported-frameworks). +The Heterogeneous plugin was tested with the Intel® GNA as a primary device and CPU as a secondary device. To run inference of networks with layers unsupported by the GNA plugin, such as Softmax, use the Heterogeneous plugin with the `HETERO:GNA,CPU` configuration. -> **NOTE:** Due to limitation of the Intel® GNA backend library, heterogeneous support is limited to cases where in the resulted sliced graph, only one subgraph is scheduled to run on GNA\_HW or GNA\_SW devices. 
+> **NOTE:** Due to a limitation of the Intel® GNA backend library, heterogeneous support is limited to cases where, in the resulting sliced graph, only one subgraph is scheduled to run on GNA\_HW or GNA\_SW devices. -## Recovery from interruption by high-priority Windows audio processes\* +## Recovery from Interruption by High-Priority Windows Audio Processes\* -As noted in the introduction, GNA is designed for real-time workloads such as noise reduction. +GNA is designed for real-time workloads such as noise reduction. For such workloads, processing should be time constrained, otherwise extra delays may cause undesired effects such as -audio "glitches". To make sure that processing can satisfy real time requirements, the GNA driver provides a QoS -(Quality of Service) mechanism which interrupts requests that might cause high-priority Windows audio processes to miss -schedule, thereby causing long running GNA tasks to terminate early. +*audio glitches*. To make sure that processing can satisfy real-time requirements, the GNA driver provides a Quality of Service +(QoS) mechanism, which interrupts requests that might cause high-priority Windows audio processes to miss +the schedule, thereby causing long-running GNA tasks to terminate early. Applications should be prepared for this situation. -If an inference (in `GNA_HW` mode) cannot be executed because of such an interruption, then `InferRequest::Wait()` will return status code -`StatusCode::INFER_NOT_STARTED` (note that it will be changed to a more meaningful status code in future releases). +If an inference in the `GNA_HW` mode cannot be executed because of such an interruption, then `InferRequest::Wait()` returns status code +`StatusCode::INFER_NOT_STARTED`. In future releases, it will be changed to a more meaningful status code. -Any application working with GNA must properly react if it receives this code. Various strategies are possible. -One of the options is to immediately switch to GNA SW emulation mode: +Any application working with GNA must properly react to this code. +One of the strategies to adapt an application is the following: +1. Immediately switch to the GNA_SW emulation mode: ```cpp std::map<std::string, Parameter> newConfig; newConfig[GNAConfigParams::KEY_GNA_DEVICE_MODE] = Parameter("GNA_SW_EXACT"); executableNet.SetConfig(newConfig); ``` - -then resubmit and switch back to GNA_HW after some time hoping that the competing application has finished. 
## See Also diff --git a/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md b/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md index 98de8d014145c7..8ce80da1d1579b 100644 --- a/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md +++ b/docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md @@ -77,7 +77,6 @@ Model Optimizer produces an Intermediate Representation (IR) of the network, whi * [Converting DeepSpeech from TensorFlow](prepare_model/convert_model/tf_specific/Convert_DeepSpeech_From_Tensorflow.md) * [Converting Language Model on One Billion Word Benchmark from TensorFlow](prepare_model/convert_model/tf_specific/Convert_lm_1b_From_Tensorflow.md) * [Converting Neural Collaborative Filtering Model from TensorFlow*](prepare_model/convert_model/tf_specific/Convert_NCF_From_Tensorflow.md) - * [Converting TensorFlow* Object Detection API Models](prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md) * [Converting TensorFlow*-Slim Image Classification Model Library Models](prepare_model/convert_model/tf_specific/Convert_Slim_Library_Models.md) * [Converting CRNN Model from TensorFlow*](prepare_model/convert_model/tf_specific/Convert_CRNN_From_Tensorflow.md) @@ -91,17 +90,15 @@ Model Optimizer produces an Intermediate Representation (IR) of the network, whi * [Model Optimizations Techniques](prepare_model/Model_Optimization_Techniques.md) * [Cutting parts of the model](prepare_model/convert_model/Cutting_Model.md) * [Sub-graph Replacement in Model Optimizer](prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md) - * [(Deprecated) Case-Study: Converting SSD models created with the TensorFlow* Object Detection API](prepare_model/customize_model_optimizer/TensorFlow_SSD_ObjectDetection_API.md) - * [(Deprecated) Case-Study: Converting Faster R-CNN models created with the TensorFlow* Object Detection API](prepare_model/customize_model_optimizer/TensorFlow_Faster_RCNN_ObjectDetection_API.md) * [Supported Framework Layers](prepare_model/Supported_Frameworks_Layers.md) * [Intermediate Representation and Operation Sets](IR_and_opsets.md) * [Operations Specification](../ops/opset.md) * [Intermediate Representation suitable for INT8 inference](prepare_model/convert_model/IR_suitable_for_INT8_inference.md) - - * [Custom Layers in Model Optimizer](prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) + * [Model Optimizer Extensibility](prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md) * [Extending Model Optimizer with New Primitives](prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md) + * [Extending Model Optimizer with Caffe Python Layers](prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md) + * [Extending Model Optimizer with Custom MXNet* Operations](prepare_model/customize_model_optimizer/Extending_MXNet_Model_Optimizer_with_New_Primitives.md) * [Legacy Mode for Caffe* Custom Layers](prepare_model/customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md) - * [Model Optimizer Frequently Asked Questions](prepare_model/Model_Optimizer_FAQ.md) * [Known Issues](Known_Issues_Limitations.md) diff --git a/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md b/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md index c5c6402eaf0bed..869cfa49d5e942 100644 --- a/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md +++ b/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md @@ -123,6 +123,7 @@ Standard TensorFlow\* 
operations: | :----------| :----------| | Acosh | No | | Add | No | +| AddV2 | No | | AddN | No | | ArgMax | No | | Asinh | No | @@ -158,6 +159,8 @@ Standard TensorFlow\* operations: | Floor | No | | FloorDiv | No | | FusedBatchNorm | No | +| FusedBatchNormV2 | No | +| FusedBatchNormV3 | No | | Gather | No | | GatherNd | No | | GatherV2 | No | @@ -338,6 +341,7 @@ Standard ONNX\* operators: | Floor | No | | GRU | No | | Gather | No | +| GatherElements | Doesn't work with negative indices | | GatherND | No | | GatherTree | No | | Gemm | No | diff --git a/docs/MO_DG/prepare_model/convert_model/Converting_Model.md b/docs/MO_DG/prepare_model/convert_model/Converting_Model.md index b523897a773c57..2df7773b8ad57d 100644 --- a/docs/MO_DG/prepare_model/convert_model/Converting_Model.md +++ b/docs/MO_DG/prepare_model/convert_model/Converting_Model.md @@ -38,5 +38,5 @@ Framework-specific parameters for: ## See Also * [Configuring the Model Optimizer](../Config_Model_Optimizer.md) * [IR Notation Reference](../../IR_and_opsets.md) -* [Custom Layers in Model Optimizer](../customize_model_optimizer/Customize_Model_Optimizer.md) -* [Model Cutting](Cutting_Model.md) \ No newline at end of file +* [Model Optimizer Extensibility](../customize_model_optimizer/Customize_Model_Optimizer.md) +* [Model Cutting](Cutting_Model.md) diff --git a/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md b/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md index b208a5f5b5c307..a4bb4e98017276 100644 --- a/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md +++ b/docs/MO_DG/prepare_model/convert_model/Cutting_Model.md @@ -9,7 +9,6 @@ The following examples are the situations when model cutting is useful or even r * model has pre- or post-processing parts that cannot be translated to existing Inference Engine layers. * model has a training part that is convenient to be kept in the model, but not used during inference. * model is too complex (contains lots of unsupported operations that cannot be easily implemented as custom layers), so the complete model cannot be converted in one shot. -* model is one of the supported [SSD models](../customize_model_optimizer/TensorFlow_SSD_ObjectDetection_API.md). In this case, you need to cut a post-processing part off. * problem with model conversion in the Model Optimizer or inference in the Inference Engine occurred. To localize the issue, limit the scope for conversion by iteratively searching for problematic places in the model. * single custom layer or a combination of custom layers is isolated for debugging purposes. diff --git a/docs/MO_DG/prepare_model/convert_model/Legacy_IR_Layers_Catalog_Spec.md b/docs/MO_DG/prepare_model/convert_model/Legacy_IR_Layers_Catalog_Spec.md index dc35ff0fba7271..abd997f36c3f2c 100644 --- a/docs/MO_DG/prepare_model/convert_model/Legacy_IR_Layers_Catalog_Spec.md +++ b/docs/MO_DG/prepare_model/convert_model/Legacy_IR_Layers_Catalog_Spec.md @@ -1582,9 +1582,9 @@ OI, which means that Input changes the fastest, then Output. **Mathematical Formulation** - \f[ - output[:, ... ,:, i, ... , j,:, ... ,:] = input2[:, ... ,:, input1[i, ... ,j],:, ... ,:] - \f] +\f[ + output[:, ... ,:, i, ... , j,:, ... ,:] = input2[:, ... ,:, input1[i, ... ,j],:, ... 
,:] +\f] **Inputs** @@ -5086,7 +5086,9 @@ t \in \left ( 0, \quad tiles \right ) Output tensor is populated by values computes in the following way: - output[i1, ..., i(axis-1), j, i(axis+1) ..., iN] = top_k(input[i1, ...., i(axis-1), :, i(axis+1), ..., iN]), k, sort, mode) +\f[ +output[i1, ..., i(axis-1), j, i(axis+1) ..., iN] = top_k(input[i1, ...., i(axis-1), :, i(axis+1), ..., iN], k, sort, mode) +\f] So for each slice `input[i1, ...., i(axis-1), :, i(axis+1), ..., iN]` which represents 1D array, top_k value is computed individually. Sorting and minimum/maximum are controlled by `sort` and `mode` attributes. diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md index d0e310088e091e..a7880220c94af6 100644 --- a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md +++ b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_Object_Detection_API_Models.md @@ -8,29 +8,33 @@ With 2018 R3 release, the Model Optimizer introduces a new approach to convert models created using the TensorFlow\* Object Detection API. Compared with the previous approach, the new process produces inference results with higher accuracy and does not require modifying any configuration files and providing intricate command line parameters. -You can download TensorFlow\* Object Detection API models from the Object Detection Model Zoo. +You can download TensorFlow\* Object Detection API models from the TensorFlow 1 Detection Model Zoo +or TensorFlow 2 Detection Model Zoo. NOTE: Before converting, make sure you have configured the Model Optimizer. For configuration steps, refer to [Configuring the Model Optimizer](../../Config_Model_Optimizer.md). To convert a TensorFlow\* Object Detection API model, go to the `<INSTALL_DIR>/deployment_tools/model_optimizer` directory and run the `mo_tf.py` script with the following required parameters: -* `--input_model <path_to_frozen.pb>` --- File with a pre-trained model (binary or text .pb file after freezing) +* `--input_model <path_to_frozen.pb>` --- File with a pre-trained model (binary or text .pb file after freezing) OR `--saved_model_dir <path_to_saved_model_dir>` for the TensorFlow\* 2 models * `--transformations_config <path_to_transformations_config.json>` --- A subgraph replacement configuration file with transformations description. For the models downloaded from the TensorFlow\* Object Detection API zoo, you can find the configuration files in the `<INSTALL_DIR>/deployment_tools/model_optimizer/extensions/front/tf` directory. 
Use: * `ssd_v2_support.json` --- for frozen SSD topologies from the models zoo version up to 1.13.X inclusively - * `ssd_support_api_v.1.14.json` --- for frozen SSD topologies trained manually using the TensorFlow* Object Detection API version 1.14 up to 1.14.X inclusively - * `ssd_support_api_v.1.15.json` --- for frozen SSD topologies trained manually using the TensorFlow* Object Detection API version 1.15 or higher + * `ssd_support_api_v.1.14.json` --- for frozen SSD topologies trained using the TensorFlow\* Object Detection API version 1.14 up to 1.14.X inclusively + * `ssd_support_api_v.1.15.json` --- for frozen SSD topologies trained using the TensorFlow\* Object Detection API version 1.15 up to 2.0 + * `ssd_support_api_v.2.0.json` --- for frozen SSD topologies trained using the TensorFlow\* Object Detection API version 2.0 or higher * `faster_rcnn_support.json` --- for frozen Faster R-CNN topologies from the models zoo - * `faster_rcnn_support_api_v1.7.json` --- for Faster R-CNN topologies trained manually using the TensorFlow* Object Detection API version 1.7.0 up to 1.9.X inclusively - * `faster_rcnn_support_api_v1.10.json` --- for Faster R-CNN topologies trained manually using the TensorFlow* Object Detection API version 1.10.0 up to 1.12.X inclusively - * `faster_rcnn_support_api_v1.13.json` --- for Faster R-CNN topologies trained manually using the TensorFlow* Object Detection API version 1.13.X - * `faster_rcnn_support_api_v1.14.json` --- for Faster R-CNN topologies trained manually using the TensorFlow* Object Detection API version 1.14.0 up to 1.14.X inclusively - * `faster_rcnn_support_api_v1.15.json` --- for Faster R-CNN topologies trained manually using the TensorFlow* Object Detection API version 1.15.0 or higher + * `faster_rcnn_support_api_v1.7.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.7.0 up to 1.9.X inclusively + * `faster_rcnn_support_api_v1.10.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.10.0 up to 1.12.X inclusively + * `faster_rcnn_support_api_v1.13.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.13.X + * `faster_rcnn_support_api_v1.14.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.14.0 up to 1.14.X inclusively + * `faster_rcnn_support_api_v1.15.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.15.0 up to 2.0 + * `faster_rcnn_support_api_v2.0.json` --- for Faster R-CNN topologies trained using the TensorFlow\* Object Detection API version 2.0 or higher * `mask_rcnn_support.json` --- for frozen Mask R-CNN topologies from the models zoo - * `mask_rcnn_support_api_v1.7.json` --- for Mask R-CNN topologies trained manually using the TensorFlow* Object Detection API version 1.7.0 up to 1.9.X inclusively - * `mask_rcnn_support_api_v1.11.json` --- for Mask R-CNN topologies trained manually using the TensorFlow* Object Detection API version 1.11.0 up to 1.12.X inclusively - * `mask_rcnn_support_api_v1.13.json` --- for Mask R-CNN topologies trained manually using the TensorFlow* Object Detection API version 1.13.0 up to 1.13.X inclusively - * `mask_rcnn_support_api_v1.14.json` --- for Mask R-CNN topologies trained manually using the TensorFlow* Object Detection API version 1.14.0 up to 1.14.X inclusively - * `mask_rcnn_support_api_v1.15.json` --- for Mask R-CNN topologies trained manually using the TensorFlow* 
Object Detection API version 1.15.0 or higher + * `mask_rcnn_support_api_v1.7.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.7.0 up to 1.9.X inclusively + * `mask_rcnn_support_api_v1.11.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.11.0 up to 1.12.X inclusively + * `mask_rcnn_support_api_v1.13.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.13.0 up to 1.13.X inclusively + * `mask_rcnn_support_api_v1.14.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.14.0 up to 1.14.X inclusively + * `mask_rcnn_support_api_v1.15.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 1.15.0 up to 2.0 + * `mask_rcnn_support_api_v2.0.json` --- for Mask R-CNN topologies trained using the TensorFlow\* Object Detection API version 2.0 or higher * `rfcn_support.json` --- for the frozen RFCN topology from the models zoo frozen with TensorFlow\* version 1.9.0 or lower. * `rfcn_support_api_v1.10.json` --- for the frozen RFCN topology from the models zoo frozen with TensorFlow\* version 1.10.0 up to 1.12.X inclusively * `rfcn_support_api_v1.13.json` --- for the frozen RFCN topology from the models zoo frozen with TensorFlow\* version 1.13.X. diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md index b7288322441692..0073ac2f5490ca 100644 --- a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md +++ b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_YOLO_From_Tensorflow.md @@ -45,6 +45,10 @@ python3 convert_weights_pb.py --class_names coco.names --data_format NHWC --weig ```sh python3 convert_weights_pb.py --class_names coco.names --data_format NHWC --weights_file yolov3-tiny.weights --tiny ``` +At this step, you may receive a warning like `WARNING:tensorflow:Entity <...> could not be transformed and will be executed as-is.`. To work around this issue, switch to gast 0.2.2 with the following command: +```sh +pip3 install --user gast==0.2.2 +``` If you have YOLOv3 weights trained for an input image with the size different from 416 (320, 608 or your own), please provide the `--size` key with the size of your image specified while running the converter. For example, run the following command for an image with size 608: ```sh diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md index 2eb6b1717a58f5..93c4c273f44ffc 100644 --- a/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md +++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Customize_Model_Optimizer.md @@ -1,82 +1,1299 @@ -# Custom Layers in the Model Optimizer {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer} +# Model Optimizer Extensibility {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer} -Model Optimizer searches for each layer of the input model in the list of known layers before building the model's internal representation, optimizing the model, and producing the Intermediate Representation. 
+* [Model Representation in Memory](#model-representation-in-memory) +* [Model Conversion Pipeline](#model-conversion-pipeline) + * [Model Loading](#model-loading) + * [Operations Attributes Extracting](#operations-attributes-extracting) + * [Front Phase](#front-phase) + * [Partial Inference](#partial-inference) + * [Middle Phase](#middle-phase) + * [NHWC to NCHW Layout Change](#layout-change) + * [Back Phase](#back-phase) + * [Intermediate Representation Emitting](#ir-emitting) +* [Graph Traversal and Modification Using `Port`s and `Connection`s](#graph-ports-and-connections) + * [Ports](#intro-ports) + * [Connections](#intro-connections) +* [Model Optimizer Extensions](#extensions) + * [Model Optimizer Operation](#extension-operation) + * [Operation Extractor](#operation-extractor) + * [Graph Transformation Extensions](#graph-transformations) + * [Front Phase Transformations](#front-phase-transformations) + * [Pattern-Defined Front Phase Transformations](#pattern-defined-front-phase-transformations) + * [Specific Operation Front Phase Transformations](#specific-operation-front-phase-transformations) + * [Generic Front Phase Transformations](#generic-front-phase-transformations) + * [Node Name Pattern Front Phase Transformations](#node-name-pattern-front-phase-transformations) + * [Front Phase Transformations Using Start and End Points](#start-end-points-front-phase-transformations) + * [Generic Front Phase Transformations Enabled with Transformations Configuration File](#generic-transformations-config-front-phase-transformations) + * [Middle Phase Transformations](#middle-phase-transformations) + * [Pattern-Defined Middle Phase Transformations](#pattern-defined-middle-phase-transformations) + * [Generic Middle Phase Transformations](#generic-middle-phase-transformations) + * [Back Phase Transformations](#back-phase-transformations) + * [Pattern-Defined Back Phase Transformations](#pattern-defined-back-phase-transformations) + * [Generic Back Phase Transformations](#generic-back-phase-transformations) -The list of known layers is different for each of supported frameworks. To see the layers supported by your framework, refer to the [corresponding section](../Supported_Frameworks_Layers.md). +The Model Optimizer extensibility mechanism allows you to support new operations and custom transformations to generate the +optimized Intermediate Representation (IR) as described in the +[Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™](../../IR_and_opsets.md). This +mechanism is a core part of the Model Optimizer and the Model Optimizer uses it under the hood, so the Model Optimizer +itself is a huge set of examples of how to add custom logic to support your model. -Custom layers are layers that are not included into a list of known layers. If your topology contains any layers that are not in the list of known layers, the Model Optimizer classifies them as custom. +There are several cases when customization is needed: -## Caffe\* Models with Custom Layers +* A model contains operation(s) not known to the Model Optimizer, but these operation(s) could be expressed as a +combination of supported operations. In this case, a custom transformation should be implemented to replace unsupported +operation(s) with supported ones. +* A model contains a sub-graph of operations which can be replaced with a smaller number of operations to get better +performance. This case corresponds to so-called fusing transformations. 
For example, replace a sub-graph performing +the following calculation \f$x / (1.0 + e^{-(beta * x)})\f$ with a single operation of type +[Swish](../../../ops/activation/Swish_4.md). +* A model contains a custom framework operation (an operation that is not a part of an official operation set of the +framework) which was developed using the framework extensibility mechanism. In this case, the Model Optimizer should know +how to handle the operation and generate a corresponding section in an IR for it. -You have two options if your Caffe\* model has custom layers: +It is necessary to figure out how the Model Optimizer represents a model in memory and converts it to an IR before +going into the details of the Model Optimizer extensibility mechanism. -* **Register the custom layers as extensions to the Model Optimizer**. For instructions, see [Extending Model Optimizer with New Primitives](Extending_Model_Optimizer_with_New_Primitives.md). When your custom layers are registered as extensions, the Model Optimizer generates a valid and optimized Intermediate Representation. You only need to write a small chunk of Python\* code that lets the Model Optimizer: +> **NOTE**: All paths in this document are provided relative to the Model Optimizer installation directory if not +> stated otherwise. * Generate a valid Intermediate Representation according to the rules you specified - * Be independent from the availability of Caffe on your computer - +* **Register the custom layers as Custom and use the system Caffe to calculate the output shape of each Custom Layer**, which is required by the Intermediate Representation format. For this method, the Model Optimizer requires the Caffe Python interface on your system. When registering the custom layer in the `CustomLayersMapping.xml` file, you can specify if layer parameters should appear in Intermediate Representation or if they should be skipped. To read more about the expected format and general structure of this file, see [Legacy Mode for Caffe* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md). This approach has several limitations: +## Model Representation in Memory +The model can be represented as a directed graph where nodes are operations and edges correspond to data passing from a +producer operation (node) to a consumer operation (node). - * If your layer output shape depends on dynamic parameters, input data or previous layers parameters, calculation of output shape of the layer via Caffe can be incorrect. In this case, you need to patch Caffe on your own. - - * If the calculation of output shape of the layer via Caffe fails inside the framework, Model Optimizer is unable to produce any correct Intermediate Representation and you also need to investigate the issue in the implementation of layers in the Caffe and patch it. - - * You are not able to produce Intermediate Representation on any machine that does not have Caffe installed. If you want to use Model Optimizer on multiple machines, your topology contains Custom Layers and you use `CustomLayersMapping.xml` to fallback on Caffe, you need to configure Caffe on each new machine. - - For these reasons, it is best to use the Model Optimizer extensions for Custom Layers: you do not depend on the framework and fully control the workflow. +Model Optimizer uses an instance of the Python class `mo.graph.graph.Graph` to represent the computation graph in memory during +the model conversion. 
This class is inherited from the `networkx.MultiDiGraph` class of the standard `networkx` Python +library and provides many convenient methods to traverse and modify the graph. Refer to the `mo/graph/graph.py` file for +examples. -If your model contains Custom Layers, it is important to understand the internal workflow of Model Optimizer. Consider the following example. +Model Optimizer keeps all necessary information about the operation in node attributes. Model Optimizer uses the class +`mo.graph.graph.Node` defined in the `mo/graph/graph.py` file, which is a wrapper on top of a `networkx` node attributes +dictionary and provides many convenient methods to work with the node. For example, the attribute with the +name `'my_attr'` of the node `my_node` can be retrieved with the code `my_node.my_attr`, which is equivalent to obtaining the +attribute with the name `'my_attr'` in the `graph.node['my_node']` dictionary. Refer to the `mo/graph/graph.py` for the +class implementation details. -**Example**: +An operation may have several inputs and outputs. For example, the operation [Split](../../../ops/movement/Split_1.md) has +two inputs: data to split and axis to split along, and a variable number of outputs depending on the value of the attribute +`num_splits`. Each input data to the operation is passed to a specific operation **input port**. An operation produces +output data from an **output port**. Input and output ports are numbered from 0 independently. Model Optimizer uses +the classes `mo.graph.port.Port` and `mo.graph.connection.Connection`, which are useful abstractions to perform graph +modifications like connecting/re-connecting nodes and traversing the graph. These classes are widely used in the Model +Optimizer code so it is easy to find a lot of usage examples. -The network has: +There is no dedicated class corresponding to an edge, so low-level graph manipulation is needed to access +edge attributes if required. Meanwhile, most manipulations with node connections should be done with the help of the +`mo.graph.connection.Connection` and `mo.graph.port.Port` classes, because low-level graph manipulation is error-prone and +is strongly discouraged. -* One input layer (#1) -* One output Layer (#5) -* Three internal layers (#2, 3, 4) +Further details and examples related to the model representation in memory are provided in the sections below, where they +can be explained in context. Also, refer to the [Graph Traversal and Modification Using `Port`s and +`Connection`s](#graph-ports-and-connections) for more information on how to use ports and connections. -The custom and standard layer types are: +## Model Conversion Pipeline +A model conversion pipeline can be represented with the following diagram: -* Layers #2 and #5 are implemented as Model Optimizer extensions. -* Layers #1 and #4 are supported in Model Optimizer out-of-the box. -* Layer #3 is neither in the list of supported layers nor in extensions, but is specified in CustomLayersMapping.xml. +![Model Conversion pipeline](../../../img/MO_conversion_pipeline.png) -> **NOTE**: If any of the layers are not in one of three categories described above, the Model Optimizer fails with an appropriate message and a link to the corresponding question in [Model Optimizer FAQ](../Model_Optimizer_FAQ.md). +Let's review each conversion step in detail. -The general process is as shown: +### Model Loading +Model Optimizer takes a trained model file as input. 
The model loader component of the Model Optimizer reads a model file +using Python bindings provided with the framework and builds an in-memory representation of a computation graph. There +is a separate loader for each supported framework. These loaders are implemented in the +`extensions/load/<framework>/loader.py` files of the Model Optimizer. -![Example custom layer network](../../img/mo_caffe_priorities.png) +> **NOTE**: Model Optimizer uses a special parser for Caffe\* models built on top of the `caffe.proto` file. In case of a +> model loading failure, the Model Optimizer throws an error and asks you to prepare a parser that can read the model. +> For more information on how to prepare the custom Caffe\* parser, refer to the [Model Optimizer Frequently Asked Questions #1](../Model_Optimizer_FAQ.md). -1. The example model is fed to the Model Optimizer that **loads the model** with the special parser, built on top of `caffe.proto` file. In case of failure, Model Optimizer asks you to prepare the parser that can read the model. For more information, refer to Model Optimizer, FAQ #1. +The result of the model loading step is a `Graph` object, which can be depicted like in the following example: -2. Model Optimizer **extracts the attributes of all layers**. In particular, it goes through the list of layers and attempts to find the appropriate extractor. In order of priority, Model Optimizer checks if the layer is: - - * Registered in `CustomLayersMapping.xml` - * Registered as a Model Optimizer extension - * Registered as a standard Model Optimizer layer - - When the Model Optimizer finds a satisfying condition from the list above, it extracts the attributes according to the following rules: - - * For bullet #1 - either takes all parameters or no parameters, according to the content of `CustomLayersMapping.xml` - * For bullet #2 - takes only the parameters specified in the extension - * For bullet #3 - takes only the parameters specified in the standard extractor - -3. Model Optimizer **calculates the output shape of all layers**. The logic is the same as it is for the priorities. **Important:** the Model Optimizer always takes the first available option. ![Graph After Load](../../../img/MO_graph_after_loader.png) -4. Model Optimizer **optimizes the original model and produces the Intermediate Representation**. +The Model Optimizer loader saves the framework description of an operation instance (usually a Protobuf message) into a node +attribute, usually with the name `pb`, for each operation of an input model. It is important that this is a +**framework-specific** description of an operation. This means that an operation, for example +[Convolution](../../../ops/convolution/Convolution_1.md), may be represented differently in the Caffe\* and +TensorFlow\* frameworks but performs exactly the same calculations from a mathematical point of view. -## TensorFlow\* Models with Custom Layers +In the example above, "Operation 2" has one input and two outputs. The tensor produced from the output port 0 is +consumed by "Operation 5" (the input port 0) and "Operation 3" (the input port 1). The tensor produced from the +output port 1 is consumed by "Operation 4" (the input port 0). -You have two options for TensorFlow\* models with custom layers: +Each edge has two attributes `in` and `out` containing the input port number of the consumer node and the output port +number of the producer node. 
These attributes describe the fact that nodes are operations consuming some input tensors +and producing some output tensors. But nodes themselves are "black boxes" from the Model Optimizer perspective because +they do not contain the required information about the operation they perform. -* **Register those layers as extensions to the Model Optimizer.** In this case, the Model Optimizer generates a valid and optimized Intermediate Representation. -* **If you have sub-graphs that should not be expressed with the analogous sub-graph in the Intermediate Representation, but another sub-graph should appear in the model, the Model Optimizer provides such an option.** This feature is helpful for many TensorFlow models. To read more, see [Sub-graph Replacement in the Model Optimizer](Subgraph_Replacement_Model_Optimizer.md). -## MXNet\* Models with Custom Layers +### Operations Attributes Extracting +The next step is to parse the framework-dependent operation representation saved in a node attribute and update the node +attributes with operation-specific attributes. There are three options to do this. -There are two options to convert your MXNet* model that contains custom layers: +1. The extractor extension approach. This is the recommended way to extract attributes for an operation, and it is +explained in detail in the [Operation Extractor](#operation-extractor) section; a minimal sketch is also provided at the end of this section. -1. Register the custom layers as extensions to the Model Optimizer. For instructions, see [Extending MXNet Model Optimizer with New Primitives](Extending_MXNet_Model_Optimizer_with_New_Primitives.md). When your custom layers are registered as extensions, the Model Optimizer generates a valid and optimized Intermediate Representation. You can create Model Optimizer extensions for both MXNet layers with op `Custom` and layers which are not standard MXNet layers. +2. The legacy approach with a built-in extractor. The file `mo/front/<framework>/extractor.py` (for example, the one +for Caffe) defines a dictionary with extractors for specific operation types. A key in the dictionary is the type of an +operation that triggers the extracting function, and the value is the function itself. The function has one parameter – a node +to extract attributes from. This is a legacy and non-extensible approach, so it should be avoided. This mechanism will be +removed in future versions of the Model Optimizer. -2. If you have sub-graphs that should not be expressed with the analogous sub-graph in the Intermediate Representation, but another sub-graph should appear in the model, the Model Optimizer provides such an option. In MXNet the function is actively used for ssd models provides an opportunity to for the necessary subgraph sequences and replace them. To read more, see [Sub-graph Replacement in the Model Optimizer](Subgraph_Replacement_Model_Optimizer.md). +3. The Caffe-specific extractor using the `CustomLayersMapping.xml` file described in +[Legacy Mode for Caffe\* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md). This approach is deprecated and will be +removed in future versions of the Model Optimizer. +The extractor execution order is the following: +* `CustomLayersMapping.xml` (for Caffe models only). +* Model Optimizer extension. +* Built-in Model Optimizer extractor. 
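+
+The following is a minimal sketch of an extractor implemented with the extractor extension approach. The operation name `MyOp`, the attribute-free extraction, and the `MyOp` operation class are assumptions made for illustration only; a real extractor updates the node with an `Op` subclass defined in the `ops/` directory and with attributes parsed from the framework message:
+
+```py
+from mo.front.extractor import FrontExtractorOp
+from mo.graph.graph import Node
+from mo.ops.op import Op
+
+
+class MyOp(Op):
+    # hypothetical Model Optimizer operation, defined here only to make the sketch self-contained
+    op = 'MyOp'
+
+    def __init__(self, graph, attrs: dict):
+        super().__init__(graph, {
+            'op': self.op,
+            'type': self.op,  # in a real operation this maps to a supported opset operation type
+            'infer': self.infer,
+            'in_ports_count': 1,
+            'out_ports_count': 1,
+        }, attrs)
+
+    @staticmethod
+    def infer(node: Node):
+        # the illustrative operation keeps the input shape unchanged
+        node.out_port(0).data.set_shape(node.in_port(0).data.get_shape())
+
+
+class MyOpFrontExtractor(FrontExtractorOp):
+    op = 'MyOp'  # framework operation type that triggers this extractor
+    enabled = True
+
+    @classmethod
+    def extract(cls, node):
+        # attributes would normally be parsed from the framework-specific
+        # message saved by the loader in the 'pb' node attribute
+        MyOp.update_node_stat(node, {})
+        return cls.enabled
+```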
+ +The result of the operation attributes extracting step can be depicted like in the following example: + +![Graph After Attributes Extraction](../../../img/MO_graph_after_extractors.png) + +The only difference in the graph from the previous step is that nodes contain a dictionary with extracted attributes and +operation-specific attributes needed for the Model Optimizer. Starting from this step, the Model Optimizer does not +need the original representation of the operation/model and uses just the Model Optimizer representation (there are some +very specific cases when the Model Optimizer still uses the `pb` attribute; they are partially covered in this +document). A detailed list of common node attributes and their values is provided below in +[Model Optimizer Operation](#extension-operation). + +### Front Phase +For legacy reasons, a user must specify shapes for all not fully-defined inputs of the model. In contrast, other +machine learning frameworks, like TensorFlow\*, let the user create a model with undefined or partially defined input shapes. +As an example, an undefined dimension is marked with an integer value `-1` in a TensorFlow\* model or has some string name +in an ONNX\* model. + +During the front phase, the Model Optimizer knows the shapes of the model inputs and constants only and does not know the shapes +(or even ranks) of the intermediate tensors. However, information about shapes may not be needed to implement a particular +transformation. For example, the transformation `extensions/front/TopKNormalize.py` removes an attribute `k` from a +`TopK` node and adds an input constant with the value `k`. The transformation is needed to convert a `TopK` operation +coming from frameworks, where the number of output elements is defined as an attribute of the operation, to the +OpenVINO™ [TopK](../../../ops/sort/TopK_3.md) operation semantic, which requires this value to be a separate input. + +It is important to mention that sometimes it seems like a transformation cannot be implemented during the front phase +because the actual values of inputs or shapes are needed. In fact, shape or value manipulations can be implemented +using operations that are added to the graph. Consider the transformation +`extensions/front/onnx/flattenONNX_to_reshape.py` which replaces an ONNX\* operation +[Flatten](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Flatten) with a sub-graph of operations performing +the following (for the case when `axis` is not equal to 0 and 1): + +1. Calculate the shape of the `Flatten` input tensor using the [ShapeOf](../../../ops/shape/ShapeOf_3.md) operation. +2. Get the first `axis` elements from the output of the `ShapeOf` operation and calculate their product using the +[ReduceProd](../../../ops/reduction/ReduceProd_1.md) operation. +3. Concatenate the output of `ReduceProd` and a constant with the value `-1` (refer to the +[Reshape](../../../ops/shape/Reshape_1.md) specification for an explanation of this value). +4. Use the concatenated value as the second input to the `Reshape` operation. + +It is highly recommended to write shape-agnostic transformations to avoid model reshape-ability issues. Refer to +[Using Shape Inference](../../../IE_DG/ShapeInference.md) for more information related to the reshaping of a model. + +More information on how to develop front phase transformations and a dedicated API description are provided in +[Front Phase Transformations](#front-phase-transformations). 
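+
+As an illustration of the attribute-to-input conversion described above, the following is a minimal sketch of a generic front phase transformation, similar in spirit to `extensions/front/TopKNormalize.py`. The operation type `MyTopK` and its attribute `k` are assumptions made for illustration only:
+
+```py
+import numpy as np
+
+from mo.front.common.replacement import FrontReplacementPattern
+from mo.graph.graph import Graph
+from mo.ops.const import Const
+
+
+class MyTopKNormalize(FrontReplacementPattern):
+    enabled = True
+
+    def find_and_replace_pattern(self, graph: Graph):
+        for node in graph.get_op_nodes(op='MyTopK'):
+            # if the second input is not connected, convert the 'k' attribute
+            # into an input constant required by the target operation semantic
+            if node.in_port(1).disconnected() and node.has_valid('k'):
+                k_const = Const(graph, {'value': np.array(node.k, dtype=np.int64)}).create_node()
+                node.in_port(1).connect(k_const.out_port(0))
+```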
+ +### Partial Inference +Model Optimizer performs a partial inference of a model during a model conversion. This procedure includes calculation of output shapes +for all operations in a model and constant folding (value calculation for constant sub-graphs). Constant +folding is needed for shape inference because in some cases evaluation of a constant sub-graph is needed to calculate +output shapes. For example, the output shape for the [Reshape](../../../ops/shape/Reshape_1.md) operation may be +defined as a mathematical expression using the [ShapeOf](../../../ops/shape/ShapeOf_3.md) operation output. + +> **NOTE**: Model Optimizer does not fold sub-graphs starting from the [ShapeOf](../../../ops/shape/ShapeOf_3.md) +> operation by default because this leads to a model non-reshape-ability (the command line parameter `--static_shape` +> can override this behavior). Refer to [Using Shape Inference](../../../IE_DG/ShapeInference.md) for more information +> related to reshaping of a model. + +Model Optimizer calculates output shapes for all operations in a model to write them to Intermediate Representation +files. + +> **NOTE**: This is a legacy requirement because starting from IR version 10, the Inference Engine needs to know the shapes of +> the [Const](../../../ops/infrastructure/Constant_1.md) and the [Parameter](../../../ops/infrastructure/Parameter_1.md) +> operations only. The nGraph component of the Inference Engine calculates output shapes for all operations in a model +> using shapes of [Parameter](../../../ops/infrastructure/Parameter_1.md) and +> [Const](../../../ops/infrastructure/Constant_1.md) operations defined with respective operation attributes. + +Model Optimizer inserts "data" nodes into the computation graph before starting the partial inference phase. A data node +corresponds to the specific tensor produced by an operation. Each data node contains two attributes: `shape` +containing the shape of the tensor and `value` which may contain the actual value of the tensor. The `value` +attribute is equal to `None` if the tensor value cannot be calculated. This happens in two cases: when the tensor value +depends on the values passed to the [Parameter](../../../ops/infrastructure/Parameter_1.md) operation of a model, or when the +Model Optimizer does not have a value propagation implementation for the operation. + +The graph before running the partial inference can be depicted like in the following example: + +![Graph Before Partial Inference](../../../img/MO_graph_before_partial_inference.png) + +The difference in the graph structure compared to the front phase is not only in the data nodes, but also in the +edge attributes. Note that an `out` attribute is specified for edges **from operation** nodes only, while an `in` +attribute is specified for edges **from data** nodes only. This corresponds to the fact that a tensor (data node) is +produced from a specific output port of an operation and is consumed through a specific input port of an operation. Also, +a unique data node is created for each output port of an operation and may be used as an input node for several +operation nodes, like the data node "data2_0", which is consumed by the input port 1 of the operation "Operation 3" and +the input port 0 of the operation "Operation 5". + +Now consider how the Model Optimizer performs shape and value propagation. Model Optimizer performs a topological sort of the graph +nodes. An error message is thrown if a graph contains a cycle. 
Then shape inference functions are called for +each node in the graph according to the topological order. Each node of the graph must have an attribute called `infer` +with a shape inference function, which is a function with one parameter – an instance of the `Node` class. The `infer` +attribute is usually set in the operation extractor or when a node is added in some transformation using the Model +Optimizer operation class inherited from the `mo.ops.Op` class. Refer to the [Model Optimizer Operation](#extension-operation) +and [Operation Extractor](#operation-extractor) for more information on how to specify a shape inference function. + +A shape inference function should calculate an operation (node) output shape(s) based on input shape(s) and operation +(node) attribute(s) and update `shape` and optionally `value` attributes of the corresponding data node(s). A simplified +example of the shape inference function for the [Reshape](../../../ops/shape/Reshape_1.md) operation (the full version is +available in the file `mo/ops/reshape.py`): + +```py + @staticmethod + def infer(node: Node): + name = node.soft_get('name', node.id) + + input_shape = node.in_port(0).data.get_shape() # get the input tensor shape + new_shape = node.in_port(1).data.get_value() # get the value defining the output tensor shape. This tensor may + # have special values like 0 and -1 + + output_shape = ... # calculate output shape without special values like 0 and -1 + + if node.in_port(0).data.get_value() is not None: # if the input value is defined then calculate output value; + # shape will be updated automatically with the value shape + node.out_port(0).data.set_value(node.in_port(0).data.get_value().reshape(output_shape)) + else: # in the opposite case calculate the output shape only + node.out_port(0).data.set_shape(output_shape) +``` + +Methods `in_port()` and `out_port()` of the `Node` class are used to get and set data node attributes. Refer to the +[Graph Traversal and Modification Using `Port`s and `Connection`s](#graph-ports-and-connections) section on how to use +them. + +> **NOTE**: A shape inference function should perform output shape calculation in the original model layout. For +> example, OpenVINO™ supports Convolution operations in NCHW layout only but TensorFlow\* supports NHWC layout as +> well. The Model Optimizer shape inference function calculates output shapes for NHWC Convolutions in the NHWC layout, and the +> shape is converted to NCHW only during the layout change phase. + +> **NOTE**: There is a legacy approach to read data node attributes, like `input_shape = op_node.in_node(0).shape`, and +> to modify them, like `op_node.out_node(0).shape = some_value`. This approach is still used in the Model +> Optimizer code but is not recommended. Instead, use the approach described in [Ports](#intro-ports). + +### Middle Phase +The middle phase starts after the partial inference. At this phase, a graph contains data nodes, and output shapes of all +operations in the graph have been calculated. Any transformation implemented at this stage must update the `shape` +attribute for all newly added operations. It is highly recommended to use the API described in +[Graph Traversal and Modification Using `Port`s and `Connection`s](#graph-ports-and-connections) because modification of +a graph using this API causes automatic re-inference of affected nodes as well as creation of the necessary data nodes. 
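+
+As an illustration, the following is a minimal sketch of a generic middle phase transformation which removes nodes of a hypothetical no-op type `MyIdentity` using the recommended ports and connections API (the operation type is an assumption made for illustration only):
+
+```py
+from mo.graph.graph import Graph
+from mo.middle.replacement import MiddleReplacementPattern
+
+
+class RemoveMyIdentity(MiddleReplacementPattern):
+    enabled = True
+
+    def find_and_replace_pattern(self, graph: Graph):
+        for node in graph.get_op_nodes(op='MyIdentity'):
+            # re-connect consumers of the node output directly to the producer
+            # of the node input; affected data nodes are handled automatically
+            node.out_port(0).get_connection().set_source(node.in_port(0).get_source())
+            graph.remove_node(node.id)
+```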
+ +More information on how to develop middle transformations and a dedicated API description are provided in +[Middle Phase Transformations](#middle-phase-transformations). + +### NHWC to NCHW Layout Change +There are several middle transformations responsible for changing the model layout from NHWC to NCHW. These transformations +are triggered by default for TensorFlow\* models only because it is the only framework with Convolution operations in the +NHWC layout. + +> **NOTE**: If a TensorFlow\* model is in the NCHW layout, a user should specify the `--disable_nhwc_to_nchw` command line +> parameter to disable these transformations. + +The layout change is a complex problem and a detailed explanation of it is out of the scope of this document. A very brief +explanation of this process is provided below: + +1. Model Optimizer changes output shapes of most operations producing 4D and 5D (four dimensional and five +dimensional) tensors as if they were in the NHWC layout to the NCHW layout: `nchw_shape = np.array(nhwc_shape)[[0, 3, 1, 2]]` for +4D and `nchw_shape = np.array(nhwc_shape)[[0, 4, 1, 2, 3]]` for 5D. This permutation does not happen for some operations +with specific conditions identified during a model conversion. +2. Model Optimizer inserts [Gather](../../../ops/movement/Gather_1.md) operations into the sub-graphs related to shape +calculation to perform the shape calculation in the correct layout. +3. Model Optimizer inserts [Transpose](../../../ops/movement/Transpose_1.md) operations for some operations with +specific conditions identified during a model conversion to produce correct inference results. + +The main transformations responsible for the layout change are: `extensions/middle/ApplyPermutations.py`, +`extensions/middle/InsertLayoutPropagationTransposes.py`, `extensions/middle/MarkSubgraphsWithCorrectLayout.py`, +`extensions/middle/ApplyNHWCtoNCHWpermutation.py` and `extensions/middle/LayoutChangeForConstantShapePaths.py`. +Refer to the source code of these transformations for more details on how the layout change works. + +### Back Phase +The back phase starts after the layout change to NCHW. This phase mostly contains the following transformations: + +1. Transformations which should work with a graph in the NCHW layout and thus cannot be implemented in the middle +phase. +2. Transformations which replace nodes corresponding to internal Model Optimizer operations with nodes corresponding to +[opset](@ref openvino_docs_ops_opset) operations. +3. Transformations which normalize operation inputs according to the specification. +4. Final optimization transformations. + +A graph structure during the back phase is the same as during the middle phase. There is no difference in writing middle +and back transformations. + +More information on how to develop back transformations and a dedicated API description are provided in +[Back Phase Transformations](#back-phase-transformations). + +### Intermediate Representation Emitting +The last phase of a model conversion is the Intermediate Representation emitting. Model Optimizer performs the following +steps: + +1. Iterates over all operation nodes in the graph and checks that all nodes have the `type` attribute set. This attribute +defines the operation type and is used in the Inference Engine to instantiate a proper operation from the +[opset](@ref openvino_docs_ops_opset) specified in the `version` attribute of the node. If some node does not have the +`type` attribute or its value is equal to `None`, the Model Optimizer exits with an error. +2. 
Performs type inference of graph operations similar to the shape inference. Inferred data types are saved to port +attributes in the IR. +3. Performs a topological sort of the graph and changes the `id` attribute of all operation nodes to sequential integer +values starting from 0. +4. Saves all constant values to the `.bin` file. Constants with the same value are shared among different operations. +5. Generates an `.xml` file defining the graph structure. The information about operation inputs and outputs is prepared +uniformly for all operations regardless of their type. A list of attributes to be saved to the `.xml` file is defined +with the `backend_attrs()` or `supported_attrs()` methods of the `Op` class used for graph node instantiation. For more +information on how the operation attributes are saved to XML, refer to the function `prepare_emit_ir()` in +the `mo/pipeline/common.py` file and [Model Optimizer Operation](#extension-operation). + +## Graph Traversal and Modification Using `Port`s and `Connection`s +There are three APIs for graph traversal and transformation used in the Model Optimizer: +1. The API provided with the `networkx` Python library for the `networkx.MultiDiGraph` class, which is the base class for +the `mo.graph.graph.Graph` object. Refer to the [Model Representation in Memory](#model-representation-in-memory) for +more details. For example, the following methods belong to this API level: `graph.add_edges_from([list])`, +`graph.add_node(x, attrs)`, `graph.out_edges(node_id)` etc., where `graph` is an instance of the `networkx.MultiDiGraph` +class. **This is the lowest-level API and its usage should be avoided in the Model Optimizer transformations**. +2. The API built around the `mo.graph.graph.Node` class. The `Node` class is the primary class to work with graph nodes +and their attributes. **There are some `Node` class methods not recommended for use and some functions defined in +`mo.graph.graph` that have been deprecated**. Examples of such methods and functions are: +`node.in_node(y)`, `node.out_node(x)`, `node.get_outputs()`, `node.insert_node_after(n1, y)`, `create_edge(n1, n2)` etc. +Refer to the `mo/graph/graph.py` for more details. +3. The high-level API called Model Optimizer Graph API, which uses the `mo.graph.graph.Graph`, `mo.graph.port.Port` and +`mo.graph.connection.Connection` classes. For example, the following methods belong to this API level: +`node.in_port(x)`, `node.out_port(y)`, `port.get_connection()`, `connection.get_source()`, +`connection.set_destination(dest_port)` etc. **This is the recommended API to be used in the Model Optimizer +transformations and operations implementation**. + +The main benefit of using the Model Optimizer Graph API is that it hides some internal implementation details (the fact that +the graph contains data nodes), provides an API to perform safe and predictable graph manipulations, and adds operation +semantics to the graph. This is achieved by introducing the concepts of ports and connections. This chapter is +dedicated to the Model Optimizer Graph API and does not cover the other two non-recommended APIs. + +### Ports +An operation semantic describes how many inputs and outputs the operation has. 
For example, the operations +[Parameter](../../../ops/infrastructure/Parameter_1.md) and [Const](../../../ops/infrastructure/Constant_1.md) have no +inputs and have one output, the operation [ReLU](../../../ops/activation/ReLU_1.md) has one input and one output, and the operation +[Split](../../../ops/movement/Split_1.md) has 2 inputs and a variable number of outputs depending on the value of the +attribute `num_splits`. + +Each operation node in the graph (an instance of the `Node` class) has 0 or more input and output ports (instances of +the `mo.graph.port.Port` class). A `Port` object has several attributes: +* `node` - the instance of the `Node` object the port belongs to. +* `idx` - the port number. Input and output ports are numbered independently starting from `0`. Thus, the operation +[ReLU](../../../ops/activation/ReLU_1.md) has one input port (with index `0`) and one output port (with index `0`). +* `type` - the type of the port. It can be equal to either `"in"` or `"out"`. +* `data` - the object which should be used to get attributes of the corresponding data node. This object has methods +`get_shape()` / `set_shape()` and `get_value()` / `set_value()` to get/set shape/value of the corresponding data node. +For example, `in_port.data.get_shape()` returns the input shape of a tensor connected to the input port `in_port` +(`in_port.type == 'in'`), and `out_port.data.get_value()` returns the value of a tensor produced from the output port `out_port` +(`out_port.type == 'out'`). + +> **NOTE**: Functions `get_shape()` and `get_value()` return `None` until the partial inference phase. Refer to the +> [Model Conversion Pipeline](#model-conversion-pipeline) for more information about the model conversion phases and to +> [Partial Inference](#partial-inference) about the partial inference phase. + +There are several methods of the `Node` class to get the instance of a corresponding port: +* `in_port(x)` and `out_port(x)` to get the input/output port with number `x`. +* `in_ports()` and `out_ports()` to get a dictionary, where the key is a port number and the value is the corresponding +input/output port. + +Attributes `in_ports_count` and `out_ports_count` of the `Op` class instance define the default number of input and output +ports to be created for the `Node`. However, additional input/output ports can be added using the methods +`add_input_port()` and `add_output_port()`. Ports can also be removed using the `delete_input_port()` and +`delete_output_port()` methods. + +The `Port` class is just an abstraction which works with edges incoming/outgoing to/from a specific `Node` instance. For +example, the output port with `idx = 1` corresponds to the outgoing edge of a node with an attribute `out = 1`, and the input +port with `idx = 2` corresponds to the incoming edge of a node with an attribute `in = 2`. + +Consider an example of a graph part with 4 operation nodes "Op1", "Op2", "Op3" and "Op4" and a number of data nodes +depicted with light green boxes. + +![Ports example 1](../../../img/MO_ports_example_1.png) + +Operation nodes have input ports (yellow squares) and output ports (light purple squares). An input port may not be +connected. For example, the input port 2 of node "Op1" does not have an incoming edge, while an output port always has an +associated data node (after the partial inference, when the data nodes are added to the graph), which may have no +consumers. + +Ports can be used to traverse a graph. The method `get_source()` of an input port returns an output port producing the +tensor the input port consumes. 
+
+Ports can be used to traverse a graph. The method `get_source()` of an input port returns an output port producing the
+tensor the input port consumes. It is important that the method works the same during the front, middle and back phases of a
+model conversion even though the graph structure changes (there are no data nodes in the graph during the front phase).
+
+Let's assume that there are 4 instances of the `Node` object `op1`, `op2`, `op3` and `op4` corresponding to nodes "Op1", "Op2",
+"Op3" and "Op4", respectively. The result of `op2.in_port(0).get_source()` and `op4.in_port(1).get_source()` is the
+same object `op1.out_port(1)` of type `Port`.
+
+The method `get_destination()` of an output port returns the input port of the node consuming this tensor. If there are
+multiple consumers of this tensor, then an error is raised. The method `get_destinations()` of an output port returns a
+list of input ports consuming the tensor.
+
+The method `disconnect()` removes a node incoming edge corresponding to the specific input port. The method removes
+several edges if it is applied during the front phase for a node output port connected with multiple nodes.
+
+The method `port.connect(another_port)` connects output port `port` and input port `another_port`. The method handles
+situations when the graph contains data nodes (middle and back phases): it does not just create an edge between two nodes,
+but also automatically creates a new data node or re-uses an existing one. If the method is used during the front phase, when
+data nodes do not exist, the method creates an edge and properly sets the `in` and `out` edge attributes.
+
+For example, applying the following two methods to the graph above will result in the graph depicted below:
+
+```py
+op4.in_port(1).disconnect()
+op3.out_port(0).connect(op4.in_port(1))
+```
+
+![Ports example 2](../../../img/MO_ports_example_2.png)
+
+> **NOTE**: Refer to the `Node` class implementation in the `mo/graph/graph.py` and the `Port` class implementation in
+the `mo/graph/port.py` for a full list of available methods.
+
+### Connections
+A connection is a concept introduced to easily and reliably perform graph modifications. A connection corresponds to a
+link between a source output port and one or more destination input ports, or a link between a destination input port
+and the source output port producing the data. So each port is connected with one or more ports with the help of a connection.
+Model Optimizer uses the `mo.graph.connection.Connection` class to represent a connection.
+
+There is only one method, `get_connection()`, of the `Port` class to get the instance of the corresponding `Connection`
+object. If the port is not connected, then the returned value is `None`.
+
+For example, the method `op3.out_port(0).get_connection()` returns a `Connection` object encapsulating the edge from node
+"Op3" to data node "data_3_0" and the two edges from data node "data_3_0" to the two ports of node "Op4".
+
+The `Connection` class provides methods to get the source and destination(s) ports the connection corresponds to:
+* `connection.get_source()` - returns an output `Port` object producing the tensor.
+* `connection.get_destinations()` - returns a list of input `Port`s consuming the data.
+* `connection.get_destination()` - returns a single input `Port` consuming the data. If there are multiple consumers,
+then an exception is raised.
+
+The `Connection` class provides methods to modify a graph by changing the source or destination(s) of a connection. For
+example, the function call `op3.out_port(0).get_connection().set_source(op1.out_port(0))` changes the source port of edges
+consuming data from port `op3.out_port(0)` to `op1.out_port(0)`.
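+
+The same mechanism makes it easy to insert a node into an existing connection. A minimal sketch, assuming `node` is an
+existing `Node` and `new_node` is a freshly created node with one input and one output port:
+
+```py
+source = node.in_port(0).get_source()                              # remember the producer port
+node.in_port(0).get_connection().set_source(new_node.out_port(0))  # re-wire the consumer to the new node
+source.connect(new_node.in_port(0))                                # feed the new node from the old producer
+```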
+
+The graph transformed with the `set_source()` call above is depicted below:
+
+![Connection example 1](../../../img/MO_connection_example_1.png)
+
+Another example is the method `connection.set_destination(dest_port)`. It disconnects `dest_port` and all input ports
+the connection is currently connected to, and connects the connection source port to `dest_port`.
+
+Note that connections work seamlessly during the front, middle and back phases and hide the fact that the graph structure is
+different.
+
+> **NOTE**: Refer to the `Connection` class implementation in the `mo/graph/connection.py` for a full list of available
+methods.
+
+## Model Optimizer Extensions
+Model Optimizer extensions allow injecting logic into the model conversion pipeline without changing the Model
+Optimizer core code. There are three types of Model Optimizer extensions:
+
+1. A Model Optimizer operation.
+2. A framework operation extractor.
+3. A model transformation, which can be executed during the front, middle or back phase of the model conversion.
+
+An extension is just a plain text file with Python code. The file should contain a class (or classes) inherited from
+one of the extension base classes. Extension files should be saved to a directory with the following structure (the
+placeholder names in angle brackets are illustrative):
+
+```sh
+./<extensions_dir>/
+       ops/                 - custom operations
+       front/               - framework independent front transformations
+             <framework_1>/ - front transformations for <framework_1> models only and extractors for operations
+             <framework_2>/ - front transformations for <framework_2> models only and extractors for operations
+             ...
+       middle/              - middle transformations
+       back/                - back transformations
+```
+
+Model Optimizer uses the same layout internally to keep built-in extensions. The only exception is that the directory
+`mo/ops/` is also used as a source of the Model Optimizer operations due to historical reasons.
+
+> **NOTE**: The name of a root directory with extensions should not be equal to "extensions" because it will result in a
+> name collision with the built-in Model Optimizer extensions.
+
+> **NOTE**: Model Optimizer itself is built using these extensions, so there is a huge number of examples of how to use
+> them in the Model Optimizer code.
+
+### Model Optimizer Operation
+Model Optimizer defines the class `mo.ops.Op` (referred to as `Op` later in this document for brevity), which is the base class
+for an operation used in the Model Optimizer. An instance of the `Op` class serves several purposes:
+
+1. Stores the operation attributes.
+2. Stores the operation shape/value and type inference functions.
+3. Defines the operation attributes to be saved to the corresponding IR section.
+4. Contains convenient methods to create a graph node from an `Op` object instance and connect it with the existing
+graph.
+5. Is used in the extractors to store parsed attributes and operation specific attributes in the dedicated graph node.
+
+It is important to mention that there is no connection between the instance of the `Op` class and the `Node` object
+created from it. The `Op` class is just an attributes container describing the operation. Model Optimizer uses the `Op`
+class during a model conversion to create a node of the graph with attributes copied from the `Op` class instance. Graph
+manipulations are performed with graph `Node`s and their attributes and do not involve `Op`s.
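+
+A minimal sketch of purpose 4, creating a node from an `Op` instance; the `graph` object is an assumption obtained
+elsewhere, and the node name is illustrative:
+
+```py
+import numpy as np
+
+from mo.ops.const import Const  # the Const operation class used in the extractor examples below
+
+value = np.array([1.0, 2.0], dtype=np.float32)
+const_node = Const(graph, {'name': 'my_const', 'value': value}).create_node()
+# "const_node" is a Node with attributes copied from the Op instance; the Op object itself
+# is not referenced by the node afterwards
+```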
+
+There are a number of common attributes used in the operations. Here is the list of these attributes with a description:
+
+* `id` — a unique identifier of a node in a graph. Generated automatically, equal to the number of nodes in the graph plus
+1, if not specified. **Mandatory**.
+* `name` — the name of the operation. Generated automatically, equal to the `id`, if not specified. **Mandatory**.
+* `type` — the type of the operation according to the [opset specification](@ref openvino_docs_ops_opset). For internal
+Model Optimizer operations this attribute should be set to `None`. The model conversion fails if an operation with
+`type` equal to `None` comes to the IR emitting phase. **Mandatory**.
+* `version` — the operation set (opset) name the operation belongs to. If not specified, then the Model Optimizer sets it
+equal to `experimental`. Refer to [nGraph Basic Concepts](@ref openvino_docs_nGraph_DG_basic_concepts) for more
+information about operation sets. **Mandatory**.
+* `op` — the Model Optimizer type of the operation. In many cases the value of `type` is equal to the value of `op`. But
+when the Model Optimizer cannot instantiate an opset operation during model loading, it creates an instance of an internal
+operation, and the attribute `op` is used as the type of this internal operation. Later in the pipeline the node created
+from an internal operation will be replaced during the front, middle or back phase with node(s) created from the opset.
+* `infer` — the attribute defines a function calculating output tensor(s) shape and optionally value(s). The attribute
+may be set to `None` for internal Model Optimizer operations used during the front phase only. Refer to
+[Partial Inference](#partial-inference) for more information about the shape inference function (a minimal sketch is
+shown after this list).
+* `type_infer` — the attribute defines a function calculating output tensor(s) data type. If the attribute is not
+defined, then the default function is used. The function checks if the node attribute `data_type` is set and then
+propagates this type to the output tensor from port 0; otherwise, it propagates the data type of the tensor coming
+into input port 0 to the output tensor from port 0.
+* `in_ports_count` — the default number of input ports to be created for the operation. Additional ports can be created or
+redundant ports can be removed using dedicated `Node` class API methods.
+* `out_ports_count` — the default number of output ports to be created for the operation. Additional ports can be created or
+redundant ports can be removed using dedicated `Node` class API methods.
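+
+A minimal sketch of a shape inference function suitable for the `infer` attribute of an element-wise operation; the
+function name is illustrative:
+
+```py
+from mo.graph.graph import Node
+
+
+def copy_shape_infer(node: Node):
+    # an element-wise operation keeps the shape of its input tensor
+    node.out_port(0).data.set_shape(node.in_port(0).data.get_shape())
+```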
+
+Here is an example of the Model Optimizer class for the operation [SoftMax](../../../ops/activation/SoftMax_1.md) from
+the file `mo/ops/softmax.py` with in-code comments.
+
+```py
+class Softmax(Op):
+    # the class attribute defines a name of the operation so the operation class can be obtained using the
+    # "Op.get_op_class_by_name()" static method
+    op = 'SoftMax'
+
+    # the operation works as an extractor by default. This is a legacy behaviour which is no longer recommended,
+    # thus the "enabled" class attribute is set to False. The recommended approach is to use a dedicated extractor extension
+    enabled = False
+
+    def __init__(self, graph: Graph, attrs: dict):
+        super().__init__(graph, {  # the constructor of the base class Op is called with additional default attributes
+            'type': __class__.op,  # the operation is from the opset so the type is set to 'SoftMax'
+            'op': __class__.op,  # internal Model Optimizer operation has the same type
+            'version': 'opset1',  # the operation corresponds to opset1
+            'infer': Softmax.infer,  # shape inference function is defined below
+            'axis': 1,  # default value for the "axis" attribute of the operation SoftMax
+            'in_ports_count': 1,  # the operation has one input
+            'out_ports_count': 1,  # the operation produces one output
+        }, attrs)
+
+    # the method returns an operation specific attributes list. This method is important when implementing an
+    # extractor inherited from the CaffePythonFrontExtractorOp class to extract attributes for a Caffe Python operation.
+    # But currently it is used interchangeably with the "backend_attrs()" method. If "backend_attrs()" is not defined,
+    # then "supported_attrs()" is used instead. In this particular case the operation has just one attribute "axis"
+    def supported_attrs(self):
+        return ['axis']
+
+    @staticmethod
+    def infer(node: Node):
+        "some code calculating output shape and values"
+```
+
+There is a dedicated method called `backend_attrs()` defining a list of attributes to be saved to the IR. Consider an
+example from the `mo/ops/pooling.py` file:
+```py
+    def backend_attrs(self):
+        return [
+            ('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))),
+            ('kernel', lambda node: ','.join(map(str, node['window'][node.spatial_dims]))),
+
+            ('pads_begin', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0)))),
+            ('pads_end', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1)))),
+
+            ('pool-method', 'pool_method'),
+            ('exclude-pad', 'exclude_pad'),
+
+            'rounding_type',
+            'auto_pad',
+        ]
+```
+
+The `backend_attrs()` function returns a list of records. A record can be of one of the following formats:
+1. A string defining the attribute to be saved to the IR. If the value of the attribute is `None`, then the attribute is
+not saved. Examples of this case are `rounding_type` and `auto_pad`.
+2. A tuple where the first element is a string defining the name of the attribute as it will appear in the IR and the
+second element is a function to produce the value for this attribute. The function gets an instance of the `Node` as the
+only parameter and returns a string with the value to be saved to the IR. Examples of this case are `strides`, `kernel`,
+`pads_begin` and `pads_end`.
+3. A tuple where the first element is a string defining the name of the attribute as it will appear in the IR and the
+second element is the name of the `Node` attribute to get the value from. Examples of this case are `pool-method` and
+`exclude-pad`.
+
+### Operation Extractor
+Model Optimizer runs a specific extractor for each operation in the model during the model loading. Refer to
+[operations-attributes-extracting](#operations-attributes-extracting) for more information about this process.
+
+There are several types of Model Optimizer extractor extensions:
+1. The generic one, which is described in this section.
+2. The special extractor for Caffe\* models with Python layers. This kind of extractor is described in
+[Extending the Model Optimizer with Caffe* Python Layers](Extending_Model_Optimizer_with_Caffe_Python_Layers.md).
+3. The special extractor for MXNet\* models with custom operations. This kind of extractor is described in
+[Extending the Model Optimizer for Custom MXNet* Operations](Extending_MXNet_Model_Optimizer_with_New_Primitives.md).
+4. The special extractor with a fallback to Caffe\* for shape inference, which is described in
+[Legacy Mode for Caffe* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md).
+
+This chapter is focused on option 1, which provides a generic mechanism for an operation extractor applicable to
+all frameworks. Model Optimizer provides the class `mo.front.extractor.FrontExtractorOp` as a base class to implement the
+extractor. It has the class method `extract`, which gets a single parameter `Node` corresponding to the graph node to
+extract data from. The operation description in the original framework format is stored in the attribute `pb` of the
+node. The goal of the extractor is to parse this attribute and save the necessary attributes to the corresponding node of the
+graph.
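+
+Every generic extractor follows the same shape. A minimal skeleton; the operation name, the `MyOp` class and its module
+path are assumptions used for illustration only:
+
+```py
+from extensions.ops.my_op import MyOp  # hypothetical module with the dedicated Op class
+from mo.front.extractor import FrontExtractorOp
+
+
+class MyOpExtractor(FrontExtractorOp):
+    op = 'MyOp'     # the framework operation type that triggers this extractor
+    enabled = True  # the extractor is enabled
+
+    @classmethod
+    def extract(cls, node):
+        attrs = {}  # parse "node.pb" here and fill the attributes dictionary
+        MyOp.update_node_stat(node, attrs)
+        return cls.enabled
+```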
The string "pb.attr["value"].tensor" is just a Python binding for Protobuf message parsing + pb_tensor = node.pb.attr["value"].tensor + # get the shape of the tensor from the protobuf message using the helper function "tf_tensor_shape" + shape = tf_tensor_shape(pb_tensor.tensor_shape) + # create a dictionary with necessary attributes + attrs = { + 'shape': shape, + # get the tensor value using "tf_tensor_content" helper function + 'value': tf_tensor_content(pb_tensor.dtype, shape, pb_tensor), + # get the tensor data type using "tf_dtype_extractor" helper function + 'data_type': tf_dtype_extractor(pb_tensor.dtype), + } + # update the node attributes using default attributes from the "Const" operation and attributes saved to the + # "attrs" dictionary + Const.update_node_stat(node, attrs) + return cls.enabled +``` + +Consider another example with an extractor of ONNX\* operation `Constant` (refer to the file +`extensions/front/onnx/const_ext.py`): + +```py +from onnx import numpy_helper +from onnx.numpy_helper import to_array + +from mo.front.extractor import FrontExtractorOp +from mo.front.onnx.extractors.utils import onnx_attr +from mo.ops.const import Const + + +class ConstantExtractor(FrontExtractorOp): + op = 'Constant' + enabled = True + + @classmethod + def extract(cls, node): + # use helper method "onnx_attr" which parses the Protobuf representation of the operation saved in the "node" + # gets the value of the attribute with name "value" as "TensorProto" type (specified with a keyword "t") + pb_value = onnx_attr(node, 'value', 't') + # use ONNX helper method "numpy_helper.to_array()" to convert "TensorProto" object to a numpy array + value = numpy_helper.to_array(pb_value) + + attrs = { + 'data_type': value.dtype, + 'value': value, + } + # update the node attributes using default attributes from the "Const" operation and attributes saved to the + # "attrs" dictionary + Const.update_node_stat(node, attrs) + return cls.enabled +``` + +The extractors for operations from different frameworks work similarly. The only difference is in the helper methods +used to parse operation attributes encoded with a framework-specific representation. + +A common practice is to use `update_node_stat()` method of the dedicated `Op` class to update the node attributes. This +method does the following: + +1. Sets values for common attributes like `op`, `type`, `infer`, `in_ports_count`, `out_ports_count`, `version` etc to +values specific to the dedicated operation (`Const` operation in this case). +2. Uses methods `supported_attrs()` and `backend_attrs()` defined in the `Op` class to update specific node attribute +`IE`. The IR emitter uses the value stored in the `IE` attribute to pre-process attribute values and save them to IR. +3. Optionally sets additional attributes provided to the `update_node_stat()` function as a second parameter. Usually +these attributes are parsed from the particular instance of the operation. + +> **NOTE**: Model Optimizer uses numpy arrays to store values and numpy arrays of type `np.int64` to store shapes in the +> graph. + +### Graph Transformation Extensions +Model Optimizer provides various base classes to implement [Front Phase Transformations](#front-phase-transformations), +[Middle Phase Transformations](#middle-phase-transformations) and [Back Phase Transformations](#back-phase-transformations). +All classes have the following common class attributes and methods: +1. Attribute `enabled` specifies whether the transformation is enabled or not. 
+
+### Graph Transformation Extensions
+Model Optimizer provides various base classes to implement [Front Phase Transformations](#front-phase-transformations),
+[Middle Phase Transformations](#middle-phase-transformations) and [Back Phase Transformations](#back-phase-transformations).
+All classes have the following common class attributes and methods (a minimal skeleton is shown at the end of this section):
+1. Attribute `enabled` specifies whether the transformation is enabled or not. The value can be changed during runtime
+to enable or disable execution of the transformation during a model conversion. Default value is `True`.
+2. Attribute `id` specifies a unique transformation string identifier. This transformation identifier can be used to
+enable (disable) the transformation by setting the environment variable `MO_ENABLED_TRANSFORMS` (`MO_DISABLED_TRANSFORMS`)
+with a comma separated list of `id`s. The environment variables override the value of the `enabled` attribute of the
+transformation. Optional attribute.
+3. Attribute `run_not_recursively` specifies whether the transformation should be executed in the sub-graphs, for
+example, in the body of a [TensorIterator](../../../ops/infrastructure/TensorIterator_1.md) or a
+[Loop](../../../ops/infrastructure/Loop_5.md). Default value is `True`.
+4. Attribute `force_clean_up` specifies whether the graph clean up should be executed after the transformation. The
+graph clean up removes nodes of the graph not reachable from the model inputs. Default value is `False`.
+5. Attribute `force_shape_inference` specifies whether the nodes marked with the attribute `need_shape_inference` equal to
+`True` should be re-inferred after the transformation. Model Optimizer sets this attribute automatically for nodes whose
+input(s) were changed during the transformation, or a developer can set this attribute manually in the transformation for
+specific nodes. Default value is `False`.
+6. Attribute `graph_condition` specifies a list of functions with one parameter -- the `Graph` object. The transformation
+is executed if and only if all functions return `True`. If the attribute is not set, then no check is performed.
+7. Method `run_before()` returns a list of transformation classes which this transformation should be executed before.
+8. Method `run_after()` returns a list of transformation classes which this transformation should be executed after.
+
+> **NOTE**: Some of the transformation types have specific class attributes and methods which are explained in the
+> corresponding sections of this document.
+
+Model Optimizer builds a graph of dependencies between registered transformations and executes them in the topological
+order. In order to execute the transformation during a proper model conversion phase, the Model Optimizer defines several
+anchor transformations which do nothing. All transformations are ordered with respect to these anchor transformations.
+The diagram below shows the anchor transformations, some built-in transformations and the dependencies between them:
+
+![Transformations Graph](../../../img/MO_transformations_graph.png)
+
+User defined transformations are executed after the corresponding `Start` and before the corresponding `Finish` anchor
+transformations by default (if the `run_before()` and `run_after()` methods have not been overridden).
+
+> **NOTE**: The `PreMiddleStart` and `PostMiddleStart` anchors were introduced due to historical reasons to refactor
+> the Model Optimizer pipeline which initially had a hardcoded order of transformations.
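+
+A minimal skeleton showing how the common attributes and methods listed above fit together; the class name, the
+condition and the body are illustrative assumptions, not an existing transformation:
+
+```py
+from mo.front.common.replacement import FrontReplacementPattern
+from mo.graph.graph import Graph
+
+
+class MyTransformation(FrontReplacementPattern):
+    enabled = True         # the transformation is executed by default
+    force_clean_up = True  # remove the nodes unreachable from the model inputs afterwards
+    # hypothetical condition: run for TensorFlow models only
+    graph_condition = [lambda graph: graph.graph['fw'] == 'tf']
+
+    def run_after(self):
+        return []          # list transformation classes which must run before this one
+
+    def find_and_replace_pattern(self, graph: Graph):
+        pass               # the transformation logic goes here
+```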
+
+#### Front Phase Transformations
+There are several types of front phase transformations:
+
+1. [Pattern-Defined Front Phase Transformations](#pattern-defined-front-phase-transformations) triggered for each
+sub-graph of the original graph isomorphic to the specified pattern.
+2. [Specific Operation Front Phase Transformations](#specific-operation-front-phase-transformations) triggered for the
+node with a specific `op` attribute value.
+3. [Generic Front Phase Transformations](#generic-front-phase-transformations).
+4. Manually enabled transformations defined with a JSON configuration file (for TensorFlow\*, ONNX\* and MXNet\* models
+only) specified using the `--transformations_config` command line parameter:
+    1. [Node Name Pattern Front Phase Transformations](#node-name-pattern-front-phase-transformation).
+    2. [Front Phase Transformations Using Start and End Points](#start-end-points-front-phase-transformations).
+    3. [Generic Front Phase Transformations Enabled with Transformations Configuration File](#generic-transformations-config-front-phase-transformations).
+
+##### Pattern-Defined Front Phase Transformations
+This type of transformation is implemented using `mo.front.common.replacement.FrontReplacementSubgraph` and
+`mo.front.common.replacement.FrontReplacementPattern` as base classes and works the following way:
+1. The developer defines a sub-graph to be matched using a list of nodes with attributes and edges connecting them (edges
+may also have attributes).
+2. Model Optimizer searches for all sub-graphs of the original graph isomorphic to the specified sub-graph (pattern).
+3. Model Optimizer executes the developer-defined function performing graph transformation for each instance of a
+matched sub-graph. The developer can override different functions in the base transformation class so that the Model Optimizer
+behaves differently:
+   1. Override the method `replace_sub_graph(self, graph, match)`. In this case Model Optimizer only executes the
+   overridden function, passing the `graph` object and a dictionary describing the matched sub-graph. The developer is
+   responsible for writing the transformation and connecting the newly created nodes to the rest of the graph.
+   2. Override the method `generate_sub_graph(self, graph, match)`. This approach is not recommended because it is
+   the most complicated one and can usually be replaced with one of the two previous approaches, so it is not
+   explained in this section. The explanation of this function is provided in the
+   [Node Name Defined Sub-Graph Transformations](#node-name-defined-sub-graph-transformations) section.
+
+The sub-graph pattern is defined in the `pattern()` function. This function should return a dictionary with two keys:
+`nodes` and `edges` (a minimal sketch is shown after the list):
+* The value for the `nodes` key is a list of tuples with two elements.
+   * The first element is an alias name for a node which will be used to define edges between nodes and in the
+   transformation function.
+   * The second element is a dictionary with attributes. The key is the name of an attribute which should exist in the
+   node. The value for the attribute can be some specific value to match or a function which gets a single parameter, the
+   attribute value from the node. The function should return the result of the attribute comparison with a dedicated
+   value.
+* The value for the `edges` key is a list of tuples with two or three elements.
+   * The first element is the alias name of the node producing a tensor.
+   * The second element is the alias name of the node consuming the tensor.
+   * The third element (optional) is the dictionary with expected edge attributes. Usually this dictionary contains
+   attributes like `in` and `out` defining input and output ports.
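+
+A minimal sketch of such a `pattern()` method: it matches a `Const` feeding input port 1 of a `Reshape`, and the value
+check for the `Const` uses a function instead of a fixed value (the operation choice is illustrative only):
+
+```py
+def pattern(self):
+    return dict(
+        nodes=[
+            ('const', dict(op='Const', value=lambda value: value is not None)),
+            ('reshape', dict(op='Reshape')),
+        ],
+        edges=[
+            ('const', 'reshape', {'in': 1}),  # the Const output goes to input port 1 of the Reshape
+        ])
+```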
+
+Consider the example of a front transformation implemented in the `extensions/front/Mish_fusion.py` file, which performs
+fusing of the sub-graph defining the [Mish](../../../ops/activation/Mish_4.md) activation function into a single
+operation:
+
+```py
+from extensions.front.Softplus_fusion import SoftplusFusion
+from extensions.ops.activation_ops import Mish
+from mo.front.common.replacement import FrontReplacementSubgraph
+from mo.front.subgraph_matcher import SubgraphMatch
+from mo.graph.graph import Graph, rename_nodes
+
+
+class MishFusion(FrontReplacementSubgraph):
+    """
+    The transformation looks for the pattern with Softplus defining the Mish function: Mish(x) = x * tanh(SoftPlus(x)).
+    """
+    enabled = True  # the transformation is enabled
+
+    def run_after(self):  # run this transformation after the "SoftplusFusion" transformation
+        return [SoftplusFusion]
+
+    def pattern(self):  # define the pattern according to the formula x * tanh(SoftPlus(x))
+        return dict(
+            nodes=[
+                ('mul', dict(op='Mul')),
+                ('tanh', dict(op='Tanh')),
+                ('softplus', dict(op='SoftPlus')),
+            ],
+            edges=[
+                ('softplus', 'tanh'),
+                ('tanh', 'mul'),
+            ])
+
+    def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]):  # entry point for the transformation
+        mul = match['mul']  # get the Node corresponding to the matched "mul" node
+        mul_name = mul.soft_get('name', mul.id)
+        softplus = match['softplus']  # get the Node corresponding to the matched "softplus" node
+
+        # determine the input port of Mul which gets the 'input' node output
+        input_port_idx = int(mul.in_port(0).get_connection().get_source().node.soft_get('op') == 'Tanh')
+
+        # check that the same tensor is provided as input to Mul and SoftPlus
+        if mul.in_port(input_port_idx).get_source() != softplus.in_port(0).get_source():
+            return
+
+        mish = Mish(graph, {}).create_node()  # create the Mish operation
+        mish.in_port(0).connect(mul.in_port(input_port_idx).get_source())  # connect the input to the Mish
+        mul.out_port(0).get_connection().set_source(mish.out_port(0))  # reconnect the outgoing edge from "mul" to Mish
+
+        # rename the created Mish operation to have the name of the "mul" node which produced the value equal to the
+        # Mish output
+        rename_nodes([(mul, mul_name + '/TBR'), (mish, mul_name)])
+```
+
+##### Specific Operation Front Phase Transformations
+This type of transformation is implemented using `mo.front.common.replacement.FrontReplacementOp` as the base class and
+works the following way:
+1. The developer defines an operation type to trigger the transformation.
+2. Model Optimizer searches for all nodes in the graph with the attribute `op` equal to the specified value.
+3. Model Optimizer executes the developer-defined function performing graph transformation for each instance of a matched
+node. The developer can override different functions in the base transformation class so that the Model Optimizer behaves
+differently:
+   1. Override the method `replace_sub_graph(self, graph, match)`. In this case Model Optimizer only executes the overridden
+   function, passing the `graph` object and a dictionary with a single key `op` with the matched node as the value. The developer
+   is responsible for writing the transformation and connecting the newly created nodes to the rest of the graph.
+   2. Override the method `replace_op(self, graph, node)`. In this case Model Optimizer executes the overridden function,
+   passing the `graph` object and the matched node as the `node` parameter. If the function returns an `id` of some node, then
+   the `Node` with this `id` is connected to the consumers of the matched node.
After applying the transformation, the
+   matched node is removed from the graph.
+
+The `FrontReplacementOp` class provides a simpler mechanism to match a single operation with a specific value of the `op`
+attribute (set the class attribute `op` instead of defining the `pattern()` function) and perform the
+transformation.
+
+Consider an example transformation from the file `extensions/front/Pack.py`, which replaces the TensorFlow\* operation
+`Pack`:
+```py
+from mo.front.common.partial_infer.utils import int64_array
+from mo.front.common.replacement import FrontReplacementOp
+from mo.front.tf.graph_utils import create_op_with_const_inputs
+from mo.graph.graph import Node, Graph, rename_nodes
+from mo.ops.concat import Concat
+from mo.ops.unsqueeze import Unsqueeze
+
+
+class Pack(FrontReplacementOp):
+    op = "Pack"  # trigger the transformation for all nodes in the graph with the attribute op = "Pack"
+    enabled = True  # the transformation is enabled
+
+    def replace_op(self, graph: Graph, node: Node):  # entry point for the transformation
+        # create a Concat operation with a number of inputs equal to the number of inputs to Pack
+        out_node = Concat(graph, {'axis': node.axis, 'in_ports_count': len(node.in_ports())}).create_node()
+        pack_name = node.soft_get('name', node.id)
+
+        for ind in node.in_ports():
+            # add a dimension of size 1 to all inputs of the Pack operation and add them as Concat inputs
+            unsqueeze_node = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array([node.axis])},
+                                                         {'name': node.soft_get('name', node.id) + '/Unsqueeze'})
+            node.in_port(ind).get_connection().set_destination(unsqueeze_node.in_port(0))
+            unsqueeze_node.out_port(0).connect(out_node.in_port(ind))
+
+        # rename the created Concat operation to have the name of the "pack" node which produced the value equal to the
+        # Concat output
+        rename_nodes([(node, pack_name + '/TBR'), (out_node, pack_name)])
+        return [out_node.id]  # reconnect the Pack operation consumers to get input from Concat instead
+```
+
+##### Generic Front Phase Transformations
+Model Optimizer provides a mechanism to implement generic front phase transformations. This type of transformation is
+implemented using `mo.front.common.replacement.FrontReplacementSubgraph` or
+`mo.front.common.replacement.FrontReplacementPattern` as base classes. The only condition to execute the transformation
+is that it is enabled. Then the Model Optimizer executes the method `find_and_replace_pattern(self, graph)` and
+provides a `Graph` object as an input.
+
+Consider the example of a generic front transformation from the file `extensions/front/SqueezeNormalize.py` performing
+normalization of the [Squeeze](../../../ops/shape/Squeeze_1.md) operation. An older version of the operation had a list of
+axes to squeeze as an attribute, but now it is a separate input. For backward compatibility the Model Optimizer
+operation supports both semantics, but before the IR generation the operation should be normalized according to the
+specification.
+
+```py
+import logging as log
+
+from mo.front.common.partial_infer.utils import int64_array
+from mo.front.common.replacement import FrontReplacementPattern
+from mo.graph.graph import Graph
+from mo.ops.const import Const
+from mo.utils.error import Error
+
+
+class SqueezeNormalize(FrontReplacementPattern):
+    """
+    Normalizes inputs of the Squeeze layers. The layers should have two inputs: the input with data and the input with the
+    dimensions to squeeze. If the second input is omitted, then all dimensions of size 1 should be removed.
+    """
+    enabled = True  # the transformation is enabled
+
+    def find_and_replace_pattern(self, graph: Graph):  # the function is called unconditionally
+        for squeeze_node in graph.get_op_nodes(op='Squeeze'):  # iterate over all nodes with op='Squeeze'
+            # if the operation has only 1 input node and a non-None 'squeeze_dims' attribute, then convert the attribute
+            # to the operation input
+            if len(squeeze_node.in_nodes()) == 1 and squeeze_node.has_valid('squeeze_dims'):
+                dims_node = Const(graph, {'name': squeeze_node.id + '/Dims',
+                                          'value': int64_array(squeeze_node.squeeze_dims)}).create_node()
+                squeeze_node.in_port(1).connect(dims_node.out_port(0))
+                del squeeze_node['squeeze_dims']
+            # if two inputs already exist, it means that the operation is already normalized
+            elif len(squeeze_node.in_nodes()) == 2:
+                log.debug('The Squeeze node "{}" is already normalized'.format(squeeze_node.name))
+            # in all other cases raise an error
+            else:
+                raise Error('The Squeeze layer "{}" should either have 2 inputs or one input and a "squeeze_dims" '
+                            'attribute'.format(squeeze_node.soft_get('name')))
+```
+
+Refer to the `mo/front/common/replacement.py` for the implementation details on how these front phase transformations
+work.
+
+##### Node Name Pattern Front Phase Transformations
+Let's review a real life example before going into the details of how this type of transformation works.
+
+TensorFlow\* uses the scope mechanism to group related operation nodes. It is a good practice to put nodes performing a
+particular task into the same scope. This approach divides a graph into logical blocks that are easier to review in
+TensorBoard\*. The scope, in fact, just defines a common name prefix for the nodes belonging to it.
+
+For example, Inception topologies contain several types of so-called "Inception blocks". Some of them are equal to each
+other, but are located in different places of the network. For example, Inception V4 from the
+[TensorFlow-Slim image classification model library](https://github.com/tensorflow/models/tree/master/research/slim) has
+inception blocks `Mixed_5b`, `Mixed_5c` and `Mixed_5d` with exactly the same nodes with the same set of attributes.
+
+Consider a situation when someone implemented these Inception blocks extremely efficiently using a single Inference
+Engine operation called `InceptionBlock` and needs to replace these blocks in the model with instances of this operation.
+Model Optimizer provides a mechanism to trigger the transformation for a sub-graph of operations defined by node name
+regular expressions (scope). In this particular case, some of the patterns are: `.*InceptionV4/Mixed_5b`,
+`.*InceptionV4/Mixed_5c` and `.*InceptionV4/Mixed_5d`. Each pattern starts with `.*`, because the prefix `InceptionV4`
+is added to all node names during a model freeze.
+
+This type of transformation is implemented using `mo.front.tf.replacement.FrontReplacementFromConfigFileSubGraph` as a
+base class and works the following way:
+1. The developer prepares a JSON configuration file template defining node name patterns.
+2. The developer runs the Model Optimizer with the command line parameter `--tensorflow_custom_operations_config_update`, and
+Model Optimizer adds information about input and output nodes of the specified sub-graphs.
+3. Model Optimizer executes the developer-defined transformation **only** when a user specifies the path to the
+configuration file updated in step 2 using the command line parameter `--transformations_config`.
+
+Consider the following possible configuration file template for the Inception Block transformation:
+```json
+[
+    {
+        "custom_attributes": {
+            "attr1_key": "attr1_value",
+            "attr2_key": 123456
+        },
+        "id": "InceptionBlockTransformation",
+        "instances": [
+            ".*InceptionV4/Mixed_5b",
+            ".*InceptionV4/Mixed_5c",
+            ".*InceptionV4/Mixed_5d"
+        ],
+        "match_kind": "scope"
+    }
+]
+```
+
+The configuration file contains a list of dictionaries. Each dictionary defines one transformation. Each transformation
+is defined with several parameters:
+
+* `id` (mandatory) is a unique identifier of the transformation. It is used in the Python\* code that implements the
+transformation to link the class and the transformation description from the configuration file.
+* `match_kind` (mandatory) is a string that specifies the matching algorithm. For the node name pattern case the value
+should be equal to `scope`. Other possible values are described in the dedicated sections below.
+* `instances` (mandatory) specifies instances of the sub-graph to be matched. It contains a list of node name prefix
+patterns for the match kind of type `scope`.
+* `custom_attributes` (optional) is a dictionary with attributes that can be used in the transformation code.
+
+After running the Model Optimizer with the additional parameter `--tensorflow_custom_operations_config_update` pointing to
+the template configuration file, the content of the file is updated with two new sections, `inputs` and `outputs`.
+The file content after the update is the following:
+```json
+[
+    {
+        "id": "InceptionBlockTransformation",
+        "custom_attributes": {
+            "attr1_key": "attr1_value",
+            "attr2_key": 123456
+        },
+        "instances": [
+            ".*InceptionV4/Mixed_5b",
+            ".*InceptionV4/Mixed_5c",
+            ".*InceptionV4/Mixed_5d"
+        ],
+        "match_kind": "scope",
+        "inputs": [
+            [
+                {
+                    "node": "Branch_2/Conv2d_0a_1x1/Conv2D$",
+                    "port": 0
+                },
+                {
+                    "node": "Branch_3/AvgPool_0a_3x3/AvgPool$",
+                    "port": 0
+                },
+                {
+                    "node": "Branch_1/Conv2d_0a_1x1/Conv2D$",
+                    "port": 0
+                },
+                {
+                    "node": "Branch_0/Conv2d_0a_1x1/Conv2D$",
+                    "port": 0
+                }
+            ]
+        ],
+        "outputs": [
+            {
+                "node": "concat$",
+                "port": 0
+            }
+        ]
+    }
+]
+```
+
+The value for the key `inputs` is a list of lists describing input tensors of the sub-graph. Each element of the top-level
+list corresponds to one unique input tensor of the sub-graph. Each internal list describes a list of nodes consuming
+this tensor and the port numbers where the tensor is consumed. Model Optimizer generates regular expressions for the input
+node names to uniquely identify them in each instance of the sub-graph defined by the `instances`. Denote these nodes
+as input nodes of the sub-graph.
+
+In the InceptionV4 topology, the `InceptionV4/Mixed_5b` block has four input tensors from outside of the sub-graph,
+but all of them are produced by the node `InceptionV4/Mixed_5a/concat`. Therefore, the top-level list of the `inputs`
+contains one list corresponding to this tensor. Four input nodes of the sub-graph consume the tensor produced by the
+`InceptionV4/Mixed_5a/concat` node. In this case, all four input nodes consume the input tensor through port 0.
+
+The order of items in the internal list describing nodes does not matter, but the order of elements in the top-level
+list is important. This order defines the order in which the Model Optimizer attaches input tensors to a new generated
+node if the sub-graph is replaced with a single node.
The `i`-th input node of the sub-graph is obtained using the call
+`match.single_input_node(i)` in the sub-graph transformation code. More information about the API is given below. If it is
+necessary to change the order of input tensors, the configuration file can be edited in a text editor.
+
+The value for the key `outputs` is a list describing nodes of the sub-graph producing tensors that go outside of the
+sub-graph or do not have child nodes. Denote these nodes as output nodes of the sub-graph. The order of elements in
+the list is important. The `i`-th element of the list describes the `i`-th output tensor of the sub-graph, which could be
+obtained using the call `match.output_node(i)`. The order of elements can be manually changed in the configuration file.
+Model Optimizer uses this order to connect output edges if the sub-graph is replaced with a single node.
+
+Refer to [Converting TensorFlow\* Object Detection API Models](../convert_model/tf_specific/Convert_Object_Detection_API_Models.md)
+for more examples of this type of transformation.
+
+##### Front Phase Transformations Using Start and End Points
+This type of transformation is implemented using `mo.front.tf.replacement.FrontReplacementFromConfigFileSubGraph` as a
+base class and works the following way:
+1. The developer prepares a JSON configuration file which defines the sub-graph to match using two lists of node names:
+"start" and "end" nodes.
+2. Model Optimizer executes the developer-defined transformation **only** when a user specifies the path to the
+configuration file using the command line parameter `--transformations_config`. Model Optimizer performs the following
+steps to match the sub-graph:
+   1. Starts a graph traversal from every start node following the direction of the graph edges. The search stops in an
+   end node or in case of a node without consumers. All visited nodes are added to the matched sub-graph.
+   2. Starts another graph traversal from each non-start node of the sub-graph, i.e. every node except nodes from the
+   "start" list. In this step the edges are traversed in the opposite edge direction. All newly visited nodes are added
+   to the matched sub-graph. This step is needed to add nodes required for calculation of values of internal nodes of the
+   matched sub-graph.
+   3. Checks that all "end" nodes were reached from "start" nodes. If not, the Model Optimizer exits with an error.
+   4. Checks that there are no [Parameter](../../../ops/infrastructure/Parameter_1.md) operations among the added nodes. If
+   they exist, then the sub-graph depends on the inputs of the model. Such a configuration is considered incorrect, so the
+   Model Optimizer exits with an error.
+
+This algorithm finds all nodes "between" start and end nodes and the nodes needed for calculating the non-input nodes of the
+matched sub-graph.
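+
+Both the node name pattern and the start/end points approaches use transformation classes derived from the same base
+class. A minimal skeleton, where the class name is a hypothetical assumption and the body is illustrative only:
+
+```py
+from mo.front.tf.replacement import FrontReplacementFromConfigFileSubGraph
+
+
+class InceptionBlockReplacement(FrontReplacementFromConfigFileSubGraph):
+    # links this class to the transformation description with "id": "InceptionBlockTransformation" in the JSON file
+    replacement_id = 'InceptionBlockTransformation'
+
+    def generate_sub_graph(self, graph, match):
+        # "match.single_input_node(i)" and "match.output_node(i)" give the i-th input/output
+        # of the matched sub-graph in the order defined by the configuration file
+        ...
+```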
+
+The example of a JSON configuration file for a transformation with start and end points is
+`extensions/front/tf/ssd_support_api_v1.15.json`:
+
+```json
+[
+    {
+        "custom_attributes": {
+            "code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
+            "pad_mode": "caffe.ResizeParameter.CONSTANT",
+            "resize_mode": "caffe.ResizeParameter.WARP",
+            "clip_before_nms": false,
+            "clip_after_nms": true
+        },
+        "id": "ObjectDetectionAPISSDPostprocessorReplacement",
+        "include_inputs_to_sub_graph": true,
+        "include_outputs_to_sub_graph": true,
+        "instances": {
+            "end_points": [
+                "detection_boxes",
+                "detection_scores",
+                "num_detections"
+            ],
+            "start_points": [
+                "Postprocessor/Shape",
+                "Postprocessor/scale_logits",
+                "Postprocessor/Tile",
+                "Postprocessor/Reshape_1",
+                "Postprocessor/Cast_1"
+            ]
+        },
+        "match_kind": "points"
+    }
+]
+```
+
+The format of the file is similar to the one provided as an example in the
+[Node Name Pattern Front Phase Transformations](#node-name-pattern-front-phase-transformations) section. The difference is in
+the value of the `match_kind` parameter, which should be equal to `points`, and the format of the `instances` parameter,
+which should be a dictionary with two keys, `start_points` and `end_points`, defining start and end node names,
+respectively.
+
+> **NOTE**: `include_inputs_to_sub_graph` and `include_outputs_to_sub_graph` parameters are redundant and should be
+> always equal to `true`.
+
+> **NOTE**: This sub-graph match algorithm has a limitation that each start node must have only one input. Therefore, it
+> is not possible to specify, for example, a [Convolution](../../../ops/convolution/Convolution_1.md) node as input
+> because it has two inputs: the data tensor and the tensor with weights.
+
+For other examples of transformations with points, please refer to
+[Converting TensorFlow\* Object Detection API Models](../convert_model/tf_specific/Convert_Object_Detection_API_Models.md).
+
+##### Generic Front Phase Transformations Enabled with Transformations Configuration File
+This type of transformation works similarly to the [Generic Front Phase Transformations](#generic-front-phase-transformations)
+but requires a JSON configuration file to enable it, similarly to
+[Node Name Pattern Front Phase Transformations](#node-name-pattern-front-phase-transformation) and
+[Front Phase Transformations Using Start and End Points](#start-end-points-front-phase-transformations).
+
+The base class for this type of transformation is
+`mo.front.common.replacement.FrontReplacementFromConfigFileGeneral`. The Model Optimizer executes the method
+`transform_graph(self, graph, replacement_descriptions)` and provides the `Graph` object and a dictionary with values
+parsed from the `custom_attributes` attribute of the provided JSON configuration file.
+
+The example of the configuration file for this type of transformation is `extensions/front/tf/yolo_v1_tiny.json`:
+
+```json
+[
+    {
+        "id": "TFYOLO",
+        "match_kind": "general",
+        "custom_attributes": {
+            "classes": 20,
+            "coords": 4,
+            "num": 2,
+            "do_softmax": 0
+        }
+    }
+]
+```
+and the corresponding transformation file is `./extensions/front/YOLO.py`:
+
+```py
+from extensions.front.no_op_eraser import NoOpEraser
+from extensions.front.standalone_const_eraser import StandaloneConstEraser
+from extensions.ops.regionyolo import RegionYoloOp
+from mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral
+from mo.graph.graph import Node, Graph
+from mo.ops.result import Result
+from mo.utils.error import Error
+
+
+class YoloRegionAddon(FrontReplacementFromConfigFileGeneral):
+    """
+    Replaces all Result nodes in the graph with YoloRegion->Result node chains.
+    YoloRegion node attributes are taken from the configuration file
+    """
+    replacement_id = 'TFYOLO'  # the identifier matching the "id" attribute in the JSON file
+
+    def run_after(self):
+        return [NoOpEraser, StandaloneConstEraser]
+
+    def transform_graph(self, graph: Graph, replacement_descriptions):
+        op_outputs = [n for n, d in graph.nodes(data=True) if 'op' in d and d['op'] == 'Result']
+        for op_output in op_outputs:
+            last_node = Node(graph, op_output).in_node(0)
+            op_params = dict(name=last_node.id + '/YoloRegion', axis=1, end_axis=-1)
+            op_params.update(replacement_descriptions)
+            region_layer = RegionYoloOp(graph, op_params)
+            region_layer_node = region_layer.create_node([last_node])
+            # here we remove 'axis' from 'dim_attrs' to avoid permutation from axis = 1 to axis = 2
+            region_layer_node.dim_attrs.remove('axis')
+            Result(graph).create_node([region_layer_node])
+            graph.remove_node(op_output)
+```
+
+The configuration file has only 3 parameters: the `id` identifier of the transformation, the `match_kind` (which should be
+equal to `general`) and the `custom_attributes` dictionary with custom attributes accessible in the transformation.
+
+#### Middle Phase Transformations
+There are two types of middle phase transformations:
+
+1. [Pattern-Defined Middle Phase Transformations](#pattern-defined-middle-phase-transformations) triggered for each
+sub-graph of the original graph isomorphic to the specified pattern.
+2. [Generic Middle Phase Transformations](#generic-middle-phase-transformations).
+
+##### Pattern-Defined Middle Phase Transformations
+This type of transformation is implemented using `mo.middle.replacement.MiddleReplacementPattern` as a base class and
+works similarly to the [Pattern-Defined Front Phase Transformations](#pattern-defined-front-phase-transformations).
+There are two differences:
+1. The transformation entry function name is `replace_pattern(self, graph, match)`.
+2. The pattern defining the graph should contain data nodes because the structure of the graph is different between the
+front and middle phases. Refer to the [Partial Inference](#partial-inference) section for more information about the
+graph structure changes.
+
+Refer to the `extensions/middle/L2NormToNorm.py` for an example of a pattern-defined middle transformation.
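+
+Unlike a front phase pattern, a middle phase pattern includes the data nodes standing between operations. A minimal
+sketch, where the operations are chosen for illustration only:
+
+```py
+def pattern(self):
+    return dict(
+        nodes=[
+            ('relu', dict(kind='op', op='ReLU')),
+            ('relu_data', dict(kind='data')),  # the data node produced by the ReLU
+            ('consumer', dict(kind='op')),
+        ],
+        edges=[
+            ('relu', 'relu_data'),
+            ('relu_data', 'consumer'),
+        ])
+```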
+
+##### Generic Middle Phase Transformations
+Model Optimizer provides a mechanism to implement generic middle phase transformations. This type of transformation is
+implemented using `mo.middle.replacement.MiddleReplacementPattern` as a base class and works similarly to the
+[Generic Front Phase Transformations](#generic-front-phase-transformations). The only difference is that the
+transformation entry function name is `find_and_replace_pattern(self, graph: Graph)`.
+
+Refer to the `extensions/middle/CheckForCycle.py` for an example of such a transformation.
+
+#### Back Phase Transformations
+There are two types of back phase transformations:
+
+1. [Pattern-Defined Back Phase Transformations](#pattern-defined-back-phase-transformations) triggered for each
+sub-graph of the original graph isomorphic to the specified pattern.
+2. [Generic Back Phase Transformations](#generic-back-phase-transformations).
+
+> **NOTE**: The graph layout during the back phase is always NCHW. However, during the front and middle phases it could
+> be NHWC if the original model was using it. Refer to [Model Conversion Pipeline](#model-conversion-pipeline) for more
+> details.
+
+##### Pattern-Defined Back Phase Transformations
+This type of transformation is implemented using `mo.back.replacement.BackReplacementPattern` as a base class and
+works the same way as [Pattern-Defined Front Phase Transformations](#pattern-defined-front-phase-transformations).
+
+Refer to the `extensions/back/ShufflenetReLUReorder.py` for an example of a pattern-defined back transformation.
+
+##### Generic Back Phase Transformations
+Model Optimizer provides a mechanism to implement generic back phase transformations. This type of transformation is
+implemented using `mo.back.replacement.BackReplacementPattern` as a base class and works the same way as
+[Generic Middle Phase Transformations](#generic-middle-phase-transformations).
+
+Refer to the `extensions/back/GatherNormalizer.py` for an example of such a transformation.
+
+## See Also
+* [Deep Learning Network Intermediate Representation and Operation Sets in OpenVINO™](../../IR_and_opsets.md)
+* [Converting a Model to Intermediate Representation (IR)](../convert_model/Converting_Model.md)
+* [nGraph Basic Concepts](@ref openvino_docs_nGraph_DG_basic_concepts)
+* [Inference Engine Extensibility Mechanism](../../../IE_DG/Extensibility_DG/Intro.md)
+* [Extending the Model Optimizer with Caffe* Python Layers](Extending_Model_Optimizer_with_Caffe_Python_Layers.md)
+* [Extending the Model Optimizer for Custom MXNet* Operations](Extending_MXNet_Model_Optimizer_with_New_Primitives.md)
+* [Legacy Mode for Caffe* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md)
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_MXNet_Model_Optimizer_with_New_Primitives.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_MXNet_Model_Optimizer_with_New_Primitives.md
index 4203a1f74114de..aa3b5697242657 100644
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_MXNet_Model_Optimizer_with_New_Primitives.md
+++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_MXNet_Model_Optimizer_with_New_Primitives.md
@@ -1,45 +1,41 @@
-# Extending the MXNet Model Optimizer with New Primitives {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_MXNet_Model_Optimizer_with_New_Primitives}
+# Extending Model Optimizer for Custom MXNet* Operations {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_MXNet_Model_Optimizer_with_New_Primitives}
 
-This section describes how you can create a Model Optimizer extension for a custom layer from your MXNet* model. It supplements the main document [Extending Model Optimizer with New Primitives](Extending_Model_Optimizer_with_New_Primitives.md) and provides a step-by-step procedure.
To create an extension for a particular layer, perform the following steps:
+This section provides instructions on how to support a custom MXNet operation (in the MXNet documentation also called an
+"operator" or "layer") which is not a part of the MXNet operation set, for example, an operator implemented using
+the following [guide](https://mxnet.apache.org/versions/1.7.0/api/faq/new_op.html).
+
+This section describes a procedure on how to extract operator attributes in the Model Optimizer. The rest of the
+operation enabling pipeline and the documentation on how to support MXNet operations from the standard MXNet operation set
+are described in the main document [Customize_Model_Optimizer](Customize_Model_Optimizer.md).
+
+## Writing Extractor for Custom MXNet Operation
+Custom MXNet operations have an attribute `op` (defining the type of the operation) equal to `Custom` and an attribute
+`op_type` which is an operation type defined by a user. Implement an extractor class inherited from the
+`MXNetCustomFrontExtractorOp` class instead of the `FrontExtractorOp` class used for standard framework operations in order
+to extract attributes for such kind of operations. The `op` class attribute value should be set to the `op_type` value,
+so the extractor is triggered for this kind of operation.
+
+Here is an example of the extractor for a custom operation registered with the type (`op_type` value) equal to
+`MyCustomOp`, having an attribute `my_attribute` of floating point type with the default value `5.6`. In this sample we
+assume that we have already created the `CustomOp` class (inherited from the `Op` class) for the Model Optimizer operation
+for this MXNet custom operation, as described in [Customize_Model_Optimizer](Customize_Model_Optimizer.md).
 
-1. Create the file `custom_proposal_ext.py` in the folder `/deployment_tools/model_optimizer/extensions/front/mxnet`
-If your MXNet layer has op `Custom`, create the `CustomProposalFrontExtractor` class inherited from `MXNetCustomFrontExtractorOp`:
-```py
-from mo.front.extractor import MXNetCustomFrontExtractorOp
-class CustomProposalFrontExtractor(MXNetCustomFrontExtractorOp):
-    pass
-```
-Otherwise, for layers that are not standard MXNet layers, create the `ProposalFrontExtractor` class inherited from `FrontExtractorOp`:
-```py
- from mo.front.extractor import FrontExtractorOp
- class ProposalFrontExtractor(FrontExtractorOp):
-     pass
-```
-2. Specify the operation that the extractor refers to and a specific flag. The flag represents whether the operation should be used by the Model Optimizer or should be excluded from processing:
-```py
-from mo.front.extractor import MXNetCustomFrontExtractorOp
-class CustomProposalFrontExtractor(MXNetCustomFrontExtractorOp):
-    op = '_contrib_Proposal'
-    enabled = True
-```
-3.
Register a mapping rule between the original model and the `PythonProposalOp` attributes by overriding the following function:
 ```py
+from extensions.ops.custom_op import CustomOp  # implementation of the MO operation class
 from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
 from mo.front.extractor import MXNetCustomFrontExtractorOp
-from mo.ops.op import Op
 
-class CustomProposalFrontExtractor(MXNetCustomFrontExtractorOp):
-    op = '_contrib_Proposal'
-    enabled = True
+class CustomProposalFrontExtractor(MXNetCustomFrontExtractorOp):  # inherit from the specific base class
+    op = 'MyCustomOp'  # the value corresponding to the `op_type` value of the MXNet operation
+    enabled = True  # the extractor is enabled
+
     @staticmethod
     def extract(node):
-        attrs = get_mxnet_layer_attrs(node.symbol_dict)
+        attrs = get_mxnet_layer_attrs(node.symbol_dict)  # parse the attributes to a dictionary with string values
         node_attrs = {
-            'feat_stride': attrs.float('feat_stride', 16)
+            'my_attribute': attrs.float('my_attribute', 5.6)
         }
-
-        # update the attributes of the node
-        Op.get_op_class_by_name('Proposal').update_node_stat(node, node_attrs) # <------ here goes the name ('Proposal') of the Operation that was implemented before
-        return __class__.enabled
-```
+        CustomOp.update_node_stat(node, node_attrs)  # update the attributes of the node
+        return __class__.enabled
+```
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md
new file mode 100644
index 00000000000000..c79da3ef0efaa0
--- /dev/null
+++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.md
@@ -0,0 +1,89 @@
+# Extending Model Optimizer with Caffe* Python Layers {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers}
+
+This section provides instructions on how to support a custom Caffe operation written only in Python. For example, the
+[Faster-R-CNN model](http://dl.dropboxusercontent.com/s/o6ii098bu51d139/faster_rcnn_models.tgz?dl=0) implemented in
+Caffe contains a custom layer Proposal written in Python. The layer is described in the
+[Faster-R-CNN prototxt](https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/models/pascal_voc/VGG16/faster_rcnn_end2end/test.prototxt)
+in the following way:
+```sh
+layer {
+  name: 'proposal'
+  type: 'Python'
+  bottom: 'rpn_cls_prob_reshape'
+  bottom: 'rpn_bbox_pred'
+  bottom: 'im_info'
+  top: 'rois'
+  python_param {
+    module: 'rpn.proposal_layer'
+    layer: 'ProposalLayer'
+    param_str: "'feat_stride': 16"
+  }
+}
+```
+
+This section describes only a procedure on how to extract operator attributes in the Model Optimizer. The rest of the
+operation enabling pipeline and the documentation on how to support other Caffe operations (written in C++) are described in
+the main document [Customize_Model_Optimizer](Customize_Model_Optimizer.md).
+
+## Writing Extractor for Caffe Python Layer
+Custom Caffe Python layers have an attribute `type` (defining the type of the operation) equal to `Python` and two
+mandatory attributes, `module` and `layer`, in the `python_param` dictionary. The `module` defines the Python module name
+with the layer implementation, while the `layer` value is an operation type defined by a user.
+To extract attributes for such an operation, implement an extractor class inherited from the
+`CaffePythonFrontExtractorOp` class instead of the `FrontExtractorOp` class used for standard framework layers. The
+`op` class attribute value should be set to the `module + "." + layer` value so the extractor is triggered for this
+kind of operation.
+
+Here is a simplified example of the extractor for the custom operation `Proposal` from the Faster-R-CNN model
+mentioned above. The full code with additional checks is provided in the
+`/deployment_tools/model_optimizer/extensions/front/caffe/proposal_python_ext.py` file. The sample code uses the
+operation `ProposalOp`, which corresponds to the `Proposal` operation described in the [Available Operations Sets](../../../ops/opset.md)
+document. Refer to the source code below for a detailed explanation of the extractor.
+
+```py
+from extensions.ops.proposal import ProposalOp
+from mo.front.extractor import CaffePythonFrontExtractorOp
+
+
+class ProposalPythonFrontExtractor(CaffePythonFrontExtractorOp):
+    op = 'rpn.proposal_layer.ProposalLayer'  # module + "." + layer
+    enabled = True  # extractor is enabled
+
+    @staticmethod
+    def extract_proposal_params(node, defaults):
+        param = node.pb.python_param  # get the protobuf message representation of the layer attributes
+        # parse attributes from the layer protobuf message to a Python dictionary
+        attrs = CaffePythonFrontExtractorOp.parse_param_str(param.param_str)
+        update_attrs = defaults
+
+        # the operation expects ratio and scale values to be called "ratio" and "scale" while Caffe uses different names
+        if 'ratios' in attrs:
+            attrs['ratio'] = attrs['ratios']
+            del attrs['ratios']
+        if 'scales' in attrs:
+            attrs['scale'] = attrs['scales']
+            del attrs['scales']
+
+        update_attrs.update(attrs)
+        ProposalOp.update_node_stat(node, update_attrs)  # update the node attributes
+
+    @classmethod
+    def extract(cls, node):
+        # define default values for the Proposal layer attributes
+        defaults = {
+            'feat_stride': 16,
+            'base_size': 16,
+            'min_size': 16,
+            'ratio': [0.5, 1, 2],
+            'scale': [8, 16, 32],
+            'pre_nms_topn': 6000,
+            'post_nms_topn': 300,
+            'nms_thresh': 0.7
+        }
+        cls.extract_proposal_params(node, defaults)
+        return cls.enabled
+```
+
+## See Also
+* [Customize_Model_Optimizer](Customize_Model_Optimizer.md)
+* [Legacy Mode for Caffe* Custom Layers](Legacy_Mode_for_Caffe_Custom_Layers.md)
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md
index b94ddb52885f80..9fb0e9b26f2db7 100644
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md
+++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_New_Primitives.md
@@ -1,476 +1,3 @@
-# Extending the Model Optimizer with New Primitives {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_with_New_Primitives}
+# Extending Model Optimizer with New Primitives {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_with_New_Primitives}
-This section explains how to register a custom layer in the Model Optimizer, including how to register Proposal as a custom layer. This section also demonstrates how `Proposal` works as a custom layer.
-
-Model Optimizer loads the model, goes through the topology, and tries to find each layer type in the list of known layers.
If the Model Optimizer does not find a layer in that list, it looks for the layer in the list of custom layers. If the Model Optimizer fails to find the layer among the defined custom layers, it registers a Caffe\* fallback for for the output shape inference. If the Model Optimizer does not find Caffe and cannot infer shapes, the Model Optimizer fails with an appropriate message. - -You must know two things about custom layers with the Model Optimizer: - -* How to map a subgraph in a FW model to a subgraph consisting of Inference Engine layers. For Caffe, the subgraph is a 1-to-1 mapping of a Caffe layer to an Inference Engine layer. -* How to infer shapes for unknown subgraphs. This can be either for a step in which the internal representation consists of framework-specific layers, or for a step in which the internal representation consists of Inference Engine layers. - -You also have the option of a framework fallback for unknown subgraphs, for when the original framework is used for inference of output shapes of operations. The example below demonstrates the case in which the framework is not available or should not be used. - -## Preparing an Example Topology - -> **NOTE**: Skip this section if you have a topology with a layer that is not known to the Model Optimizer. - -The information in this section prepares a Caffe\* model with the provided, deployment-ready `prototxt` for a -well-known topology called -[Faster-R-CNN protoxt](https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/models/pascal_voc/VGG16/faster_rcnn_end2end/test.prototxt) -to demonstrate the workflow. To use this example, you must have -[weights and biases](http://dl.dropboxusercontent.com/s/o6ii098bu51d139/faster_rcnn_models.tgz?dl=0) for inference, -because `prototxt` just describes the structure of the topology. - -1. Download the `.caffemodel` and `.prototxt` files -2. Run the Model Optimizer on the `.caffemodel` and `.prototxt` files: -```shell -python mo.py --input_model VGG16_faster_rcnn_final.caffemodel --input_proto test.prototxt -``` -You will likely see the error message: -```shell -Error parsing text-format caffe.NetParameter: 196:16: Message type "caffe.DropoutParameter" has no field named "scale_train". -``` -Whether you see the error depends on your Caffe version. For example, BVLC Caffe does not support the boolean parameter `scale_train` for the `dropout` layer. The error message does not matter, because the dropout layer is needed only for training, and the Model Optimizer removes it. -3. To proceed, comment out these lines in `test.prototxt`: -```sh -... -layer { - name: "drop6" - type: "Dropout" - bottom: "fc6" - top: "fc6" - dropout_param { - dropout_ratio: 0.5 - # scale_train: false # <-------------- comment out this line - } -} -... -layer { - name: "drop7" - type: "Dropout" - bottom: "fc7" - top: "fc7" - dropout_param { - dropout_ratio: 0.5 - # scale_train: false # <-------------- comment out this line - } -} -... -``` -4. Run the Model Optimizer on this model again: -```shell -python mo.py --input_model VGG16_faster_rcnn_final.caffemodel --input_proto test.prototxt -``` - You get the model successfully converted to Intermediate Representation, and you can infer it with the Inference Engine. - - However, the aim of this tutorial is to demonstrate the way of supporting custom layers not yet supported by the Model Optimizer. - If you want to understand better how Model Optimizer works, remove the extension for layer `Proposal` and follow all steps of this tutorial. - -5. 
Remove the extension for layer `Proposal`: -```sh -mkdir extensions/old -mv extensions/front/caffe/proposal_python_ext.py extensions/old/proposal_python_ext_old.py -mv extensions/ops/proposal_python_example.py extensions/old/proposal_python__example_old.py -``` -6. Now you can run the Model Optimizer on this model once again: -```sh -python mo.py --input_model VGG16_faster_rcnn_final.caffemodel --input_proto test.prototxt -``` -You will see the message: -```shell -[ ERROR ] Found custom layer proposal. Model Optimizer does not support this layer. -Please, register it in CustomLayersMapping.xml or implement extension. -For more information please refer to Model Optimizer FAQ, question #FAQ45. -``` -This message means the Model Optimizer can load the model, but is unable to infer the shape and handle the custom layer properties. - -## Registering a Custom Layer as a Model Optimizer Extension - -In the following sections, you will learn how to make the Model Optimizer independent from Caffe\* when processing a -model that has a custom layer. In this example, the custom layer is referred to as the Proposal layer. - -Use this section to implement the mapping rules for the `Proposal` layer attributes and the output shape calculation. As part of these steps, you must first create a class for the `Proposal` layer and inherit it from general-purpose Op that defines the interface of every new custom layer. - -In this section, it is important to understand the `Op` class and its function. The implementation of this class shows that it expects a graph and attributes to be passed when initializing. The graph and attributes are in `/deployment_tools/model_optimizer/mo/ops/op.py` - -`Op` keeps the attributes for each operation and contains logic for handling node creation for internal model representation. `Op` is responsible for dumping each particular operation to the `.xml` format for the Intermediate Representation. By inheriting from it, the technical items are complete and you concentrate on the specificity of this layer: the attributes it supports and the rules on computing its output shape. - -Follow these steps: - -1. Create the file `python_proposal.py` in the directory `/deployment_tools/model_optimizer/extensions/ops`: -```python -from mo.ops.op import Op -class PythonProposalOp(Op): - pass -``` -2. Define the name of the operation and make a stub constructor: -```python -from mo.ops.op import Op -class PythonProposalOp(Op): - op = 'Proposal' - def __init__(self, graph, attrs): - super().__init__(graph) -``` -3. Every `Op` must have three specific fields defined: `type`, `op`, and `infer`. In most cases, the `type` and `op` names are the same, and `infer` is defined as a function to compute the output shape. Reflect these fields in your constructor: -```python -from mo.ops.op import Op -class PythonProposalOp(Op): - op = 'Proposal' - def __init__(self, graph, attrs): - mandatory_props = { - 'type': __class__.op, - 'op': __class__.op, - 'infer': None - } - super().__init__(graph, mandatory_props, attrs) -``` - According to the Intermediate Representation catalog, Proposal layer has the following attributes: - - * `pre_nms_topn` - * `post_nms_topn` - * `nms_thresh` - * `feat_stride` - * `min_size` - * `base_size` - * `ratio` - * `scale` -4. In defining supported attribute names, it is best to use the same names as in the original models. The names are similar to parameters and have no connection with the model layer properties. For clarity, you can use the name `my_ratio` for `ratio`. 
Other than defining the list of supported parameters, you can define only the parameters that appear in the Intermediate Representation in the `backend_attrs` method. - Define your attributes: -```python -class PythonProposalOp(Op): - # ... constructor - def supported_attrs(self): - return [ - 'pre_nms_topn', - 'post_nms_topn', - 'nms_thresh', - 'feat_stride', - 'min_size', - 'base_size', - 'ratio', - 'scale' - ] -``` -5. Model Optimizer now knows how to create the layer called Proposal when it is in the topology and what attributes this layer has. However, the Model Optimizer does not know how to calculate the output shape of this operation. Define a rule to calculate the output shape: -```python -import numpy as np -from mo.graph.graph import Node -from mo.ops.op import Op -class PythonProposalOp(Op): - def __init__(self, graph, attrs): - mandatory_props = { - 'type': __class__.op, - 'op': __class__.op, - 'infer': PythonProposalOp.calculate_output_shape - } - super().__init__(graph, mandatory_props, attrs) - # ... supported attrs - @staticmethod - def calculate_output_shape(node: Node): - node.out_node().shape = (1, 1, 1, 1) # any Proposal now has always the same output -``` -6. According to the Intermediate Representation catalog, Proposal layer has the following output calculation formula, where shape dynamically depends on the `post_nms_topn` parameter. - Implement the output calculation formula in Python\*: -```python -import numpy as np -class PythonProposalOp(Op): - # ... static fields - # ... constructor - # ... supported attrs - @staticmethod - def calculate_output_shape(node: Node): - input_shape = node.in_node(0).shape - out_shape = np.array([0, 0], dtype=np.int64) - # rois blob: holds R regions of interest, each is a 5 - tuple - # (n, x1, y1, x2, y2) specifying an image batch index n and a - # rectangle(x1, y1, x2, y2) - out_shape[0] = input_shape[0] * node.post_nms_topn - out_shape[1] = 5 - node.out_node(0).shape = out_shape -``` - The node does not contain this parameter because it should be initialized in the constructor and in other parameters. The Inference Engine contains the implementation of a Caffe\*-like Proposal layer and works well with the default values from `caffe.proto`: -``` -// Message that stores parameters used by ProposalLayer message ProposalParameter { optional uint32 feat_stride = 1 [default = 16]; optional uint32 base_size = 2 [default = 16]; optional uint32 min_size = 3 [default = 16]; repeated float ratio = 4; repeated float scale = 5; optional uint32 pre_nms_topn = 6 [default = 6000]; optional uint32 post_nms_topn = 7 [default = 300]; optional float nms_thresh = 8 [default = 0.7]; } -``` -7. Change the constructor as follows: -```python -class PythonProposalOp(Op): - # ... static fields - def __init__(self, graph, attrs): - mandatory_props = { - 'type': __class__.op, - 'op': __class__.op, - 'feat_stride': 16, - 'base_size': 16, - 'min_size': 16, - 'ratio': [0.5, 1, 2], - 'scale': [8, 16, 32], - 'pre_nms_topn': 6000, - 'post_nms_topn': 300, - 'nms_thresh': 0.7, - 'infer': PythonProposalOp.calculate_output_shape - } - super().__init__(graph, mandatory_props, attrs) - # ... supported attrs - # ... calculate output shape - -``` - -It is mandatory to call two functions right after the implementation of that class: - -``` -class ProposalPythonOp(Op): - ... 
- -register_caffe_python_extractor(ProposalPythonOp, 'rpn.proposal_layer.ProposalLayer') -Op.excluded_classes.append(ProposalPythonOp) -``` - -Note that the first call register_caffe_python_extractor(ProposalPythonOp, 'rpn.proposal_layer.ProposalLayer') registers the extension of the layer in the Model Optimizer that will be found by a specific name (it is mandatory to join module name and layer name): 'rpn.proposal_layer.ProposalLayer'. - -The second call prevents the Model Optimizer from using this extension as if it is an extension for a layer with type `Proposal`. Otherwise, this layer can be chosen as an implementation of extension that can lead to potential issues. - -**Summary** - -In this section you implemented support for a custom layer with type `Python` that is `Proposal` layer in the topology. You learned how to calculate output shape of this layer. - -The values of attributes are hardcoded, and in the next section you will learn how to extract these values from original framework model (Caffe model in this case). - -## Registering Rules to Pass Extension Layer Properties from a Caffe\* Model to the Intermediate Representation - -Model Optimizer now knows how to set the shape of the `PythonProposalOp` operation, but it is incorrect to initialize attributes with same values for every operation. Instead, the values should be extracted from the original topology. Model Optimizer does not know how to map the custom layer properties to the `PythonProposalOp`. For this, you must register the `FrontExtractorOp` instance. - -> **NOTE**: This step is required only if the layer requires parameters from the original model. - -1. Remove call functions `register_caffe_python_extractor` and `Op.excluded_classes.append` from the file with `op`, because you will implement extracted attributes from prototxt by yourself. -There are multiple types of layers in Caffe: for example, `Convolution` and `Pooling`. Also, there is a specific type for custom Python\* layers called `Python`. Therefore, it is necessary to distinguish between those 'usual' types of layers and custom ones. If you want to implement extensions for a layer with type different to `Python`, you need to inherit your class of operation (for example, `ProposalFrontExtractor`) from `FrontExtractorOp`. Otherwise, inherit your class of operation from `CaffePythonFrontExtractorOp`. -2. Create a file `python_proposal_ext.py` in the folder `/deployment_tools/model_optimizer/extensions/front/caffe` -```py -from mo.front.extractor import CaffePythonFrontExtractorOp -class PythonProposalFrontExtractor(CaffePythonFrontExtractorOp): - pass -``` -For other layers types, inherit from `FrontExtractorOp`: -```py - from mo.front.extractor import FrontExtractorOp - class ProposalFrontExtractor(FrontExtractorOp): - pass -``` -You will implement extractor for layer with type `Python`, however, the steps are generally the same for layers with other types. -3. Specify the operation that the extractor refers to and a specific flag. The flag represents whether the operation should be used by the Model Optimizer or should be excluded from processing: -```py -from mo.front.extractor import CaffePythonFrontExtractorOp -class PythonProposalFrontExtractor(CaffePythonFrontExtractorOp): - op = 'rpn.proposal_layer.ProposalLayer' - enabled = True -``` -4. 
Register a mapping rule between the original model and the `PythonProposalOp` attributes by overriding the following function: -```py -from mo.front.extractor import CaffePythonFrontExtractorOp -from mo.ops.op import Op -class ProposalPythonFrontExtractor(CaffePythonFrontExtractorOp): - op = 'rpn.proposal_layer.ProposalLayer' - enabled = True - @staticmethod - def extract(node): - proto_layer = node.pb - param = proto_layer.python_param # each layer has a specific parameter, take a look at caffe.proto - python_params = str(param.param_str) # for Python layers, all params are in param_str - attrs = { - 'feat_stride': int(python_params.split(':')[-1]) - } - # update the attributes of the node - Op.get_op_class_by_name('Proposal').update_node_stat(node, attrs) # <------ here goes the name ('Proposal') of the Operation that was implemented before - return __class__.enabled -``` -> **NOTE:** if you implement extension for layer with type different to `Python`, change the following line: Op.get_op_class_by_name('Proposal').update_node_stat(node, attrs) to this line: Op.get_op_class_by_name(__class__.op).update_node_stat(node, mapping_rule). -You have successfully extracted the parameter `feat_stride` from `prototxt`, assuming it is the only parameter in this layer. -5. To increase the implementation flexibility: -```py - from mo.front.extractor import CaffePythonFrontExtractorOp - from mo.ops.op import Op - class PythonProposalFrontExtractor(CaffePythonFrontExtractorOp): - op = 'rpn.proposal_layer.ProposalLayer' - enabled = True - @staticmethod - def extract(node): - param = node.pb.python_param - attrs = CaffePythonFrontExtractorOp.parse_param_str(param.param_str) - Op.get_op_class_by_name('Proposal').update_node_stat(node, attrs) - return ProposalPythonFrontExtractor.enabled -``` - -You can successfully convert the model. Open the `.xml` file and view your code: -```xml -... - - - - - 1 - 18 - 15 - 15 - - - 1 - 36 - 15 - 15 - - - 1 - 3 - - - - - 300 - 5 - - - -... -``` - -Look at the output shape of the custom layer you implemented. The shape was calculated according to the rules specified in `PythonProposalOp`. The `ratio` and `scale` properties have the value `[0.5, 1, 2]` and `[8, 16, 32]`. They have square brackets because they are originally a repeated parameter. You converted the parameter to a list in `PythonProposalOp`. Model Optimizer cast the value to a string. According to Python\* rules, a list has a string representation of opening and closing square brackets and values joined by commas. - -This is not a valid notation for the Intermediate Representation specification, because repeated parameters must be separated by a comma but without the brackets. Therefore, you must override the Model Optimizer default behavior regarding how it handles those parameters during the Intermediate Representation emitting stage, after the optimizations are complete. To do so, implement `backend_attrs()` in the `PythonProposalOp` class: -```python -class PythonProposalOp(Op): - ... 
other methods - def backend_attrs(self) -> list: - """ - Gets list of attributes that should appear in resulting IR - Returns: - list of attributes names or list of tuples (name of attribute, pre-processing rule) - """ - return [ - ( # a tuple per attribute - 'ratio', # name of attribute - # pre-processing rule in a form of lambda - # lambda takes a PythonProposalOp node with all defined properties - # it translates [1,2,3] -> "1,2,3" - lambda node: ','.join(map(str, node['ratio'])) - ), - ( - 'scale', - lambda node: ','.join(map(str, node['scale'])) - ), - 'feat_stride', - 'base_size', - 'min_size', - 'pre_nms_topn', - 'post_nms_topn', - 'nms_thresh' - ] -``` -The model can now be successfully converted. - -Open the `.xml` file. `ratio` and `scale` have the expected correct values `0.5,1,2` and `8,16,32`: -```xml - ... - - - - - ... - - - ... - - - - ... -``` - -> **NOTE**: Model Optimizer supports the Faster-R-CNN topology. Run the following command for the same Intermediate Representation: - -```sh -python mo.py --input_model VGG16_faster_rcnn_final.caffemodel --input_proto test.prototxt --extensions /deployment_tools/inference-engine/samples/object_detection_sample/fasterrcnn_extensions -``` - -**Summary** - -In this section you learned how to: - -1. Create a framework-independent extension implementation of the Intermediate Representation custom layer with unified logic for calculating output shapes, specified set of attributes -2. Use the Framework-Specific property extractor to map original model custom layer properties to the expected properties of the Framework-Independent extension -3. Manipulate the custom layer properties representation in the resulting Intermediate Representation - -Files used in this section: - -* `/deployment_tools/model_optimizer/extensions/ops/python_proposal.py`: - -```py -import networkx as nx -import numpy as np -from mo.front.extractor import attr_getter -from mo.graph.graph import Node -from mo.ops.op import Op - -class ProposalOp(Op): - op = 'Proposal' - - def __init__(self, graph: nx.MultiDiGraph, attrs: dict): - mandatory_props = { - 'type': __class__.op, - 'op': __class__.op, - 'post_nms_topn': 300, # default in caffe-shared - 'infer': ProposalOp.proposal_infer - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'feat_stride', - 'base_size', - 'min_size', - 'ratio', - 'scale', - 'pre_nms_topn', - 'post_nms_topn', - 'nms_thresh' - ] - - def backend_attrs(self): - return [ - 'feat_stride', - 'base_size', - 'min_size', - ('ratio', lambda node: attr_getter(node, 'ratio')), - ('scale', lambda node: attr_getter(node, 'scale')), - 'pre_nms_topn', - 'post_nms_topn', - 'nms_thresh', - ] - - @staticmethod - def proposal_infer(node: Node): - input_shape = node.in_node(0).shape - out_shape = np.array([0, 0], dtype=np.int64) - # rois blob: holds R regions of interest, each is a 5 - tuple - # (n, x1, y1, x2, y2) specifying an image batch index n and a - # rectangle(x1, y1, x2, y2) - out_shape[0] = input_shape[0] * node.post_nms_topn - out_shape[1] = 5 - node.out_node(0).shape = out_shape -``` -* `/deployment_tools/model_optimizer/extensions/front/caffe/python_proposal_ext.py`: - -```py -from mo.front.extractor import CaffePythonFrontExtractorOp -from mo.ops.op import Op - -class ProposalPythonFrontExtractor(CaffePythonFrontExtractorOp): - op = 'rpn.proposal_layer.ProposalLayer' - enabled = True - - @staticmethod - def extract(node): - param = node.pb.python_param - attrs = 
CaffePythonFrontExtractorOp.parse_param_str(param.param_str)
-        Op.get_op_class_by_name('Proposal').update_node_stat(node, attrs)
-        return ProposalPythonFrontExtractor.enabled
-```
+This page is deprecated. Please refer to the [Model Optimizer Extensibility](Customize_Model_Optimizer.md) page for more information.
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md
index ba56ecfcaa147d..c106d489ea8af7 100644
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md
+++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Legacy_Mode_for_Caffe_Custom_Layers.md
@@ -1,10 +1,23 @@
# Legacy Mode for Caffe* Custom Layers {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Legacy_Mode_for_Caffe_Custom_Layers}

-> **NOTE**: This functionality is deprecated and will be removed in future releases.
+> **NOTE**: This functionality is deprecated and will be removed in a future release.

-Model Optimizer can register custom layers in a way that the output shape is calculated by the Caffe\* framework installed on your system. This chapter covers this option.
+Model Optimizer can register custom layers in a way that the output shape is calculated by the Caffe\* framework
+installed on your system. This approach has several limitations:

-> **NOTE**: Caffe Python\* API has an issue when layer name does not correspond to the name of its top. The fix was implemented on [BVLC Caffe\*](https://github.com/BVLC/caffe/commit/35a7b87ad87457291dfc79bf8a7e7cf7ef278cbb). The Caffe framework on your computer must contain this fix. Otherwise, Caffe framework can unexpectedly fail during the fallback procedure.
+* If your layer output shape depends on dynamic parameters, input data, or parameters of previous layers, the output
+shape of the layer calculated via Caffe can be incorrect. For example, `SimplerNMS` filters out bounding boxes that do
+not satisfy a condition. Internally, the Caffe fallback forwards the whole net without any meaningful data - just some
+noise, so it is natural to get only one bounding box (0,0,0,0) instead of the expected number (for example, 15). There
+is an option to patch Caffe accordingly; however, it ties the success of Intermediate Representation generation to the
+patched Caffe being available on the particular machine. To keep the solution independent from Caffe, we recommend
+using the extensions mechanism for such layers, as described in [Model Optimizer Extensibility](Customize_Model_Optimizer.md).
+* It is not possible to produce an Intermediate Representation on a machine that does not have Caffe installed.
+
+> **NOTE**: Caffe Python\* API has an issue when the layer name does not correspond to the name of its top. The fix was
+> implemented on [BVLC Caffe\*](https://github.com/BVLC/caffe/commit/35a7b87ad87457291dfc79bf8a7e7cf7ef278cbb). The
+> Caffe framework on your computer must contain this fix. Otherwise, the Caffe framework can unexpectedly fail during
+> the fallback procedure.

> **NOTE**: The Caffe fallback feature was validated against [this GitHub revision](https://github.com/BVLC/caffe/tree/99466224dac86ddb86296b1e727794fb836bd80f). You may have issues with forks or later Caffe framework versions.
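+
+For reference, an extension computes output shapes analytically instead of running Caffe. Below is a minimal sketch of
+such a shape inference function; the `top_k` attribute and the 5-element row layout are illustrative assumptions
+modeled after box-producing layers like `Proposal`, not a contract of any real layer:
+
+```py
+import numpy as np
+
+
+def my_layer_infer(node):
+    # the input shape is already known at this point of the conversion pipeline
+    input_shape = node.in_node(0).shape
+    # compute the output shape analytically from the node attributes:
+    # `top_k` rows of 5 elements per image in the batch, no Caffe forward pass needed
+    node.out_node(0).shape = np.array([input_shape[0] * node.top_k, 5], dtype=np.int64)
+```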
@@ -25,7 +38,8 @@ Where:

**Example**:

-1. `Proposal` layer has parameters, and they appear in the Intermediate Representation. The parameters are stored in the `proposal_param` property of the layer:
+1. `Proposal` layer has parameters, and they appear in the Intermediate Representation. The parameters are stored in
+the `proposal_param` property of the layer:
```shell
\<CustomLayer NativeType="Proposal" hasParam="true" protoParamName="proposal_param"/>
```
@@ -34,16 +48,6 @@ Where:
\
```
-For this feature, you need an appropriate version of Caffe installed on the computer on which you run the Model Optimizer.
-
-## Constraints of Using the Caffe Fallback
-
-Several layers in the Caffe\* framework can have shapes that dynamically depend on the input data, not only the layers that proceed the layer and its parameters. For example, `SimplerNMS` is filtering out bounding boxes that do not satisfy the condition. Internally, Caffe fallback forwards the whole net without any meaningful data - just some noise. It is natural to get only one bounding box (0,0,0,0) instead of expected number (for example, 15). There is an option to patch Caffe accordingly, however, it makes success of Intermediate Representation generation on the patched Caffe on the particular machine. To keep the solution independent from Caffe, we recommend to use extensions mechanism for such layers.
-
-Known cases like `Proposal`, `DetectionOutput`, `SimplerNMS` are implemented as extensions and can be used out of the box.
-
-A detailed description of supported layers is in the [Operations Specification](../../../ops/opset.md) document.
-
## Building Caffe\*

1. Build Caffe\* with Python\* 3.5:
@@ -68,4 +72,4 @@
python3
import caffe
```
-If Caffe was installed correctly, the `caffe` module is imported without errors.
\ No newline at end of file
+If Caffe was installed correctly, the `caffe` module is imported without errors.
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md b/docs/MO_DG/prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md
index d3ba399a87745d..a3e6eda7756ad7 100644
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md
+++ b/docs/MO_DG/prepare_model/customize_model_optimizer/Subgraph_Replacement_Model_Optimizer.md
@@ -1,363 +1,4 @@
# Sub-Graph Replacement in the Model Optimizer {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Subgraph_Replacement_Model_Optimizer}

-Several reasons exist for why the Model Optimizer could not generate an Intermediate Representation for a model. However, in some cases, the Intermediate Representation could be generated after providing certain hints to the tool. The examples of hints below are mostly related to TensorFlow\*, but potentially could be actual for models created in any framework:
-
-* Topology contains an operation (or a sub-graph of operations) not known for Model Optimizer, but this operation (sub-graph) could be expressed as a combination of known operations. A hint would be a description of this combination to the tool).
-* Sub-graph of operations in the topology expresses a single layer known to Inference Engine.
-* TensorFlow and Inference Engine use different layouts of tensors, NHWC and NCHW respectively. If some tensor in NHWC layout is flattened (for example, all the dimensions are squashed into single dim), it is not possible to convert it to NCHW layout required for Inference Engine, so Model Optimizer cannot produce correct Intermediate Representation.
-
-The detailed solutions for the examples above are given later, the next subsection shows what is common in all three examples.
- -## Sub-graph Replacement - -In these cases, the sub-graph (or a single node) of initial graph is replaced with a new sub-graph (single node). The sub-graph replacement consists of the following steps: - -1. Identify an existing sub-graph for replacement - -2. Generate a new sub-graph - -3. Connect a new sub-graph to the graph (create input/output edges to the new sub-graph) - -4. Create output edges out of a new sub-graph to the graph - -5. Do something with the original sub-graph (for example, remove it) - -Model Optimizer provides several ways to perform most of the sub-graph replacement steps. The next subsections describe these methods. - -## Replace a Single Operation with a Sub-graph of Operations - -For example, there is an operation `SquaredDifference` in TensorFlow which calculates \f$(a - b)^2\f$, where \f$a\f$ and \f$b\f$ are input tensors. Inference Engine does not support such operation. However, `SquaredDifference` could be expressed using two `Power` operations and one `Eltwise Add`. The `Power` operation calculates \f$scale * (a ^ {power}) + shift\f$, where \f$a\f$ is a tensor and \f$scale\f$, \f$power\f$ and \f$shift\f$ are float values. The first `Power` operation negates the value of tensor \f$b\f$. The second one is used to square the result of \f$a + (- b)\f$ which is calculated using the `Eltwise Add` operation applied to tensor \f$a\f$ and tensor \f$-b\f$. - -Given that, we can replace all `SquaredDifference` operations in the initial model with two `Power` and one `Eltwise` operations. The replacer is implemented in the following file `/deployment_tools/model_optimizer/extensions/front/SquaredDifference.py`. -```python -import networkx as nx -from mo.front.common.replacement import FrontReplacementOp -from mo.graph.graph import Node -from mo.ops.eltwise import Eltwise -from mo.ops.power import Power -class SquaredDifference(FrontReplacementOp): - """ - Example class illustrating how to implement replacement of a single op in the front-end of the MO pipeline. - This class replaces a single op SquaredDifference by a sub-graph consisting of 3 lower-level ops. - """ - op = "SquaredDifference" - enabled = True - def replace_op(self, graph: nx.MultiDiGraph, node: Node): - negate = Power(graph, dict(scale=-1, name=node.name + '/negate_')) - add = Eltwise(graph, dict(operation='sum', name=node.name + '/add_')) - squared = Power(graph, dict(power=2, name=node.name + '/squared_')) - out_node = squared.create_node([add.create_node([node.in_node(0), negate.create_node([node.in_node(1)])])]) - # Replace edge from out port 0 of the matched node with a edge from node out_node.id with port 0. - # The "explicit" version of the return value is: [(out_node.id, 0)]) - return [out_node.id] -``` -Model Optimizer internal representation of the graph uses the networkx module. - -**Key lines**: - -* Line 1: Imports this module. - -* Line 3: Imports class `FrontReplacementOp` that is used to replace operation of particular type with a new sub-graph. This class performs the first step of the sub-graph replacement (identifies an existing sub-graph for replacement). It is important to mention that the replacement happens before shape inference and creation of data nodes representing tensors with values. At this stage of model conversion pipeline, all nodes in the graph are operation nodes or nodes of type `Const` that produce tensor with fixed value embedded into the node. - -* Line 4: Imports class `Node` representing a single node in the computation graph. 
- -* Lines 5 - 6: Import classes representing operations `Power` and `Eltwise`. These classes are inherited from base class `mo.ops.Op` that represents operation and stores its attributes. - -* Line 9: Defines class `SquaredDifference` inherited from `FrontReplacementOp`. This is a replacer class that is automatically registered and executed by Model Optimizer. Since the class is located in the common (not framework) specific directory `/deployment_tools/model_optimizer/extensions/front`, it is used for replacement for all supported frameworks. - -* Line 15: Defines the class variable `op` that stores the name of the operation to be replaced. In this case, it is `SquaredDifference`. - -* Line 16: Defines class variable `enabled` that controls whether the replacer is enabled or not. The only function that should be implemented in the class is `replace_op`. It gets graph to operate on and an instance of node of desired operation (`SquaredDifference` in this case). This function performs step two and three of the sub-graph replacement (generates a new sub-graph to replace with and connects a new sub-graph to the graph). - -* Lines 19 - 21: Create instances of operations classes with required attributes. - -* Line 23: Creates a sub-graph from the operations defined above. The `create_node` method of the `Op` class generates `Node` from the `Op` and uses single mandatory argument - the list of input nodes (represented as instances of `Node` class) to create input edges to the node being generated. Inputs of the `SquaredDifference` node are retrieved using `node.in_node(0)` and `node.in_node(1)` method calls. The `Eltwise Add` node gets first input as initial first input of `SquaredDifference` node, the second input of `add` is the result of negation of the second input of `SquaredDifference` node: `[add.create_node([node.in_node(0), negate.create_node([node.in_node(1)])])]`. Then the result of `Add` node is squared. `out_node` node performs this calculation. - -The `replace_op` function returns a list of node names used to create output edges of the sub-graph to connect it with the rest of the graph. Each element of the list describes mapping between old output edge of the matched node and new sub-graph node and output edge index. The i-th element of the list corresponds to the i-th output tensor of the matched node. In this case, `SquaredDifference` produces single tensor through output port 0, so the returned list contains single element. In general, each element is a tuple, where the first element is the name of a new node producing required tensor and the second is the output port for that tensor. If the output port is 0, it is possible to use shortcut - just the name of the node instead of a tuple. Line 26 uses this shortcut. The returned value is used to create the new sub-graph output edges (step 4 of the sub-graph replacement). - -Default implementation of the `FrontReplacementOp` class removes matched node and all its input/output edges (step 5 of the sub-graph replacement). - -Another example of such kind of replacement is in the `/deployment_tools/model_optimizer/extensions/front/Sub.py` class where all instances of `Sub` operations are replaced with two operations: `Power` to negate the second argument and the `Eltwise` to perform elementwise add. - -## Replace Sub-graph of Operations with a New Sub-graph of Operations - -The previous example considered situation when one single node of a specific type is replaced. 
When it is necessary to replace a sub-graph of operations it is necessary to tell Model Optimizer how to identify this sub-graph. There are three ways to achieve that: - -* Use graph isomorphism pattern of the networkx module - -* Use nodes name pattern to identify `scope` (according to TensorFlow terminology) to be replaced - -* Use sets of `start` and `end` node names to match all nodes "between" them - -The next sections explain each option using real examples. - -### Replace Sub-graph of Operations Using Graph Isomorphism Pattern - -networkx Python\* module provides methods to find graph isomorphic to the given one using nodes and edges match: for example, `networkx.algorithms.isomorphism.categorical_node_match`, `networkx.algorithms.isomorphism.categorical_multiedge_match`. Model Optimizer uses these methods and provides simple API to use that feature. - -For example, the Caffe\* has layer called [Mean-Variance Normalization (MVN)](http://caffe.berkeleyvision.org/tutorial/layers/mvn.html), which is also supported by the Inference Engine. This layer is implemented with low-level operations in TensorFlow: `Mean`, `StopGradient`, `SquaredDifference`, `Squeeze` and `FusedBatchNorm`. Model Optimizer should replace sub-graph with these operations with a single Inference Engine layer of type `MVN`. - -The file `/deployment_tools/model_optimizer/extensions/front/tf/mvn.py` performs such a replacement. The first part of the file is: -```python -class MVN(FrontReplacementSubgraph): - enabled = True - def pattern(self): - log.debug('Enabled MVN replacement') - return dict( - nodes=[ - ('mean', dict(op='Mean')), - ('stop_grad', dict(op='StopGradient')), - ('sqdiff', dict(op='SquaredDifference')), - ('variance', dict(op='Mean')), - ('squeeze_mean', dict(op='Squeeze')), - ('squeeze_variance', dict(op='Squeeze')), - ('fbn', dict(op='FusedBatchNorm')), - ], - edges=[ - ('mean', 'stop_grad', {'in': 0}), - ('stop_grad', 'sqdiff', {'in': 1}), - ('sqdiff', 'variance', {'in': 0}), - ('mean', 'squeeze_mean', {'in': 0}), - ('variance', 'squeeze_variance', {'in': 0}), - ('squeeze_mean', 'fbn', {'in': 3}), - ('squeeze_variance', 'fbn', {'in': 4}), - ], - node_attrs=['op'], - edge_attrs=['in']) -``` -**Key lines**: - -* Line 1: Defines class `MVN` inherited from class `FrontReplacementSubgraph` that performs sub-graph replacement using sub-graph isomorphism pattern. - -* Line 3: Sets class variable `enabled` to value True meaning that this replacer is enabled. - -* The function `pattern` defines the sub-graph constraints to be matched. It returns a dictionary with four keys: - - * the `nodes` defines a list of nodes to be matched. Each element in the list is a tuple. The first element is the alias name assigned for the matched node, the second element is a dictionary with desired attributes of the node. - - * the `edges` defines a list of edges to be matched. Each element in the list is a tuple. The first and the second elements are the start and end edge nodes alias names respectively. The third element is a dictionary with desired edge attributes. - - * the `node_attrs` contains the names of nodes attributes to use during sub-graph isomorphism search. - - * the `edge_attrs` contains the names of edges attributes to use during sub-graph isomorphism search. - - The sub-graph is matched if all provided constraints are satisfied. If at least one node with desired attributes is missing or at least one defined edge is absent, the sub-graph is not matched. 
-* Line 9: Adds constraint that sub-graph should contain node with attribute `op` with value `Mean`. The matched node gets an alias name `mean`. The same way the line 10 add constrain for node `StopGradient`, the matched node gets an alias name `stop_grad`. - -* Line 18: Defines edge from node with alias name `mean` to node with alias name `stop_grad` having attribute `in` equal to 0. This means that the output of node `mean` is connected to the node `stop_grad` as a first input (Model Optimizer uses zero-based indexing that is why `in` is 0). Another example of defining the edges constraints is in line 25 where the edge from `squeeze_mean` is connected to the `fbn` node as fourth input. - -* Lines 26 - 27: Specify a list of attributes to be checked. In fact, these lists are just list of all keys in the dictionaries for node and edge attributes. - -Now when the Model Optimizer knows how to find sub-graph (step 1 of the sub-graph replacement), it is necessary to implement function that will perform actual sub-graph replacement (step 2 and 3). The code for this function is: -```python -def replace_sub_graph(self, graph: nx.MultiDiGraph, match: dict): - fbn = match['fbn'] - input = fbn.in_node(0) - log.debug('Found potential MVN pattern after {} with name {}'.format(input.op, input.name)) - if input.id != match['mean'].in_node(0).id or input.id != match['sqdiff'].in_node(0).id: - return - log.debug('Confirmed MVN pattern after {} with name {}'.format(input.op, input.name)) - MVN = Op.get_op_class_by_name('MVN') - mvn = MVN(graph, dict( - name=fbn.name + '/MVN_', - eps=fbn.eps, - required_reduction_indices=[1,2] if fbn.data_format == b'NHWC' else [2,3] - )) - mvn.attrs['old_infer'] = mvn.attrs['infer'] - mvn.attrs['infer'] = __class__.infer - mul = Eltwise(graph, dict(operation='mul', name=fbn.name + '/Mul_')) - add = Eltwise(graph, dict(operation='sum', name=fbn.name + '/Add_')) - input_gamma = fbn.in_node(1) - input_beta = fbn.in_node(2) - mean_reduction = match['mean'].in_node(1) - variance_reduction = match['mean'].in_node(1) - new_subgraph = add.create_node([ - mul.create_node([ - mvn.create_node([input, mean_reduction, variance_reduction]), - input_gamma - ]), - input_beta - ]) - replace_node(fbn, new_subgraph) -``` -The function accepts two arguments - the graph and the dictionary `match`. The keys in the dictionary are the alias names of matched nodes (defined in the `nodes` list in the function `pattern`) and the values are the matched node of the graph (the instance of Node object). - -The function generates new sub-graph with node of type `MVN` and two nodes of the type `Eltwise` calculating sum and product. There is nothing interesting in how the graph is generated and mathematics behind that, so attention will be put to two aspects of this function. - -The first one is the call to function `replace_node` in line 36. `FusedBatchNorm` node is replaced with the output node of the generated sub-graph: all input edges of the `FusedBatchNorm` node are re-connected to the `new_subgraph` node, all consumers of the `FusedBatchNorm` node are updated to get inputs from the `new_subgraph` node. This action connects newly generated sub-graph with an existing graph (step 4 of the sub-graph replacement). - -The second one is that the default implementation of the inference function for `MVN` operation is overwritten. In line 16, the default implementation of the inference function for `MVN` is saved to attribute `old_infer`. 
In line 17, the new inference function is saved to the instance of the `MVN` operation class. The new inference function code looks the following way: -```python -@staticmethod -def infer(node: Node): - if not(node.in_node(1).has_valid('value') and node.in_node(2).has_valid('value')): - log.warning('Reduction indices for mean and variance for MVN node {} are not constants'.format(node.name)) - return - if not(all(node.in_node(1).value == node.required_reduction_indices) and - all(node.in_node(2).value == node.required_reduction_indices)): - log.warning('Reduction indices for mean {} and variance {} do not match required ones {}'.format( - node.in_node(1).value, - node.in_node(2).value, - node.required_reduction_indices - )) - return - node.graph.remove_edge(node.in_node(1).id, node.id) - node.graph.remove_edge(node.in_node(2).id, node.id) - node.old_infer(node) -``` -The `infer` function is needed to infer value of the node (if it is possible) and to infer shapes of the output tensors of the node (mandatory). The custom `infer` function performs additional checks that describe limitations of the `MVN` layer implementation in the Inference Engine. For example, reduction indices for mean and variance must be constants (line 10), while in TensorFlow they could be computed during model inference. In addition, the function removes two edges from the graph (lines 17 and 18) because all required information is already stored in the `MVN` node attributes. This is due to different `MVN` layer implementation in Inference Engine and TensorFlow\*: `mean` and `variance` are attributes of the node in Inference Engine while in TensorFlow they are input tensors. Edges are not removed in the `replace_sub_graph` function, because these edges are used in the `infer` function (lines 7-12). - -The last action in the `infer` method (line 19) is to call default infer function for the `MVN`, which is saved in the attribute `old_infer` of the node to infer output tensors shapes. - -On the step 5 of the sub-graph replacement, six matching nodes are automatically removed during the dead code elimination pass that is performed after applying of custom sub-graph replacements defined. Six matching nodes are no more connected to the inputs of the network after replacing node `fbn` with a newly created sub-graph node. Since they are not marked as output nodes (using `--output` command line parameter), they could be removed. - -The replacement works for all sub-graph isomorphism instances found in the network. - -### Replace Sub-graph of Operations Using Nodes Name Pattern - -TensorFlow uses a mechanism of scope to group related operation nodes. It is a good practice to put nodes performing particular task into the scope. This approach divides a graph into logical blocks that are easier to review in TensorBoard\*. The `scope`, in fact, just defines a common prefix for the node names in the scope. - -For example, Inception topologies contain several types of so-called "Inception blocks". Some of them are exactly equal to each other, but located in different places of the network. For example, Inception V4 from `tensorflow.contrib.slim` module has inception blocks `Mixed_5b`, `Mixed_5c` and `Mixed_5d` with exactly the same nodes with the same attributes. - -Now consider situation when someone implemented these Inception blocks extremely efficiently using single Inference Engine custom layer called `InceptionBlock` and would like to replace these blocks with instances of the layer to decrease inference time. 
Model Optimizer provides mechanism to replace sub-graph of operations defined by the regular expressions for the node names prefixes (scope). In this particular case, some of the patterns are: `.*InceptionV4/Mixed_5b`, `.*InceptionV4/Mixed_5c` and `.*InceptionV4/Mixed_5d`. Each pattern starts with `.*`, because a prefix `InceptionV4` is added to all nodes names during a model freeze. - -The sub-graph replacement using nodes name pattern is a bit trickier than replacements of single operation and networkx isomorphism pattern described above. You should do the following additional steps in comparison with previously described replacements: - -1. Prepare configuration file template defining node names patterns and information about custom layer attributes. - -2. Run Model Optimizer with command line parameter to add information about input and output nodes of the specified sub-graphs. - -Consider the following possible configuration file for the Inception Block replacer: -```json -[ - { - "custom_attributes": { - "attr1_key": "attr1_value", - "attr2_key": 123456 - }, - "id": "InceptionBlockReplacer", - "op": "InceptionBlock", - "instances": [ - ".*InceptionV4/Mixed_5b", - ".*InceptionV4/Mixed_5c", - ".*InceptionV4/Mixed_5d" - ], - "match_kind": "scope" - } -] -``` -The `.json` file contains list of dictionaries. Each dictionary defines one replacement. Each replacement is defined with several keys: - -* `id` (mandatory) is a unique identifier of the replacer. It is used in the Python\* code that implements sub-graph replacement to link the class and the replacement description from the configuration file. - -* `match_kind` (mandatory) is a string that specifies what matching algorithm is used. Currently supported `scope` and `points`. In this example, the first one is considered. The `points` match kind is described below. - -* `instances` (mandatory) specifies instances of the sub-graph to be matched. It contains a list of node names prefixes patterns for the match kind `scope`. - -* `custom_attributes` (optional) is a dictionary with static attributes of the layer to be dumped to Inference Engine Intermediate Representation `.xml` file. - -* `op` (optional) is used only if the sub-graph replacement Python code is not needed, because the sub-graph should be replaced with a single node of type `op`. If this attribute is not set, it is necessary to implement Python code with sub-graph generation code. Both options are considered in this example. - -When the configuration file is ready, run the Model Optimizer with regular command line parameters pointing to the file with model and input shapes (if necessary) and additional parameter `--tensorflow_custom_operations_config_update` pointing to the generated configuration file. If the file is correct, Model Optimizer adds two keys to the `InceptionBlockReplacer` dictionary: `inputs` and `outputs` with the following content: -```json -[ - { - "id": "InceptionBlockReplacer", - ... - "inputs": [ - [ - { - "node": "Branch_2/Conv2d_0a_1x1/Conv2D$", - "port": 0 - }, - { - "node": "Branch_3/AvgPool_0a_3x3/AvgPool$", - "port": 0 - }, - { - "node": "Branch_1/Conv2d_0a_1x1/Conv2D$", - "port": 0 - }, - { - "node": "Branch_0/Conv2d_0a_1x1/Conv2D$", - "port": 0 - } - ] - ], - "outputs": [ - { - "node": "concat$", - "port": 0 - } - ] - } -] -``` -The value for key `inputs` is a list of lists describing input tensors of the sub-graph. Each element of the top-level list corresponds to one unique input tensor of the sub-graph. 
Each internal list describes a list of nodes consuming this tensor and port numbers where the tensor is consumed. Model Optimizer generates regular expressions for the input nodes names to uniquely identify them in each instance of the sub-graph defined by the `instances`. Denote these nodes as input nodes of the sub-graph. - -In the InceptionV4 topology, the `InceptionV4/Mixed_5b` block has four input tensors from outside of the sub-graph, but all of them are produced by the node `InceptionV4/Mixed_5a/concat`. Therefore, the top-level list of the `inputs` contains one list corresponding to this tensor. Four input nodes of the sub-graph consume the tensor produced by `InceptionV4/Mixed_5a/concat` node. In this case, all four input nodes consume input tensor into port 0. - -The order of items in the internal list describing nodes does not matter, but the order of elements in the top-level list is important. This order defines the order in which the Model Optimizer attaches input tensors to a new generated node if the sub-graph is replaced with a single node. The i-th input node of the sub-graph is obtained using call `match.single_input_node(i)` in the sub-graph replacer code. More information about API is given below. If you need to change the order of input tensors, you can edit the configuration file in the text-editor. - -The value for the key `outputs` is a list describing nodes of the sub-graph producing tensor that goes outside of the sub-graph or does not have child nodes. Denote these nodes as output nodes of the sub-graph. The order of elements in the list is important. The i-th element of the list describes the i-th output tensor of the sub-graph, which could be obtained using call `match.output_node(i)`. The order of elements can be manually changed in the configuration file. Model Optimizer uses this order to connect output edges if the sub-graph is replaced with a single node. - -Now, when meaning of `inputs` and `outputs` attributes is clean, return back to the replacer implementation. The replacer `InceptionBlockReplacer` contains attribute `op` with the value `InceptionBlock`, which means that the identified sub-graph should be replaced with a single layer of type `InceptionBlock`. This layer is not known for the Model Optimizer, so it is necessary to define it. See [Extending the Model Optimizer with New Primitives](Extending_Model_Optimizer_with_New_Primitives.md). You must create file `extension/ops/InceptionBlock.py` with the following content: -```python -import numpy as np -from mo.graph.graph import Node -from mo.ops.op import Op -class InceptionBlock(Op): - op = "InceptionBlock" - enabled = True - def __init__(self, graph, attrs): - super().__init__(graph, attrs, { - 'type': __class__.op, - 'op': __class__.op, - }) -``` -The shape inference function is not defined. In this case, Model Optimizer uses TensorFlow fallback to calculate shapes of the sub-graph output tensors. - -Run the Model Optimizer with the regular command line parameters, path to the model file and input shape (if necessary), and the parameter `--tensorflow_use_custom_operations_config` and point to the created configuration file. Model Optimizer generates Intermediate Representation `.xml` file with three sequential layers of type `InceptionBlock` like in the following example: -```xml - - - - 1 - 384 - 35 - 35 - - - - - 1 - 384 - 35 - 35 - - - -``` -The implementation of the sub-graph replacement by scope with a single layer is complete. 
The next subsection explains
-how Model Optimizer replaces sub-graph identified by start/end nodes (`points`) with another sub-graph.
-
-### Replace Sub-graph of Operations Using Points
-In this scenario, for the matching algorithm user defines the sub-graph via a set of "start" and "end" nodes.
-Given the set, the Model Optimizer performs the following steps:
-1. Starts graph traversal from every _start_ nodes following the direction of the graph edges.
-The search stops in _end_ nodes or in case of nodes without further children. All visited nodes are added to the matched sub-graph.
-2. Starts another graph traversal from each non-start node of the sub-graph, i.e. every node except nodes from "start" set.
-In this step the edges are traversed in the opposite edge direction. All newly visited nodes are added to the
-   matched sub-graph. This step is needed to add nodes required for calculation values of internal nodes of the
-   matched sub-graph.
-3. Checks that all "end" nodes were reached from "input" nodes. If no then exit with error.
-4. Check that there are no "Placeholder" operations among added nodes. If it is not true then some side branch of
-   the sub-graph (added in step 2) depends on inputs of the network. Such configuration is not correct so exit with error.
-
-This algorithm finds all nodes "between" start and end nodes. Also nodes needed for calculation of non-input nodes of the
-matched sub-graph produce _constant_ values because they do not depend on input of the network.
-**This sub-graph match has a limitation that each start node must have only one input**. Therefore, it is not possible
-to specify, for example, convolution node as input because it has two inputs: data tensor and tensor with weights.
-
-For example of replacement with points, please refer to the case-study of the
-[conversion for the SSD models, created with TensorFlow Object Detection API](TensorFlow_SSD_ObjectDetection_API.md).
+The document has been deprecated. Refer to the [Model Optimizer Extensibility](Customize_Model_Optimizer.md) page
+for the up-to-date documentation.
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/TensorFlow_Faster_RCNN_ObjectDetection_API.md b/docs/MO_DG/prepare_model/customize_model_optimizer/TensorFlow_Faster_RCNN_ObjectDetection_API.md
deleted file mode 100644
index 482cb1545abf97..00000000000000
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/TensorFlow_Faster_RCNN_ObjectDetection_API.md
+++ /dev/null
@@ -1,449 +0,0 @@
-# Converting Faster R-CNN models, created with TensorFlow Object Detection API {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_TensorFlow_Faster_RCNN_ObjectDetection_API}
-
-This is a deprecated page. Please, consider reading [this](../convert_model/tf_specific/Convert_Object_Detection_API_Models.md) page describing new approach to convert Object Detection API models giving closer to TensorFlow inference results.
-
-## Converting models created with TensorFlow Object Detection API version equal or higher than 1.6.0
-This chapter describes how to convert selected Faster R-CNN models from the TensorFlow Object Detection API zoo version equal or higher than 1.6.0. The full list of supported models is provided in the table below. Note that currently batch size 1 is supported only. The only Inference Engine plugin supporting these topologies inference is CPU.
-
-The Faster R-CNN models contain several building blocks similar to the building blocks of SSD models, so it is highly recommended to read the chapter about [enabling TensorFlow Object Detection API SSD models](TensorFlow_SSD_ObjectDetection_API.md) first. Detailed information about Faster R-CNN topologies is provided [here](https://arxiv.org/abs/1506.01497).
-
-The TensorFlow network consists of a number of big blocks grouped by scope:
-
-* `Preprocessor` performs scaling/resizing of the image and converts input data to the [0, 1] interval. It has two outputs: the first one is the modified input image and the second one is a constant tensor with shape (batch_size, 3) and values (resized_image_height, resized_image_width, 3).
-
-* `FirstStageFeatureExtractor` is a backbone feature extractor.
-
-* `FirstStageBoxPredictor` calculates boxes and classes predictions.
-
-* `GridAnchorGenerator` generates anchors coordinates.
-
-* `ClipToWindow` crops anchors to the resized image size.
-
-* `Decode` decodes coordinates of boxes using anchors and data from the `FirstStageBoxPredictor`.
-
-* `BatchMultiClassNonMaxSuppression` performs non-maximum suppression.
-
-* `map` scales coordinates of boxes to the [0, 1] interval by dividing coordinates by (resized_image_height, resized_image_width).
-
-* `map_1` scales coordinates from the [0, 1] interval to the resized image sizes.
-
-* `SecondStageFeatureExtractor` is a feature extractor for predicted Regions of Interest (ROIs).
-
-* `SecondStageBoxPredictor` refines box coordinates according to the `SecondStageFeatureExtractor` output.
-
-* `SecondStagePostprocessor` is the Detection Output layer performing the final box predictions.
-
-### Sub-graph replacements
-There are three sub-graph replacements defined in the `extensions/front/tf/legacy_faster_rcnn_support.json` used to convert these models:
-
-* the first one replaces the `Preprocessor` block. The implementation of this replacer is in the `/deployment_tools/model_optimizer/extensions/front/tf/Preprocessor.py`
-
-* the second one replaces a number of blocks in the graph, including `GridAnchorGenerator`, `ClipToWindow`, `Decode`, `BatchMultiClassNonMaxSuppression`, `Tile`, `Tile_1` and `map`, with Proposal and ROIPooling layers and some additional layers to pre-process input data
-
-* the third one replaces `SecondStagePostprocessor` with a DetectionOutput layer.
-
-The second replacer is defined using the following configuration that matches the sub-graph by points:
-
-```json
-    {
-        "custom_attributes": {
-            "nms_threshold": 0.7,
-            "feat_stride": 16,
-            "max_proposals": 100,
-            "anchor_base_size": 256,
-            "anchor_scales": [0.25, 0.5, 1.0, 2.0],
-            "anchor_aspect_ratios": [0.5, 1.0, 2.0],
-            "roi_spatial_scale": 0.0625
-        },
-        "id": "TFObjectDetectionAPIFasterRCNNProposalAndROIPooling",
-        "include_inputs_to_sub_graph": true,
-        "include_outputs_to_sub_graph": true,
-        "instances": {
-            "end_points": [
-                "CropAndResize",
-                "map_1/TensorArrayStack/TensorArrayGatherV3",
-                "map_1/while/strided_slice/Enter",
-                "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3"
-            ],
-            "start_points": [
-                "FirstStageBoxPredictor/concat",
-                "FirstStageBoxPredictor/concat_1",
-                "GridAnchorGenerator/Identity",
-                "Shape",
-                "CropAndResize"
-            ]
-        },
-        "match_kind": "points"
-    }
-```
-
-The `start_points` list contains the following nodes:
-
-* `FirstStageBoxPredictor/concat` node produces box coordinates predictions.
-
-* `FirstStageBoxPredictor/concat_1` node produces classes predictions which will be used for the ROIs.
-
-* `GridAnchorGenerator/Identity` node produces anchor coordinates.
-
-* `Shape` and `CropAndResize` nodes are specified as inputs to correctly isolate the required sub-graph. Refer to the [chapter](Subgraph_Replacement_Model_Optimizer.md) for more information about replacements by points.
-
-The `end_points` list contains the following nodes:
-
-* `CropAndResize` is the node that performs the ROI pooling operation.
-
-* `map_1/TensorArrayStack/TensorArrayGatherV3`, `map_1/while/strided_slice/Enter` and `BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3` are specified to correctly isolate the sub-graph.
-
-The `custom_attributes` dictionary contains attributes where most values are taken from the topology-specific configuration file `samples/configs/faster_rcnn_*.config` of the [TensorFlow Object Detection API repository](https://github.com/tensorflow/models/tree/master/research/object_detection):
-
-* `nms_threshold` is the value of the `first_stage_nms_iou_threshold` parameter.
-
-* `feat_stride` is the value of the `height_stride` and `width_stride` parameters. Inference Engine supports the case when these two values are equal, which is why the replacement configuration file contains just one parameter.
-
-* `max_proposals` is the value of the `max_total_detections` parameter, which is the maximum number of proposal boxes from the Proposal layer and detected boxes.
-
-* `anchor_base_size` is the base size of the generated anchor. The value 256 is the default for this parameter, and it is not specified in the configuration file.
-
-* `anchor_scales` is the value of the `scales` attribute.
-
-* `anchor_aspect_ratios` is the value of the `aspect_ratios` attribute.
-
-* `roi_spatial_scale` is needed for the Inference Engine ROIPooling layer; the specified default value is not actually used.
-
-The identifier for this replacer is `TFObjectDetectionAPIFasterRCNNProposalAndROIPooling`. The Python implementation of this replacer is in the file `/deployment_tools/model_optimizer/extensions/front/tf/FasterRCNNs.py`.
-
-The first four functions of the replacer class are the following:
-
-```python
-class TFObjectDetectionAPIFasterRCNNProposalAndROIPooling(FrontReplacementFromConfigFileSubGraph):
-    """
-    This class replaces sub-graph of operations with Proposal and ROIPooling layers and additional layers transforming
-    tensors from layout of TensorFlow to layout required by Inference Engine.
-    Refer to comments inside the function for more information about performed actions.
- """ - replacement_id = 'TFObjectDetectionAPIFasterRCNNProposalAndROIPooling' - - def run_after(self): - return [PreprocessorReplacement] - - def run_before(self): - return [SecondStagePostprocessorReplacement] - - def output_edges_match(self, graph: nx.DiGraph, match: SubgraphMatch, new_sub_graph: dict): - return {match.output_node(0)[0].id: new_sub_graph['roi_pooling_node'].id} - - def nodes_to_remove(self, graph: nx.MultiDiGraph, match: SubgraphMatch): - new_list = match.matched_nodes_names().copy() - # do not remove nodes that produce box predictions and class predictions - new_list.remove(match.single_input_node(0)[0].id) - new_list.remove(match.single_input_node(1)[0].id) - return new_list -``` - -The function `run_after` returns list of Python classes inherited from one of the replacer classes (`FrontReplacementOp`, `FrontReplacementPattern`, `FrontReplacementFromConfigFileSubGraph` etc) those current sub-graph replacement class must be run after. In this case the replacer must be run after the `Preprocessor` is removed by the `PreprocessorReplacement` replacer. Similar way the `run_before` function is used to tell Model Optimizer to execute `SecondStagePostprocessorReplacement` before this replacer. - -The `output_edges_match` function describes matching between the output nodes of the sub-graph before replacement and after. In this case the only needed output node of the sub-graph is the `CropAndResize` node which is identified with `match.output_node(0)[0]`. The new output node which is created in the `generate_sub_graph` function is identified with `new_sub_graph['roi_pooling_node']`. - -The `nodes_to_remove` function takes the default list of nodes to be removed which contains all matched nodes and remove from them two input nodes which are identified with `match.single_input_node(0)[0]` and `match.single_input_node(1)[0]`. These nodes will be connected as inputs to new nodes being generated in the `generate_sub_graph` function so they should node be removed. - -The code generating new sub-graph is the following: - -```python - def generate_sub_graph(self, graph: nx.MultiDiGraph, match: SubgraphMatch): - log.debug('TFObjectDetectionAPIFasterRCNNProposal: matched_nodes = {}'.format(match.matched_nodes_names())) - - config_attrs = match.custom_replacement_desc.custom_attributes - nms_threshold = config_attrs['nms_threshold'] - feat_stride = config_attrs['feat_stride'] - max_proposals = config_attrs['max_proposals'] - anchor_base_size = config_attrs['anchor_base_size'] - roi_spatial_scale = config_attrs['roi_spatial_scale'] - proposal_ratios = config_attrs['anchor_aspect_ratios'] - proposal_scales = config_attrs['anchor_scales'] - anchors_count = len(proposal_ratios) * len(proposal_scales) -``` - -These lines get parameters defined in the sub-graph replacement configuration file and calculate initial anchors count. - -```python - # get the ROIPool size from the CropAndResize which performs the same action - if 'CropAndResize' not in graph.nodes(): - raise Error('Failed to find node with name "CropAndResize" in the topology. Probably this is not Faster' - ' RCNN topology or it is not supported') - roi_pool_size = Node(graph, 'CropAndResize').in_node(3).value[0] -``` - -The code above gets the ROI Pooling spatial output dimension size as a value from the fourth argument of the node with name `CropAndResize`. 
-
-```python
-        # Convolution/matmul node that produces classes predictions
-        # Permute result of the tensor with classes predictions so it will be in a correct layout for Softmax
-        predictions_node = match.single_input_node(1)[0].in_node(0).in_node(0)
-        permute_predictions_op = Permute(graph, {'order': np.array([0, 2, 3, 1])})
-        permute_predictions_node = permute_predictions_op.create_node([], dict(name=predictions_node.name + '/Permute_'))
-        insert_node_after(predictions_node, permute_predictions_node, 0)
-
-        reshape_classes_op = Reshape(graph, {'dim': np.array([0, -1, 2])})
-        reshape_classes_node = reshape_classes_op.create_node([permute_predictions_node],
-                                                              dict(name='Reshape_FirstStageBoxPredictor_Class_'))
-        update_attrs(reshape_classes_node, 'shape_attrs', 'dim')
-
-        softmax_conf_op = Softmax(graph, {'axis': 1})
-        softmax_conf_node = softmax_conf_op.create_node([reshape_classes_node],
-                                                        dict(name='FirstStageBoxPredictor_SoftMax_Class_'))
-```
-
-The output with class predictions from the `FirstStageBoxPredictor` is generated with a convolution operation. The convolution output data layout in TensorFlow is NHWC, while Inference Engine uses the NCHW layout. Model Optimizer by default converts the weights of TensorFlow convolutions to produce an output tensor in the NCHW layout required by Inference Engine. The issue arises because the class predictions tensor is passed through the Softmax operation to produce class probabilities. The Inference Engine Softmax is performed over the fastest-changing dimension, which is 'W' in Inference Engine. Thus, the softmax operation would be performed over the wrong dimension after conversion of the convolution node producing class predictions. The solution is to add Permute and Reshape operations to prepare the input data for Softmax. The Reshape operation is required to make the size of the fastest-changing dimension equal to 2, because there are 2 classes being predicted: background and foreground.
-
-Another issue is that the layout of elements in the predicted classes tensor is different between TensorFlow and the Inference Engine Proposal layer requirements. In TensorFlow, the tensor has the virtual layout [N, H, W, num_anchors, num_classes], while the Inference Engine Proposal layer requires the virtual layout [N, num_classes, num_anchors, H, W]. Thus, it is necessary to reshape, permute, and then reshape again the output of the Softmax to the shape required by the Proposal layer:
-
-```python
-        reshape_softmax_op = Reshape(graph, {'dim': np.array([1, anchors_count, 2, -1])})
-        reshape_softmax_node = reshape_softmax_op.create_node([softmax_conf_node], dict(name='Reshape_Softmax_Class_'))
-        update_attrs(reshape_softmax_node, 'shape_attrs', 'dim')
-
-        permute_reshape_softmax_op = Permute(graph, {'order': np.array([0, 1, 3, 2])})
-        permute_reshape_softmax_node = permute_reshape_softmax_op.create_node([reshape_softmax_node],
-                                                                              dict(name='Permute_'))
-
-        # implement custom reshape infer function because we need to know the input convolution node output dimension
-        # sizes but we can know it only after partial infer
-        reshape_permute_op = Reshape(graph, {'dim': np.ones([4]), 'anchors_count': anchors_count,
-                                             'conv_node': predictions_node})
-        reshape_permute_op.attrs['old_infer'] = reshape_permute_op.attrs['infer']
-        reshape_permute_op.attrs['infer'] = __class__.classes_probabilities_reshape_shape_infer
-        reshape_permute_node = reshape_permute_op.create_node([permute_reshape_softmax_node],
-                                                              dict(name='Reshape_Permute_Class_'))
-        update_attrs(reshape_permute_node, 'shape_attrs', 'dim')
-```
-
-The Proposal layer has 3 inputs: the classes probabilities, the boxes predictions, and the input shape of the image. The first two tensors are ready, so it is only necessary to create a Const operation that produces the third input tensor:
-
-```python
-        # create constant input with the image height, width and scale H and scale W (if present) required for Proposal
-        const_value = np.array([[input_height, input_width, 1]], dtype=np.float32)
-        const_op = Const(graph, dict(value=const_value, shape=const_value.shape))
-        const_node = const_op.create_node([], dict(name='Proposal_const_image_size_'))
-```
-
-Now add the Proposal layer:
-
-```python
-        proposal_op = ProposalOp(graph, dict(min_size=10, framework='tensorflow', box_coordinate_scale=10,
-                                             box_size_scale=5, post_nms_topn=max_proposals, feat_stride=feat_stride,
-                                             ratio=proposal_ratios, scale=proposal_scales, base_size=anchor_base_size,
-                                             pre_nms_topn=2**31 - 1,
-                                             nms_thresh=nms_threshold))
-        proposal_node = proposal_op.create_node([reshape_permute_node,
-                                                 match.single_input_node(0)[0].in_node(0).in_node(0),
-                                                 const_node],
-                                                dict(name=proposal_op.attrs['type'] + '_'))
-```
-
-The box coordinates in TensorFlow are in the "YXYX" layout, while Inference Engine uses the "XYXY" layout, so it is necessary to swap the coordinates produced by the Proposal layer.
-This is implemented with the help of a convolution node with a special filter of size [5, 5]:
-
-```python
-        proposal_reshape_4d_op = Reshape(graph, {'dim': np.array([max_proposals, 1, 1, 5])})
-        proposal_reshape_4d_node = proposal_reshape_4d_op.create_node([proposal_node], dict(name="reshape_4d_"))
-        update_attrs(proposal_reshape_4d_node, 'shape_attrs', 'dim')
-
-        # create convolution node to swap X and Y coordinates in the proposals
-        conv_filter_const_data = np.array(np.array([[1, 0, 0, 0, 0],
-                                                    [0, 0, 1, 0, 0],
-                                                    [0, 1, 0, 0, 0],
-                                                    [0, 0, 0, 0, 1],
-                                                    [0, 0, 0, 1, 0]],
-                                                   dtype=np.float32).reshape([1, 1, 5, 5]), dtype=np.float32)
-        conv_filter_const_op = Const(graph, dict(value=conv_filter_const_data, spatial_dims=np.array([2, 3])))
-        conv_filter_const_node = conv_filter_const_op.create_node([], dict(name="conv_weights"))
-
-        conv_op = Op(graph, {
-            'op': 'Conv2D',
-            'bias_addable': False,
-            'spatial_dims': np.array([1, 2]),
-            'channel_dims': np.array([3]),
-            'batch_dims': np.array([0]),
-            'pad': None,
-            'pad_spatial_shape': None,
-            'input_feature_channel': 2,
-            'output_feature_channel': 2,
-            'output_shape': [max_proposals, 1, 1, 5],
-            'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
-            'stride': np.array([1, 1, 1, 1]),
-            'type': 'Convolution',
-            'group': None,
-            'layout': 'NHWC',
-            'infer': __class__.fake_conv_shape_infer})
-        predictions_node = conv_op.create_node([proposal_reshape_4d_node, conv_filter_const_node], dict(name="conv_"))
-        update_ie_fields(graph.node[predictions_node.id])
-
-        proposal_reshape_2d_op = Reshape(graph, {'dim': np.array([max_proposals, 5])})
-        proposal_reshape_2d_node = proposal_reshape_2d_op.create_node([predictions_node], dict(name="reshape_2d_"))
-        # set specific name for this Reshape operation so we can use it in the DetectionOutput replacer
-        proposal_reshape_2d_node['name'] = 'swapped_proposals'
-```
-
-The ROIPooling layer in TensorFlow is implemented with an operation called `CropAndResize` with bi-linear filtration. The Inference Engine implementation of the ROIPooling layer with bi-linear filtration requires the input box coordinates to be scaled to the [0, 1] interval. Adding an element-wise multiplication of the box coordinates solves this issue:
-
-```python
-        # the TF implementation of Proposal with bi-linear filtration needs proposals scaled by the image size
-        proposal_scale_const = np.array([1.0, 1 / input_height, 1 / input_width, 1 / input_height, 1 / input_width],
-                                        dtype=np.float32)
-        proposal_scale_const_op = Const(graph, dict(value=proposal_scale_const, shape=proposal_scale_const.shape))
-        proposal_scale_const_node = proposal_scale_const_op.create_node([], dict(name='Proposal_scale_const_'))
-
-        scale_proposals_op = Eltwise(graph, {'operation': 'mul'})
-        scale_proposals_node = scale_proposals_op.create_node([proposal_reshape_2d_node, proposal_scale_const_node],
-                                                              dict(name='scale_proposals_'))
-```
-
-The last step is to create the ROIPooling node with 2 inputs: the identified feature maps from the `FirstStageFeatureExtractor` and the scaled output of the Proposal layer:
-
-```python
-        feature_extractor_output_nodes = scope_output_nodes(graph, 'FirstStageFeatureExtractor')
-        if len(feature_extractor_output_nodes) != 1:
-            raise Error("Failed to determine FirstStageFeatureExtractor output node to connect it to the ROIPooling."
-                        "Found the following nodes: {}".format([node.name for node in feature_extractor_output_nodes]))
-
-        roi_pooling_op = ROIPooling(graph, dict(method="bilinear", framework="tensorflow",
-                                                pooled_h=roi_pool_size, pooled_w=roi_pool_size,
-                                                spatial_scale=roi_spatial_scale))
-        roi_pooling_node = roi_pooling_op.create_node([feature_extractor_output_nodes[0], scale_proposals_node],
-                                                      dict(name='ROI_Pooling_'))
-
-        return {'roi_pooling_node': roi_pooling_node}
-```
-
-There are two additional methods implemented in the replacer class:
-
-* `fake_conv_shape_infer` is a stub infer function for the convolution that permutes X and Y coordinates of the Proposal output; it avoids setting a lot of internal attributes required for proper shape inference.
-
-* The `classes_probabilities_reshape_shape_infer` function is used to update the output dimensions of the reshape operation. The output spatial dimensions depend on the convolution output spatial dimensions and thus are not known until the shape inference pass, which is performed after this sub-graph replacement class. So this custom infer function is called instead of the default Reshape shape inference function; it updates the required attribute `dim` of the node with the convolution output spatial dimensions (which are known by the time this inference function is called) and then calls the default Reshape inference function.
-
-```python
-    @staticmethod
-    def fake_conv_shape_infer(node: Node):
-        node.out_node(0).shape = node.in_node(0).shape
-        # call functions to update internal attributes required for correct IR generation
-        mark_input_bins(node)
-        assign_dims_to_weights(node.in_node(1), [0, 1], node.input_feature_channel, node.output_feature_channel, 4)
-
-    @staticmethod
-    def classes_probabilities_reshape_shape_infer(node: Node):
-        # now we can determine the reshape dimensions from Convolution node
-        conv_node = node.conv_node
-        conv_output_shape = conv_node.out_node().shape
-
-        # update desired shape of the Reshape node
-        node.dim = np.array([0, conv_output_shape[1], conv_output_shape[2], node.anchors_count * 2])
-        node.old_infer(node)
-```
-
-The second replacer defined in the sub-graph replacement configuration file replaces the `SecondStagePostprocessor` block and is defined using scope:
-
-```json
-    {
-        "custom_attributes": {
-            "code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
-            "confidence_threshold": 0.01,
-            "keep_top_k": 300,
-            "nms_threshold": 0.6,
-            "pad_mode": "caffe.ResizeParameter.CONSTANT",
-            "resize_mode": "caffe.ResizeParameter.WARP",
-            "max_detections_per_class": 100,
-            "num_classes": 90
-        },
-        "id": "SecondStagePostprocessorReplacement",
-        "inputs": [
-            [
-                {
-                    "node": "Reshape$",
-                    "port": 0
-                }
-            ],
-            [
-                {
-                    "node": "Reshape_1$",
-                    "port": 0
-                }
-            ],
-            [
-                {
-                    "node": "ExpandDims$",
-                    "port": 0
-                }
-            ]
-        ],
-        "instances": [
-            ".*SecondStagePostprocessor/"
-        ],
-        "match_kind": "scope",
-        "outputs": [
-            {
-                "node": "BatchMultiClassNonMaxSuppression/map/TensorArrayStack/TensorArrayGatherV3$",
-                "port": 0
-            }
-        ]
-    }
-```
-
-The replacement code is similar to the `SecondStagePostprocessor` replacement for the SSD topologies. There are two major differences:
-
-* The tensor with bounding boxes doesn't contain locations for class 0 (the background class), but the Inference Engine Detection Output layer requires them. A Const node with dummy values is created and concatenated with the tensor.
-
-* The priors tensor is not constant like in SSDs, so the bounding boxes tensor must be scaled with the variances [0.1, 0.1, 0.2, 0.2].
-
-The differences described above are resolved with the following code:
-
-```python
-        # TF produces locations tensor without boxes for background.
-        # Inference Engine DetectionOutput layer requires background boxes so we generate them with some values
-        # and concatenate with locations tensor
-        fake_background_locs_blob = np.tile([[[1, 1, 2, 2]]], [max_detections_per_class, 1, 1])
-        fake_background_locs_const_op = Const(graph, dict(value=fake_background_locs_blob,
-                                                          shape=fake_background_locs_blob.shape))
-        fake_background_locs_const_node = fake_background_locs_const_op.create_node([])
-
-        reshape_loc_op = Reshape(graph, {'dim': np.array([max_detections_per_class, num_classes, 4])})
-        reshape_loc_node = reshape_loc_op.create_node([match.single_input_node(0)[0].in_node(0)],
-                                                      dict(name='Reshape_loc_'))
-
-        concat_loc_op = Concat(graph, {'axis': 1})
-        concat_loc_node = concat_loc_op.create_node([fake_background_locs_const_node, reshape_loc_node],
-                                                    dict(name='Concat_fake_loc_'))
-
-        # blob with variances
-        variances_blob = np.array([0.1, 0.1, 0.2, 0.2])
-        variances_const_op = Const(graph, dict(value=variances_blob, shape=variances_blob.shape))
-        variances_const_node = variances_const_op.create_node([])
-
-        # reshape locations tensor to 2D so it could be passed to Eltwise which will be converted to ScaleShift
-        reshape_loc_2d_op = Reshape(graph, {'dim': np.array([-1, 4])})
-        reshape_loc_2d_node = reshape_loc_2d_op.create_node([concat_loc_node], dict(name='reshape_locs_2d_'))
-
-        # element-wise multiply locations with variances
-        eltwise_locs_op = Eltwise(graph, {'operation': 'mul'})
-        eltwise_locs_node = eltwise_locs_op.create_node([reshape_loc_2d_node, variances_const_node],
-                                                        dict(name='scale_locs_'))
-```
-
-### Example of Model Optimizer Command-Line for TensorFlow's Faster R-CNNs
-The final command line to convert Faster R-CNNs from the TensorFlow* Object Detection Zoo is the following:
-
-```sh
-./mo.py --input_model= --output=detection_boxes,detection_scores,num_detections --tensorflow_use_custom_operations_config extensions/front/tf/legacy_faster_rcnn_support.json
-```
-
-Note that there are minor changes that should be made to the sub-graph replacement configuration file `/deployment_tools/model_optimizer/extensions/front/tf/legacy_faster_rcnn_support.json` before converting a particular Faster R-CNN topology. Refer to the table below.
-
-### Sub-Graph Replacement Configuration File Parameters to Convert Different Faster R-CNN Models
-|Model Name | Configuration File Changes|
-|:----|:----:|
-| faster_rcnn_inception_v2_coco | None |
-| faster_rcnn_resnet50_coco | None |
-| faster_rcnn_resnet50_lowproposals_coco | None |
-| faster_rcnn_resnet101_coco | None |
-| faster_rcnn_resnet101_lowproposals_coco | None |
-| faster_rcnn_inception_resnet_v2_atrous_coco | "feat_stride: 8" |
-| faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco | "feat_stride: 8" |
diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/TensorFlow_SSD_ObjectDetection_API.md b/docs/MO_DG/prepare_model/customize_model_optimizer/TensorFlow_SSD_ObjectDetection_API.md
deleted file mode 100644
index b43d5de15e21aa..00000000000000
--- a/docs/MO_DG/prepare_model/customize_model_optimizer/TensorFlow_SSD_ObjectDetection_API.md
+++ /dev/null
@@ -1,339 +0,0 @@
-# (Deprecated) Case Study: Converting SSD Models Created with TensorFlow* Object Detection API {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_TensorFlow_SSD_ObjectDetection_API}
-
-This is a deprecated page. Consider reading [this](../convert_model/tf_specific/Convert_Object_Detection_API_Models.md) page instead; it describes the new approach to converting Object Detection API models, which gives results closer to TensorFlow inference.
-
-## Converting Models Created with TensorFlow Object Detection API Version prior to 1.6.0
-
-As explained in the [Sub-graph Replacement in Model Optimizer](Subgraph_Replacement_Model_Optimizer.md) section, there are multiple ways to set up the sub-graph matching. In this example, we focus on defining the sub-graph via a set of "start" and "end" nodes.
-The result of matching is two buckets of nodes:
-* Nodes "between" start and end nodes.
-* Nodes connected to the first list, but just on the constant path (that is, these nodes are not connected to the inputs of the entire graph).
-
-Let's look closer at the SSD models from the TensorFlow* detection model zoo: [SSD MobileNet](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2017_11_17.tar.gz) and [SSD InceptionV2](http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2017_11_17.tar.gz).
-
-A distinct layer of any SSD topology is the `DetectionOutput` layer. This layer is implemented with dozens of primitive operations in TensorFlow, while in Inference Engine, it is one [layer](../../../ops/opset.md). Thus, to convert an SSD model from TensorFlow, the Model Optimizer should replace the entire sub-graph of operations that implement the `DetectionOutput` layer with a single well-known `DetectionOutput` node.
-
-The Inference Engine `DetectionOutput` layer consumes three tensors in the following order:
-
-1. Tensor with locations of bounding boxes
-2. Tensor with confidences for each bounding box
-3. Tensor with prior boxes (anchors in TensorFlow terminology)
-
-The `DetectionOutput` layer produces one tensor with seven numbers for each actual detection (see the note on the output layout below). There are more output tensors in the TensorFlow Object Detection API, but the values in them are consistent with the Inference Engine ones.
-
-The difference with [other examples](Subgraph_Replacement_Model_Optimizer.md) is that here the `DetectionOutput` sub-graph is replaced with a new sub-graph (not a single layer).
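-
-For reference, the seven numbers per detection follow the common Inference Engine `DetectionOutput` layout `[image_id, label, confidence, x_min, y_min, x_max, y_max]` in a blob of shape `[1, 1, N, 7]`. Below is a minimal illustrative sketch of reading such a blob; the `output` array here is a hypothetical placeholder, not produced by the code in this document:
-```python
-import numpy as np
-
-# hypothetical DetectionOutput result of shape [1, 1, N, 7], zero-filled for illustration
-output = np.zeros([1, 1, 200, 7], dtype=np.float32)
-
-for image_id, label, conf, x_min, y_min, x_max, y_max in output[0][0]:
-    if image_id < 0:   # an image_id of -1 marks the end of valid detections
-        break
-    if conf > 0.5:     # keep only confident detections
-        print(int(label), conf, (x_min, y_min, x_max, y_max))
-```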
-
-Look at the sub-graph replacement configuration file `/deployment_tools/model_optimizer/extensions/front/tf/legacy_ssd_support.json` that is used to enable the two models listed above:
-```json
-[
-    {
-        "custom_attributes": {
-            "code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
-            "confidence_threshold": 0.01,
-            "keep_top_k": 200,
-            "nms_threshold": 0.45,
-            "pad_mode": "caffe.ResizeParameter.CONSTANT",
-            "resize_mode": "caffe.ResizeParameter.WARP"
-        },
-        "id": "TFObjectDetectionAPIDetectionOutput",
-        "include_inputs_to_sub_graph": true,
-        "include_outputs_to_sub_graph": true,
-        "instances": {
-            "end_points": [
-                "detection_boxes",
-                "detection_scores",
-                "num_detections"
-            ],
-            "start_points": [
-                "Postprocessor/Shape",
-                "Postprocessor/Slice",
-                "Postprocessor/ExpandDims",
-                "Postprocessor/Reshape_1"
-            ]
-        },
-        "match_kind": "points"
-    },
-    {
-        "custom_attributes": {
-        },
-        "id": "PreprocessorReplacement",
-        "inputs": [
-            [
-                {
-                    "node": "map/Shape$",
-                    "port": 0
-                },
-                {
-                    "node": "map/TensorArrayUnstack/Shape$",
-                    "port": 0
-                },
-                {
-                    "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$",
-                    "port": 2
-                }
-            ]
-        ],
-        "instances": [
-            ".*Preprocessor/"
-        ],
-        "match_kind": "scope",
-        "outputs": [
-            {
-                "node": "sub$",
-                "port": 0
-            },
-            {
-                "node": "map/TensorArrayStack_1/TensorArrayGatherV3$",
-                "port": 0
-            }
-        ]
-    }
-]
-```
-
-**Key lines**:
-
-* Lines 3-10 define static attributes that will be saved to the Intermediate Representation `.xml` file for the `DetectionOutput` layer.
-
-* Lines 12 and 13 define values for attributes that should always be set to "true" for this release of the Model Optimizer. These two attributes are specific to sub-graph matching by points only.
-
-* Lines 14-26 define one instance of the sub-graph to be matched. This is an important difference between sub-graph matching by scope and by points. Several instances could be specified for matching by scope, but matching by points allows specifying just one instance. So the full node names (not regular expressions as in the case of matching by scope) are specified in the `instances` dictionary.
-
-The second sub-graph replacer with identifier `PreprocessorReplacement` is used to remove the `Preprocessor` block from the graph. The replacer removes all nodes from this scope except the nodes performing mean value subtraction and scaling (if applicable). The implementation of the replacer is in the `/deployment_tools/model_optimizer/extensions/front/tf/Preprocessor.py` file.
-
-Now let's analyze the structure of the topologies generated with the Object Detection API. There are several blocks in the graph performing particular tasks:
-
-* `Preprocessor` block resizes, scales and subtracts mean values from the input image.
-
-* `FeatureExtractor` block is a [MobileNet](https://arxiv.org/abs/1704.04861) or other backbone to extract features.
-
-* `MultipleGridAnchorGenerator` block creates initial bounding boxes locations (anchors).
-
-* `Postprocessor` block acts as a `DetectionOutput` layer. So we need to replace the `Postprocessor` block with the `DetectionOutput` layer. It is necessary to add all input nodes of the `Postprocessor` scope to the list `start_points`. Consider the inputs of each of these nodes:
-  * `Postprocessor/Shape` consumes the tensor with locations.
-  * `Postprocessor/Slice` consumes the tensor with confidences.
-  * `Postprocessor/ExpandDims` consumes the tensor with prior boxes.
-  * `Postprocessor/Reshape_1` consumes the tensor with locations similarly to the `Postprocessor/Shape` node.
-    Despite the fact that the last node `Postprocessor/Reshape_1` gets the same tensor as the node `Postprocessor/Shape`, it must be explicitly put into the list.
-
-The Object Detection API `Postprocessor` block generates the output nodes `detection_boxes`, `detection_scores`, `num_detections`, and `detection_classes`.
-
-Now consider the implementation of the sub-graph replacer, available in the `/deployment_tools/model_optimizer/extensions/front/tf/SSDs.py`. The file is rather big, so only some code snippets are used:
-```python
-class PostprocessorReplacement(FrontReplacementFromConfigFileSubGraph):
-    replacement_id = 'TFObjectDetectionAPIDetectionOutput'
-```
-
-These lines define the new `PostprocessorReplacement` class inherited from `FrontReplacementFromConfigFileSubGraph`. `FrontReplacementFromConfigFileSubGraph` is designed to replace a sub-graph of operations described in the configuration file. There are several methods to override to implement the custom replacement logic that we need:
-
-* `generate_sub_graph` performs the new sub-graph generation and returns a dictionary where the key is an alias name for the node and the value is a Node object. The dictionary has the same format as the parameter `match` in the `replace_sub_graph` method in the example with the networkx sub-graph isomorphism pattern. This dictionary is passed as an argument to the next three methods, so it should contain entries for the nodes that the functions need.
-
-* `input_edges_match` specifies the mapping between input edges to the sub-graph before and after the replacement. The key of the dictionary is a tuple specifying an input tensor of the sub-graph before replacement: the sub-graph input node name and the input port number for this node. The value for this key is also a tuple specifying the node where this tensor should be attached during replacement: the node name (or alias name of the node) and the input port for this node. If the port number is zero, the parameter can be omitted, so the key or value is just a node name (alias). The default implementation of the method returns an empty dictionary, so the Model Optimizer does not create new edges.
-
-* `output_edges_match` returns the mapping between old output edges of the matched nodes and the new sub-graph node and output edge index. The format is similar to the dictionary returned in the `input_edges_match` method. The only difference is that instead of specifying input port numbers for the nodes, it is necessary to specify the output port number. Of course, this mapping is needed for the output nodes only. The default implementation of the method returns an empty dictionary, so the Model Optimizer does not create new edges.
-
-* `nodes_to_remove` specifies the list of nodes that the Model Optimizer should remove after the sub-graph replacement. The default implementation of the method removes all sub-graph nodes.
-
-Let's review the replacer code, considering the details of the `DetectionOutput` layer implementation in the Inference Engine. There are several constraints on the input tensors of the `DetectionOutput` layer:
-
-* The tensor with locations must be of shape `[#batch, #prior_boxes * 4]` or `[#batch, #prior_boxes * 5]` depending on whether the locations are shared or not.
-* The tensor with confidences must be of shape `[#batch, #prior_boxes * #classes]`, and the confidence values are in the [0, 1] range, that is, passed through a `softmax` layer.
-* The tensor with prior boxes must be of shape `[#batch, 2, #prior_boxes * 4]`.
-  The Inference Engine expects that it contains variance values, which the TensorFlow Object Detection API does not add.
-
-To enable these models, add `Reshape` operations for the locations and confidences tensors and update the values for the prior boxes to include the variance constants (they are not present in the TensorFlow Object Detection API).
-
-Look at the `generate_sub_graph` method:
-```python
-def generate_sub_graph(self, graph: nx.MultiDiGraph, match: SubgraphMatch):
-    log.debug('PostprocessorReplacement.generate_sub_graph')
-    log.debug('matched_nodes = {}'.format(match.matched_nodes_names()))
-    # softmax to be applied to the confidence
-    softmax_conf_op = Softmax(graph, {'axis': 2, 'nchw_layout': True})
-    softmax_conf_node = softmax_conf_op.add_node(dict(name='DetectionOutput_SoftMax_conf_'))
-    # Inference Engine DetectionOutput layer consumes flattened tensors
-    # reshape operation to flatten locations tensor
-    reshape_loc_op = Reshape(graph, {'dim': np.array([0, -1])})
-    reshape_loc_node = reshape_loc_op.add_node(dict(name='DetectionOutput_Reshape_loc_'))
-    # Inference Engine DetectionOutput layer consumes flattened tensors
-    # reshape operation to flatten confidence tensor
-    reshape_conf_op = Reshape(graph, {'dim': np.array([0, -1])})
-    reshape_conf_node = reshape_conf_op.add_node(dict(name='DetectionOutput_Reshape_conf_'))
-    # create Node object from Op class
-    detection_output_op = DetectionOutput(graph, match.custom_replacement_desc.custom_attributes)
-    detection_output_op.attrs['old_infer'] = detection_output_op.attrs['infer']
-    detection_output_op.attrs['infer'] = __class__.do_infer
-    detection_output_node = detection_output_op.add_node(dict(name=detection_output_op.attrs['type'] + '_'))
-    # create internal edges of the sub-graph. In this case we add edges to connect input port 0 and 1 of the
-    # detection output with output of reshape of locations and reshape of confidence
-    create_edge(softmax_conf_node, reshape_conf_node, 0, 0)
-    create_edge(reshape_loc_node, detection_output_node, 0, 0)
-    create_edge(reshape_conf_node, detection_output_node, 0, 1)
-    return {'detection_output_node': detection_output_node, 'reshape_conf_node': softmax_conf_node,
-            'reshape_loc_node': reshape_loc_node}
-```
-The method has two parameters: the graph to operate on and an instance of the `SubgraphMatch` object, which describes the matched sub-graph. The latter class has several useful methods to get a particular input/output node of the sub-graph by input/output index or by node name pattern. Examples of the usage of these methods are given below.
-
-**Key lines**:
-
-* Lines 6 and 7 create a new instance of an operation of type `Softmax` and the graph Node object corresponding to that operation.
-
-* Lines 11-12 and 16-17 create new instances of the `Reshape` operation to reshape the locations and confidences tensors, respectively.
-
-* Lines 20-23 create a new instance of the `DetectionOutput` operation and the graph Node object corresponding to that operation.
-
-* Lines 27-29 connect the `softmax` node with the `reshape` node and connect the two reshaped locations and confidences tensors with the `DetectionOutput` node.
-
-* Lines 30-31 define a dictionary with aliases for the detection output node and the reshape locations and confidences nodes. These aliases are used in the `input_edges_match` and `output_edges_match` methods.
-
-The `input_edges_match` method is the following:
-```python
-def input_edges_match(self, graph: nx.DiGraph, match: SubgraphMatch, new_sub_graph: dict):
-    locs_consumer_node, locs_consumer_node_port = match.input_nodes(0)[0]
-    conf_consumer_node, conf_consumer_node_port = match.input_nodes(1)[0]
-    priors_consumer_node, priors_consumer_node_port = match.input_nodes(2)[0]
-    # create matching nodes for locations and confidence tensors using simple scheme "old_node_name: new_node_name"
-    # which in fact means "(old_node_name, 0): (new_node_name, 0)", while first '0' means old_port and the second
-    # zero defines 'new_port'.
-    return {locs_consumer_node.id: new_sub_graph['reshape_loc_node'].id,
-            conf_consumer_node.id: new_sub_graph['reshape_conf_node'].id,
-            priors_consumer_node.id: (new_sub_graph['detection_output_node'].id, 2),
-            }
-```
-The method has three parameters: the input `graph`, the `match` object describing the matched sub-graph, and the `new_sub_graph` dictionary with the alias names returned from the `generate_sub_graph` method.
-
-**Key lines**:
-
-* Lines 2-4 initialize the Node objects and input ports for the nodes where the input tensors for the sub-graph are consumed. The method `match.input_nodes(ind)` returns a list of tuples where the first element is a Node object and the second is the input port of this node which consumes the ind-th input tensor of the sub-graph. The `input_points` list in the configuration file defines the order of input tensors to the sub-graph. For example, the `locs_consumer_node` object of type Node is a node that consumes the tensor with locations in the port with number `locs_consumer_node_port`.
-
-* Lines 8-11 define a dictionary with the mapping of tensors as described above. Note that the attribute `id` of the Node object contains the name of the node in the graph.
-
-The `output_edges_match` method is the following:
-```python
-def output_edges_match(self, graph: nx.DiGraph, match: SubgraphMatch, new_sub_graph: dict):
-    # the DetectionOutput in IE produces single tensor, but in TF it produces two tensors, so we need to create only
-    # one output edge match
-    return {match.output_node(0)[0].id: new_sub_graph['detection_output_node'].id}
-```
-
-The method has the same three parameters as the `input_edges_match` method. The returned dictionary contains a mapping just for one tensor, initially produced by the first output node of the sub-graph (which is `detection_boxes` according to the configuration file), to a single output tensor of the created `DetectionOutput` node. In fact, it is possible to use any output node of the initial sub-graph in the mapping, because the sub-graph output nodes are the output nodes of the whole graph (their output is not consumed by any other nodes).
-
-Now, the Model Optimizer knows how to replace the sub-graph. The last step to enable the model is to cut off some parts of the graph not needed during inference.
-
-It is necessary to remove the `Preprocessor` block where the image is resized. The Inference Engine does not support dynamic input shapes, so the Model Optimizer must freeze the input image size, and thus resizing of the image is not necessary. This is achieved by the replacer `/deployment_tools/model_optimizer/extensions/front/tf/Preprocessor.py`, which is executed automatically.
-
-There are several `Switch` operations in the `Postprocessor` block without output edges.
For example:
-```sh
-Postprocessor/BatchMultiClassNonMaxSuppression/map/while/PadOrClipBoxList/cond/cond/switch_t
-Postprocessor/BatchMultiClassNonMaxSuppression/map/while/PadOrClipBoxList/cond/cond/switch_f
-Postprocessor/BatchMultiClassNonMaxSuppression/map/while/PadOrClipBoxList/cond_1/cond/switch_t
-Postprocessor/BatchMultiClassNonMaxSuppression/map/while/PadOrClipBoxList/cond_1/cond/switch_f
-```
-
-Model Optimizer marks these nodes as output nodes of the topology. Because of that, some parts of the `Postprocessor` block are not removed during the sub-graph replacement. In order to fix this issue, it is necessary to specify the output nodes of the graph manually using the `--output` command-line parameter.
-
-### Example of Model Optimizer Command-Line for TensorFlow\* SSD
-
-The final command line to convert SSDs from the TensorFlow Object Detection API Zoo is:
-```shell
-./mo_tf.py --input_model= --tensorflow_use_custom_operations_config extensions/front/tf/legacy_ssd_support.json --output="detection_boxes,detection_scores,num_detections"
-```
-
-## Converting MobileNet V2 model created with TensorFlow Object Detection API
-The [MobileNet V2 model](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_coco_2018_03_29.tar.gz) differs from the previous version, so converting the model requires a new sub-graph replacement configuration file and new command-line parameters. The major differences are:
-
-* The `Preprocessor` block has two outputs: the pre-processed image and the pre-processed image size.
-* The `Postprocessor` block has one more input (in comparison with models created with TensorFlow Object Detection API version 1.6 or lower): the pre-processed image size.
-* Some node names have been changed in the `Postprocessor` block.
-
-The updated sub-graph replacement configuration file `extensions/front/tf/ssd_v2_support.json` reflecting these changes is the following:
-
-```json
-[
-    {
-        "custom_attributes": {
-            "code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
-            "confidence_threshold": 0.01,
-            "keep_top_k": 200,
-            "nms_threshold": 0.6,
-            "pad_mode": "caffe.ResizeParameter.CONSTANT",
-            "resize_mode": "caffe.ResizeParameter.WARP"
-        },
-        "id": "TFObjectDetectionAPIDetectionOutput",
-        "include_inputs_to_sub_graph": true,
-        "include_outputs_to_sub_graph": true,
-        "instances": {
-            "end_points": [
-                "detection_boxes",
-                "detection_scores",
-                "num_detections"
-            ],
-            "start_points": [
-                "Postprocessor/Shape",
-                "Postprocessor/scale_logits",
-                "Postprocessor/ExpandDims",
-                "Postprocessor/Reshape_1",
-                "Postprocessor/ToFloat"
-            ]
-        },
-        "match_kind": "points"
-    },
-    {
-        "custom_attributes": {
-        },
-        "id": "PreprocessorReplacement",
-        "inputs": [
-            [
-                {
-                    "node": "map/Shape$",
-                    "port": 0
-                },
-                {
-                    "node": "map/TensorArrayUnstack/Shape$",
-                    "port": 0
-                },
-                {
-                    "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$",
-                    "port": 2
-                }
-            ]
-        ],
-        "instances": [
-            ".*Preprocessor/"
-        ],
-        "match_kind": "scope",
-        "outputs": [
-            {
-                "node": "sub$",
-                "port": 0
-            },
-            {
-                "node": "map/TensorArrayStack_1/TensorArrayGatherV3$",
-                "port": 0
-            }
-        ]
-    }
-]
-```
-
-### Example of Model Optimizer Command-Line for TensorFlow SSD MobileNet V2
-The final command line to convert MobileNet SSD V2 from the TensorFlow Object Detection Zoo is the following:
-
-```sh
-./mo_tf.py --input_model= --tensorflow_use_custom_operations_config extensions/front/tf/ssd_v2_support.json --output="detection_boxes,detection_scores,num_detections"
-```
diff --git a/docs/benchmarks/performance_benchmarks.md b/docs/benchmarks/performance_benchmarks.md
index 9f172d82d99ae8..9247d63541ba28 100644
--- a/docs/benchmarks/performance_benchmarks.md
+++ b/docs/benchmarks/performance_benchmarks.md
@@ -19,6 +19,7 @@ Measuring inference performance involves many variables and is extremely use-cas
+ \endhtmlonly
diff --git a/docs/doxygen/assets/customdoxygen.css b/docs/doxygen/assets/customdoxygen.css
new file mode 100644
index 00000000000000..438b9cc762b406
--- /dev/null
+++ b/docs/doxygen/assets/customdoxygen.css
@@ -0,0 +1,1470 @@
+/*
+******************************************************************************
+Copyright 2017-2021 Intel Corporation
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+****************************************************************************** +*/ + +/* CUSTOM FONTS */ +/* lato-100 - latin */ +@font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: 100; + src: url('fonts/Lato/lato-v16-latin-100.eot'); /* IE9 Compat Modes */ + src: local('Lato Hairline'), local('Lato-Hairline'), + url('fonts/Lato/lato-v16-latin-100.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('fonts/Lato/lato-v16-latin-100.woff2') format('woff2'), /* Super Modern Browsers */ + url('fonts/Lato/lato-v16-latin-100.woff') format('woff'), /* Modern Browsers */ + url('fonts/Lato/lato-v16-latin-100.ttf') format('truetype'), /* Safari, Android, iOS */ + url('fonts/Lato/lato-v16-latin-100.svg#Lato') format('svg'); /* Legacy iOS */ +} +/* lato-100italic - latin */ +@font-face { + font-family: 'Lato'; + font-style: italic; + font-weight: 100; + src: url('fonts/Lato/lato-v16-latin-100italic.eot'); /* IE9 Compat Modes */ + src: local('Lato Hairline Italic'), local('Lato-HairlineItalic'), + url('fonts/Lato/lato-v16-latin-100italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('fonts/Lato/lato-v16-latin-100italic.woff2') format('woff2'), /* Super Modern Browsers */ + url('fonts/Lato/lato-v16-latin-100italic.woff') format('woff'), /* Modern Browsers */ + url('fonts/Lato/lato-v16-latin-100italic.ttf') format('truetype'), /* Safari, Android, iOS */ + url('fonts/Lato/lato-v16-latin-100italic.svg#Lato') format('svg'); /* Legacy iOS */ +} +/* lato-300 - latin */ +@font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: 300; + src: url('fonts/Lato/lato-v16-latin-300.eot'); /* IE9 Compat Modes */ + src: local('Lato Light'), local('Lato-Light'), + url('fonts/Lato/lato-v16-latin-300.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('fonts/Lato/lato-v16-latin-300.woff2') format('woff2'), /* Super Modern Browsers */ + url('fonts/Lato/lato-v16-latin-300.woff') format('woff'), /* Modern Browsers */ + url('fonts/Lato/lato-v16-latin-300.ttf') format('truetype'), /* Safari, Android, iOS */ + url('fonts/Lato/lato-v16-latin-300.svg#Lato') format('svg'); /* Legacy iOS */ +} +/* lato-300italic - latin */ +@font-face { + font-family: 'Lato'; + font-style: italic; + font-weight: 300; + src: url('fonts/Lato/lato-v16-latin-300italic.eot'); /* IE9 Compat Modes */ + src: local('Lato Light Italic'), local('Lato-LightItalic'), + url('fonts/Lato/lato-v16-latin-300italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('fonts/Lato/lato-v16-latin-300italic.woff2') format('woff2'), /* Super Modern Browsers */ + url('fonts/Lato/lato-v16-latin-300italic.woff') format('woff'), /* Modern Browsers */ + url('fonts/Lato/lato-v16-latin-300italic.ttf') format('truetype'), /* Safari, Android, iOS */ + url('fonts/Lato/lato-v16-latin-300italic.svg#Lato') format('svg'); /* Legacy iOS */ +} +/* lato-regular - latin */ +@font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: 400; + src: url('fonts/Lato/lato-v16-latin-regular.eot'); /* IE9 Compat Modes */ + src: local('Lato Regular'), local('Lato-Regular'), + url('fonts/Lato/lato-v16-latin-regular.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('fonts/Lato/lato-v16-latin-regular.woff2') format('woff2'), /* Super Modern Browsers */ + url('fonts/Lato/lato-v16-latin-regular.woff') format('woff'), /* Modern Browsers */ + url('fonts/Lato/lato-v16-latin-regular.ttf') format('truetype'), /* Safari, Android, iOS */ + url('fonts/Lato/lato-v16-latin-regular.svg#Lato') format('svg'); /* 
Legacy iOS */ +} +/* lato-italic - latin */ +@font-face { + font-family: 'Lato'; + font-style: italic; + font-weight: 400; + src: url('fonts/Lato/lato-v16-latin-italic.eot'); /* IE9 Compat Modes */ + src: local('Lato Italic'), local('Lato-Italic'), + url('fonts/Lato/lato-v16-latin-italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('fonts/Lato/lato-v16-latin-italic.woff2') format('woff2'), /* Super Modern Browsers */ + url('fonts/Lato/lato-v16-latin-italic.woff') format('woff'), /* Modern Browsers */ + url('fonts/Lato/lato-v16-latin-italic.ttf') format('truetype'), /* Safari, Android, iOS */ + url('fonts/Lato/lato-v16-latin-italic.svg#Lato') format('svg'); /* Legacy iOS */ +} +/* lato-700 - latin */ +@font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: 700; + src: url('fonts/Lato/lato-v16-latin-700.eot'); /* IE9 Compat Modes */ + src: local('Lato Bold'), local('Lato-Bold'), + url('fonts/Lato/lato-v16-latin-700.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('fonts/Lato/lato-v16-latin-700.woff2') format('woff2'), /* Super Modern Browsers */ + url('fonts/Lato/lato-v16-latin-700.woff') format('woff'), /* Modern Browsers */ + url('fonts/Lato/lato-v16-latin-700.ttf') format('truetype'), /* Safari, Android, iOS */ + url('fonts/Lato/lato-v16-latin-700.svg#Lato') format('svg'); /* Legacy iOS */ +} +/* lato-700italic - latin */ +@font-face { + font-family: 'Lato'; + font-style: italic; + font-weight: 700; + src: url('fonts/Lato/lato-v16-latin-700italic.eot'); /* IE9 Compat Modes */ + src: local('Lato Bold Italic'), local('Lato-BoldItalic'), + url('fonts/Lato/lato-v16-latin-700italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('fonts/Lato/lato-v16-latin-700italic.woff2') format('woff2'), /* Super Modern Browsers */ + url('fonts/Lato/lato-v16-latin-700italic.woff') format('woff'), /* Modern Browsers */ + url('fonts/Lato/lato-v16-latin-700italic.ttf') format('truetype'), /* Safari, Android, iOS */ + url('fonts/Lato/lato-v16-latin-700italic.svg#Lato') format('svg'); /* Legacy iOS */ +} +/* lato-900 - latin */ +@font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: 900; + src: url('fonts/Lato/lato-v16-latin-900.eot'); /* IE9 Compat Modes */ + src: local('Lato Black'), local('Lato-Black'), + url('fonts/Lato/lato-v16-latin-900.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('fonts/Lato/lato-v16-latin-900.woff2') format('woff2'), /* Super Modern Browsers */ + url('fonts/Lato/lato-v16-latin-900.woff') format('woff'), /* Modern Browsers */ + url('fonts/Lato/lato-v16-latin-900.ttf') format('truetype'), /* Safari, Android, iOS */ + url('fonts/Lato/lato-v16-latin-900.svg#Lato') format('svg'); /* Legacy iOS */ +} +/* lato-900italic - latin */ +@font-face { + font-family: 'Lato'; + font-style: italic; + font-weight: 900; + src: url('fonts/Lato/lato-v16-latin-900italic.eot'); /* IE9 Compat Modes */ + src: local('Lato Black Italic'), local('Lato-BlackItalic'), + url('fonts/Lato/lato-v16-latin-900italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('fonts/Lato/lato-v16-latin-900italic.woff2') format('woff2'), /* Super Modern Browsers */ + url('fonts/Lato/lato-v16-latin-900italic.woff') format('woff'), /* Modern Browsers */ + url('fonts/Lato/lato-v16-latin-900italic.ttf') format('truetype'), /* Safari, Android, iOS */ + url('fonts/Lato/lato-v16-latin-900italic.svg#Lato') format('svg'); /* Legacy iOS */ +} + +* { + box-sizing: border-box; +} + +body, +table, +div, +p, +dl { + font: normal 400 1rem/1.25 
"Lato", "Helvetica", sans-serif; +} + +body { + background: white; + color: #555555; + display: -webkit-box; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -ms-flex-direction: column; + flex-direction: column; + margin: 0; + min-height: 100vh; + max-width: 100%; + overflow-y: scroll; + padding: 0; + position: relative; + -webkit-font-kerning: normal; + font-kerning: normal; + -webkit-font-feature-settings: "liga"; + -ms-font-feature-settings: "liga"; + font-feature-settings: "liga"; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + text-rendering: optimizeLegibility; +} + +/* Optimization Notice */ + +div.opt-notice { + text-align:center; + width: 100%; + margin: auto; + padding: 1.5vh 20px; +} + +div.opt-notice a{ + text-decoration: underline; +} + +/* Failsafe in case JS is turned off */ +body > .header, +body > div.header, +body > .contents, +body > div.contents { + margin-left: auto; + margin-right: auto; + max-width: 1064px; + padding: 0 20px; +} +/* end failsafe */ + +a { + color: #368dcc; + font-weight: inherit; + text-decoration: none; +} + +a.el { + font-weight: inherit; +} + +a:hover { + color: #368dcc; + text-decoration: none; + cursor: pointer; +} + +a:visited, +.contents a:visited { + color: #368dcc; +} + +p { + margin: 1rem 0 1.5rem 0; +} + +p.startli, +p.startdd { + margin-top: 0; +} + +ol, +ul { + margin: 0 0 1.5rem 0; +} + +li { + margin-bottom: 1.1rem; +} + +li .image { + padding-left: 0; +} + +img { + max-width: 100%; + cursor: pointer; +} + +.image { + margin: 2.5rem 0; + padding: 0 1.875rem; + display: inline-block; +} + +hr { + background: #ececec; + border: none; + height: 1px; + margin: 1.5rem 0; + width: 100%; +} + +blockquote, .blockquote_note { + background: #ebf3fc; + border-left: 5px solid #2171b8; + font: inherit; + font-size: 0.875rem; + margin: 0 0 2.5rem 0; + padding: 0.5rem 1.5rem 1.5rem 1.5rem; +} + +.blockquote_caution { + background: #fcf4e7; + border-color: #ffb133; +} + +.blockquote_tip { + background: #effaee; + border-color: #0c6800; +} + +blockquote.blockquote_warning { + background: #ffebeb; + border-color: #d80000; +} + +blockquote p, +blockquote ul { + margin-bottom: 1.25rem; +} + +blockquote > ul { + list-style: disc; +} + +blockquote p:last-child, +blockquote ul:last-child, +blockquote ul li:last-child { + margin-bottom: 0; +} + +/* + min-height calculation: + header height: 120 + footer height: 254 + contents bottom margin: 20 +*/ + +#container { + min-height: calc(100vh - 394px); + padding: 0; + padding-bottom:200px; + margin: 150px auto 0 auto; + width: 800px; +} + +.textblock { + margin-bottom: 2.5rem; +} + +div.summary { + display: none; +} + +/* make sure that the width of the contents section is everything but left and right columns */ +@media screen and (max-width: 1400px) { + #container { + margin-left: 20rem; + width: 70%; + } +} + +/* at smaller breakpoints, only account for left nav column & gutter */ +/* +@media screen and (max-width: 1200px) { + #left-nav + div.contents, + #contents-nav + div.contents { + width: calc(100% - 297px); + margin-right: 0; + } +} +*/ + +h1, +div.header, +h2, +h2.groupheader, +h3, +h4, +h5, +h6 { + margin:0; + margin-bottom: 1rem; + margin-top: 2.5rem; +} + +h2, +h2.groupheader { + border: 0; + color: inherit; + font: normal 400 1.75rem/1.25 "Lato", "Helvetica", sans-serif; +} + +h3 { + font: normal 400 1.375rem/1.25 "Lato", "Helvetica", sans-serif; +} + +h4 { + font: normal 400 1.25/1.25 "Lato", "Helvetica", 
sans-serif; +} + +/* "H1" headings */ + +div.header { + background: none; + border: 0; +} + +div.headertitle { + padding: 0; +} + +h1, +.title { + color: #555555; + font: normal 400 2.25rem/1.25 "Lato", "Helvetica", sans-serif; +} + +.title { + margin:0; +} + +/* END "H1" headings */ + +/* Tables */ +.table-wrapper { + margin: 0 0 2.5rem 0; + overflow-x: auto; + overflow-y: hidden; +} + +table, +table.doxtable, +table.markdownTable { + border-collapse: collapse; + margin: 0; + width: 100%; +} + +table tr.heading td { + padding: 0; +} + +table.doxtable td, +table.doxtable th, +table.markdownTable td, +table.markdownTable th { + background: transparent; + border: 0; + border-bottom: 1px solid #ececec; + color: inherit; + font: inherit; + padding: 1rem; +} + +table.doxtable td:first-child, +table.doxtable th:first-child, +table.markdownTable td:first-child, +table.markdownTable th:first-child { + padding-left: 0; +} + +table.doxtable td:last-child, +table.doxtable th:last-child, +table.markdownTable td:last-child, +table.markdownTable th:last-child { + padding-right: 0; +} + +table.doxtable th, +table.markdownTable th { + border-bottom: 2px solid #ececec; + color: #777; + font-size: 0.875rem; + letter-spacing: 0.05em; + line-height: 1em; + padding-bottom: 0.5rem; + padding-top: 0.5rem; + text-transform: uppercase; +} + +table.doxtable th.markdownTableHeadCenter, +table.markdownTable th.markdownTableHeadCenter { + text-align: center; +} + +table.doxtable th.markdownTableHeadRight, +table.markdownTable th.markdownTableHeadRight { + text-align: right; +} + +table .image { + margin: 0; + padding: 0; +} + +table .markdownTableBodyNone .image, +table .markdownTableBodyLeft .image { + text-align: left; +} + +table h2.groupheader { + border-bottom: 2px solid #ececec; + color: inherit; + margin-bottom: 0; +} + +table.memberdecls td.memSeparator { + border-color: #ececec; + height: 0px; + line-height: 0px; +} + +.mdescLeft, +.mdescRight, +.memItemLeft, +.memItemRight, +.memTemplItemLeft, +.memTemplItemRight, +.memTemplParams { + padding-bottom: 5px; + padding-top: 5px; +} +/* END tables */ + +/* =========================================================== */ +/* H E A D E R */ + +#top { + background: #003C71; + position: fixed; + transform: translateZ(0); + top: 0; + width: 100%; + z-index: 1000; +} + +#titlearea { + color: white; + margin: 0 auto; + padding: 1rem auto; + position: relative; + transition: height 0.3s ease; + min-width: 768px; + max-width: 100%; + display: flex; + flex-direction: row; + flex-wrap: nowrap; + justify-content: space-between; + border:none; +} + +#projectalign { + margin-left:3.75rem; +} + +#projectalign, +#MSearchBox { + width: 18.375rem; + white-space: nowrap; + float: none; + right: 0px; + background: none; + box-shadow: none; +} + +#projectname { + font: inherit; + font-size: 1.25em; + line-height: 1em; + padding: 0; + position: relative; +} + +a.homelink-id { + color: white; + font-size: 1rem; + display:block; + padding: 1.4375rem 0 1rem 0; +} + +a.homelink-id > img { + min-width: 220px; + max-width: 220px; +} + +a.homelink-id > p { + margin: 0; + font-size: 1rem; +} + +#projectnumber { + display: none; + font: inherit; + font-size: 0.75em; +} + +#versionsSelector { + font-size: 0.875rem; + margin-right: 2.25rem; + min-width:90px; + position: relative; +} + +div.ovino-btn { + font-size: 1rem; + line-height: 1.25; + border-radius: 0.25rem; + padding: 0.625rem 1.5rem; + background: #003C71; + color: #ffffff; + white-space: nowrap; +} + +div.ovino-btn:hover { + 
+
+div.ovino-btn > a { color: white; width: 100%; }
+
+#versionsSelector button.version-toggle {
+  background: transparent url(images/icon-accordion-arrow-dn-black.svg) 97% 50% no-repeat;
+  background-size: 0.75rem; border: 0; border-bottom: 1px solid #555555;
+  color: #555555; cursor: pointer; font: inherit; height: 100%;
+  padding-left: 5px; padding-right: 24px;
+}
+
+#versionsList {
+  background: #f9f9f9; border: 2px solid #368dcc;
+  box-shadow: 0 2px 3px rgba(0, 0, 0, 0.5); box-sizing: content-box;
+  color: initial; display: none; font-weight: 300; list-style: none;
+  max-height: 200px; overflow-y: auto; padding: 0.5rem;
+  position: absolute; right: 0; top: 0; width: 100%;
+}
+
+#versionsList.opened { display: block; }
+
+#versionsList li { display: block; margin: 0; }
+
+#versionsList li a { color: #555555; display: block; padding: 0.2rem 0 0.2rem 1.2rem; }
+
+#versionsList li.active a,
+#versionsList li.active a:hover,
+#versionsList li.active a:visited {
+  background: url(images/icon-checkmark.svg) 0 50% no-repeat;
+  background-size: 1em; color: #2171b8;
+}
+
+#nav-path { display: none; }
+
+#main-nav { text-align: center; margin: 0 auto; display: flex; align-items: baseline; padding: 0 0.2rem; }
+
+#main-nav ul#main-menu {
+  list-style: none; margin: 0 auto; padding: 0; width: 100%; text-align: center;
+  display: flex; height: 100%; align-items: stretch; justify-content: center;
+}
+
+#main-nav ul#main-menu > li { margin: 0; display: inline-block; margin-right: 6.875rem; height: 100%; }
+
+#main-nav ul#main-menu > li:last-child { margin-right: 0; }
+
+ul.dropdown-menu { box-shadow: 0 2px 3px rgba(0, 0, 0, 0.1); max-height: 500px; overflow-y: scroll; }
+
+#main-nav ul.dropdown-menu a { font-size: 0.75rem; display: block; }
+
+#main-nav ul#main-menu > li:hover > ul.dropdown-menu {
+  width: 100%; display: flex; margin: 0;
+  padding: 2.25rem 2.9375rem 1.6875rem 2.9375rem; flex-wrap: wrap;
+}
+
+#main-nav ul.dropdown-menu > li { padding: 0 0 0.5625rem 0; margin-bottom: 0; border: 0.75rem solid transparent; }
+
+#main-nav ul.dropdown-menu > li > ul { margin-top: 0.625rem; margin-right: 2.125rem; }
+
+#main-nav ul.dropdown-menu > li > ul:last-child { margin-right: 0; }
+
+#main-nav ul.dropdown-menu > li > ul > li { margin-bottom: 0.4375rem; }
+
+#main-nav ul.dropdown-menu > li > a {
+  display: block; padding: 0.1rem 0px; padding-bottom: 0.625rem;
+  font-weight: bold; font-size: 0.875rem;
+  border-bottom: 2px solid rgba(34, 36, 38, .15);
+}
+
+#main-nav ul.dropdown-menu ul { list-style: none; padding: 0; }
+
+#main-nav ul#main-menu > li > a {
+  display: flex; align-items: center; height: 100%;
+  color: #ffffff; font-size: 0.875rem; font-weight: normal; letter-spacing: 0.07rem;
+  position: relative; text-transform: uppercase; white-space: nowrap;
+}
+
+#main-nav ul#main-menu > li > a:hover { color: #AED1EB; }
+
+a.see-all { font-weight: bold; }
+
+@media screen and (max-width: 1300px) {
+  #main-nav ul#main-menu > li > a { font-size: 0.75rem; }
+
+  #main-nav ul#main-menu > li { margin-right: 2.5rem; }
+
+  #main-nav ul#main-menu { width: 500px; }
+
+  #projectalign, #MSearchBox, #MSearchBox .left { width: 16rem; }
+
+  #download-link { margin-right: 3.28rem; }
+}
+
+#main-nav ul#main-menu > li > a.active { font-weight: bold; }
+
+div.old-version > p { margin: 0; text-align: center; background: #ffb133; padding: 5px; color: white; }
+
+#secondnav {
+  background: #f9f9f9; box-shadow: 0 2px 3px rgba(0, 0, 0, 0.1);
+  display: flex; justify-content: flex-end; padding: 8px;
+}
+
+#download-link { margin-right: 3.75rem; }
+
+.nav-placeholder { background: #ececec; width: 10%; height: 30px; }
+
+/* =========================================================== */
+/* L E F T - N A V */
+
+#left-nav {
+  left: 0; overflow-x: hidden; overflow-y: auto; position: fixed;
+  width: 17.3125rem; margin-top: 20px; max-height: 690px;
+}
+
+#left-nav a { color: inherit; display: block; padding: 3px 0; position: relative; }
+
+#left-nav a:hover { color: #368dcc; }
+
+div.accordion-heading { position: relative; }
+
+#left-nav ul,
+div.accordion-heading { font-size: 0.75rem; }
+
+#left-nav ul { font-weight: 700; list-style: none; margin: 0; padding: 0; width: 100%; }
+
+#left-nav li { margin-top: 0.8125rem; margin-bottom: 0; position: relative; }
+
+#left-nav ul.main-menu > li { margin-bottom: 13px; }
+
+#left-nav ul.main-menu > li > ul > li { margin-bottom: 0.4375rem; margin-top: 0; background: #f3f3f3; }
+
+#left-nav ul.main-menu > li > ul > li:last-child { margin-bottom: 0; }
+
+#left-nav ul.main-menu > li > ul > li > div.accordion-heading > a { font-weight: bold; }
+
+#left-nav ul.main-menu li.active > a,
+#left-nav ul.main-menu li.active > div.accordion-heading > a { font-weight: bold; color: #368dcc; }
+
+#left-nav > ul.main-menu > li > div.accordion-heading {
+  height: 3.875rem; font-weight: bold; color: #ffffff; font-size: 0.875rem; background: #003C71;
+}
+
+#left-nav > ul.main-menu > li > div.accordion-heading > a { top: 50%; transform: translateY(-50%); padding-left: 2rem; }
+
+#left-nav > ul.main-menu > li > div.accordion-heading > span.accordion-trigger { top: 1.9rem; left: 0.6rem; }
+
+div.accordion-heading > span.accordion-trigger { top: 50%; transform: translateY(-50%); }
+
+div.accordion-heading > a { padding-left: 0; }
+
+#left-nav ul.main-menu > li > ul > li > ul > li > ul { background: #f9f9f9; }
+
+#left-nav ul.main-menu > li > ul > li > ul > li > ul > li ul { background: #fefefe; }
+
+#left-nav ul.main-menu > li > ul > li,
+#left-nav ul.main-menu > li > ul > li > ul > li > ul,
+#left-nav ul.main-menu > li > ul > li > ul > li > ul > li ul { padding-top: 1.125rem; padding-bottom: 1.125rem; }
+
+#left-nav ul.main-menu > li:last-child { margin-bottom: 0; }
+
+#left-nav li.accordion > span.accordion-trigger,
+div.accordion-heading > span.accordion-trigger {
+  background: url(images/icon-accordion_arrow_right.svg) center center no-repeat;
+  background-size: contain; border: 5px solid transparent; cursor: pointer;
+  display: block; height: 20px; width: 20px; left: 0.35rem; top: 0.3rem;
+  position: absolute; z-index: 100;
+}
+
+#left-nav li.accordion > div.accordion-heading > span.accordion-trigger {
+  background-image: url(images/icon-accordion-arrow-right-black.svg);
+  height: 21px; top: 0.65rem; width: 21px;
+}
+
+#left-nav > ul.main-menu > li.accordion > div.accordion-heading > span.accordion-trigger {
+  background-image: url(images/icon-accordion-arrow-right-white.svg);
+}
+
+#left-nav li.accordion-opened > div.accordion-heading > span.accordion-trigger {
+  background-image: url(images/icon-accordion-arrow-dn-black.svg);
+}
+
+#left-nav > ul.main-menu > li.accordion-opened > div.accordion-heading > span.accordion-trigger {
+  background-image: url(images/icon-accordion-arrow-dn-white.svg);
+}
+
+#left-nav li.accordion.active > div.accordion-heading > span.accordion-trigger {
+  background: url(images/icon-accordion_arrow_right.svg) center center no-repeat;
+  opacity: 0.59;
+}
+
+#left-nav li.accordion-opened.active > div.accordion-heading > span.accordion-trigger {
+  background: url(images/icon-accordion_arrow_dn.svg) center center no-repeat;
+  opacity: 1;
+}
+
+#left-nav li.accordion.active > div.accordion-heading > span.accordion-trigger { opacity: 1; }
+
+#left-nav li.accordion > ul { display: none; font-weight: 400; overflow: hidden; }
+
+#left-nav ul.main-menu ul li span.accordion-trigger { left: 0.6rem; }
+#left-nav ul.main-menu ul li a { font-size: 0.75rem; padding-left: 2rem; }
+
+#left-nav ul.main-menu ul ul { margin-bottom: 0.5rem; margin-right: 0; width: auto; }
+
+#left-nav ul.main-menu ul ul li span.accordion-trigger { left: 1.7rem; }
+#left-nav ul.main-menu ul ul li a { padding-left: 3rem; position: relative; padding-right: 18px; line-height: 1.25em; }
+
+#left-nav ul.main-menu ul ul ul ul li span.accordion-trigger { left: 3.9rem; }
+#left-nav ul.main-menu ul ul ul ul li a { padding-left: 5rem; }
+
+#left-nav ul.main-menu ul ul ul ul ul li span.accordion-trigger { left: 5rem; }
+#left-nav ul.main-menu ul ul ul ul ul li a { padding-left: 6rem; }
+
+#left-nav ul.main-menu ul ul ul ul ul ul li span.accordion-trigger { left: 6.1rem; }
+#left-nav ul.main-menu ul ul ul ul ul ul li a { padding-left: 7rem; }
+
+#left-nav ul.main-menu ul ul ul ul ul ul ul li span.accordion-trigger { left: 7.2rem; }
+#left-nav ul.main-menu ul ul ul ul ul ul ul li a { padding-left: 8rem; }
+
+#left-nav ul.main-menu ul ul ul li span.accordion-trigger { left: 2.8rem; }
+#left-nav ul.main-menu ul ul ul li a { padding-left: 4rem; }
+
+/* =========================================================== */
+/* C O N T E N T S - N A V */
+
+nav.contents-nav { font-size: 0.875rem; padding-bottom: 5px; margin-top: 80px; }
+
+nav.contents-nav h2.contents-nav-title {
+  color: #555555; font-size: 1rem; font-weight: normal; line-height: 2rem; margin-bottom: 1rem;
+}
+
+nav.contents-nav a:hover,
+div.contents nav.contents-nav a:hover { text-decoration: underline; color: #368dcc; }
+
+nav.contents-nav a.active,
+div.contents nav.contents-nav a.active { color: #003C71; }
+
+nav.contents-nav ul { list-style: none; margin: 0; padding: 0; }
+
+nav.contents-nav ul li:first-child { margin-top: 0; }
+
+nav.contents-nav ul li { margin: .7vh 0; }
+
+#left-nav a.removehover:hover { color: #6e6e6e; text-decoration: none; }
+
+nav.contents-nav ul ul { font-weight: 400; }
+
+nav.contents-nav ul ul li { padding-left: 15px; position: relative; }
+
+#inner-contents-nav { display: none; margin-bottom: 40px; }
+
+#contents-nav {
+  background: #ffffff; overflow-x: hidden; overflow-y: auto; position: fixed;
+  right: 10px; width: 202px; top: 10% !important; margin-right: 50px;
+}
+
+@media screen and (max-width: 1400px) {
+  #inner-contents-nav { display: block; }
+
+  #contents-nav { display: none; }
+}
+
+/* =========================================================== */
+/* F O O T E R */
+
+.footer {
+  background: #555555; color: white; font: inherit;
+  /* font-weight: 300; */
+  position: absolute; bottom: 0; width: 100%;
+  /* height: 200px; */
+}
+
+.footer a { color: #ffffff; }
+
+.footer-content { margin: 0 auto; max-width: 1400px; overflow: hidden; padding: 10px 15px; width: 100%; }
+
+/* fluid typography: scales linearly between 300px and 1600px viewports */
+.footer-column {
+  display: inline-block; margin: 0 10px;
+  font-size: calc(6px + (20 - 14) * ((100vw - 300px) / (1600 - 300)));
+  line-height: calc((6px + (20 - 14) * ((100vw - 300px) / (1600 - 300))) * 1.5);
+}
+
+.footer-column h4 { text-align: left; }
+
+.footer-row { text-align: center; position: relative; height: 50px; }
+
+.footer-last { float: right; margin-right: 0; }
+
+.footer-support,
+.footer-cookies { position: absolute; bottom: 0; }
+
+.footer-support { left: 0; }
+
+.footer-cookies { right: 0; }
+
+.footer ul { list-style: none; margin: 0 auto; padding: 0; width: 100%; text-align: center; }
+
+.footer li { margin: 0; display: inline-block; padding: 0.6em; }
+
+.footer h4 { margin: 0; text-transform: uppercase; }
+
+.copyright { text-align: right; margin-right: 20px; }
+
+.copyright p { font-size: 14px; }
+
+/* Optimization Notice */
+
+div.opt-notice-wrapper { background: #4a4a4a; color: #ffffff; position: fixed; bottom: 0; left: 0; width: 100%; }
+
+p.opt-notice {
+  text-align: center; width: 100%; margin: auto; padding: 1.5vh 20px;
+  font-size: calc(6px + (20 - 14) * ((100vw - 300px) / (1600 - 300)));
+  line-height: calc((6px + (20 - 14) * ((100vw - 300px) / (1600 - 300))) * 1.5);
+}
+
+/* Cookies notification */
+div.cookies-notification {
+  position: fixed; bottom: 0; background: #ebf3fc;
+  padding: 30px; width: 100%; text-align: center; z-index: 1000;
+}
+
+div.cookies-notification button {
+  border: none; background-color: #0F93C2; color: white;
+  padding: 10px 20px; margin-left: 30px; cursor: pointer;
+}
+
+div.cookies-notification p { margin: 0; color: #555555; font-size: 15px; }
+
+div.cookies-notification a,
+div.cookies-notification a:hover { color: #368dcc; text-decoration: none; }
+
+/* =========================================================== */
+/* C O D E - B L O C K S */
+
+code,
+pre,
+div.line { background: #f9f9f9; font-family: monospace, fixed; font-weight: 400; }
+
+div.fragment,
+pre.fragment {
+  background: #f9f9f9; border: none; counter-reset: codegroup;
+  margin: 1.1rem 0; padding: 0.75rem 1rem; overflow: auto;
+}
+
+div.line {
+  box-sizing: content-box; font-size: 12px; line-height: 18px;
+  position: relative; text-indent: 0; white-space: pre;
+}
+
+/* line numbers rendered via a CSS counter, so they are not part of the copyable text */
+div.line::before {
+  color: #d1d0d0; content: counter(codegroup); counter-increment: codegroup;
+  left: 0; position: absolute; text-align: right; width: 3em;
+}
+
+div.line span.lineno { display: none; }
+
+div.fragment div.line:last-of-type { margin-bottom: 0; }
+
+.memtitle { margin-top: 0; width: 100%; }
+
+.memitem { margin-bottom: 15px; display: block !important; }
+
+.class-attr-name { margin-top: 10px; }
+
+.class-attr-desc { overflow-x: auto; }
+
+div.memdoc { overflow-x: auto; }
+
+/* =========================================================== */
+/* S E A R C H */
+
+#MSearchResultsWindow {
+  background-color: white; border: none; border-radius: 0.5rem;
+  box-shadow: 0 3px 4px rgba(163, 163, 163, 0.5);
+  overflow: hidden; padding: 10px; position: fixed;
+}
+
+iframe#MSearchResults { width: 533px; }
+
+#MSearchBox { margin-top: 0; position: relative; height: auto; margin-right: 3.75rem; }
+
+#FSearchBox { float: none; min-height: 100%; margin: 0; position: relative; }
+
+#MSearchBox .left,
+#MSearchBox .right { background: none; height: auto; left: 0; position: relative; top: 0; width: auto; }
+
+#MSearchBox .left { display: block; position: absolute; width: 18.375rem; height: 100%; }
+
+#MSearchBox .left img { display: none; }
+
+#FSearchBox #MSearchField { margin-left: 0; }
+
+#MSearchField {
+  background-size: 1rem; font: inherit; line-height: 30px; margin: 0;
+  background: white; border-radius: 0.25rem; height: 2.125rem; display: none;
+  position: absolute; top: 50%; transform: translateY(-50%); right: 9rem; width: 60vw;
+}
+
+#search-slider {
+  cursor: pointer; height: 20px; width: 20px;
+  position: absolute; top: 50%; transform: translateY(-50%); z-index: 1000; right: 9.1875rem;
+}
+
+#search-slider.closed { background: url(images/icon-search-white.svg) center no-repeat; }
+
+#search-slider.open { background: url(images/icon-close_btn.svg) center no-repeat; }
+
+#MSearchClose { background: url(images/icon-close_btn.svg) no-repeat; height: 16px; margin: 0; padding: 0; top: 8px; width: 16px; }
+
+.right #MSearchClose { right: 10px; }
+
+#MSearchClose img { display: none; }
+
+/* viewer.js fix */
+.viewer-toolbar > ul > li::before { position: absolute; }
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.eot b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.eot
new file mode 100644
index 00000000000000..0d9dac2ff74676
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.eot differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.svg b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.svg
new file mode 100644
index 00000000000000..077653d2028528
--- /dev/null
+++ b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.svg
@@ -0,0 +1,438 @@
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.ttf b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.ttf
new file mode 100644
index 00000000000000..4f3d84480b5bfa
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.ttf differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.woff b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.woff
new file mode 100644
index 00000000000000..abf0196d8329e4
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.woff differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.woff2 b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.woff2
new file mode 100644
index 00000000000000..f8c37ecde4dafa
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700.woff2 differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.eot b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.eot
new file mode 100644
index 00000000000000..1ab37ef7ba299a
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.eot differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.svg b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.svg
new file mode 100644
index 00000000000000..3e9d4409b347cb
--- /dev/null
+++ b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.svg
@@ -0,0 +1,451 @@
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.ttf b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.ttf
new file mode 100644
index 00000000000000..96b4f823474914
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.ttf differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.woff b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.woff
new file mode 100644
index 00000000000000..cdf0d86aef9e4f
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.woff differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.woff2 b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.woff2
new file mode 100644
index 00000000000000..5b3f882d813f50
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-700italic.woff2 differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.eot b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.eot
new file mode 100644
index 00000000000000..8e32803d7a8125
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.eot differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.svg b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.svg
new file mode 100644
index 00000000000000..e288645b12cec0
--- /dev/null
+++ b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.svg
@@ -0,0 +1,450 @@
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.ttf b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.ttf
new file mode 100644
index 00000000000000..cf3da8be2de3ed
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.ttf differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.woff b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.woff
new file mode 100644
index 00000000000000..95251da0946cd0
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.woff differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.woff2 b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.woff2
new file mode 100644
index 00000000000000..3246c1286f3cf2
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-italic.woff2 differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.eot b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.eot
new file mode 100644
index 00000000000000..c6413069679e40
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.eot differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.svg b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.svg
new file mode 100644
index 00000000000000..55b43fb86a0e91
--- /dev/null
+++ b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.svg
@@ -0,0 +1,435 @@
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.ttf b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.ttf
new file mode 100644
index 00000000000000..3c2d417ea4069f
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.ttf differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.woff b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.woff
new file mode 100644
index 00000000000000..189a0feb590a6a
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.woff differ
diff --git a/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.woff2 b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.woff2
new file mode 100644
index 00000000000000..6904b6649ffa9a
Binary files /dev/null and b/docs/doxygen/assets/fonts/Lato/lato-v16-latin-regular.woff2 differ
diff --git a/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.eot b/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.eot
new file mode 100644
index 00000000000000..550df22a6d601d
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.eot differ
diff --git a/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.svg b/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.svg
new file mode 100644
index 00000000000000..f384d9cea2f601
--- /dev/null
+++ b/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.svg
@@ -0,0 +1,390 @@
diff --git a/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.ttf b/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.ttf
new file mode 100644
index 00000000000000..27363d17d074e5
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.ttf differ
diff --git a/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.woff b/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.woff
new file mode 100644
index 00000000000000..0ea5db896b1f6d
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.woff differ
diff --git a/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.woff2 b/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.woff2
new file mode 100644
index 00000000000000..6163de7b0ab1b4
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-mono-v5-latin-regular.woff2 differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-500.eot b/docs/doxygen/assets/fonts/roboto-v18-latin-500.eot
new file mode 100644
index 00000000000000..849f4a50b77c70
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-500.eot differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-500.svg b/docs/doxygen/assets/fonts/roboto-v18-latin-500.svg
new file mode 100644
index 00000000000000..67eecf442fc7f9
--- /dev/null
+++ b/docs/doxygen/assets/fonts/roboto-v18-latin-500.svg
@@ -0,0 +1,305 @@
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-500.ttf b/docs/doxygen/assets/fonts/roboto-v18-latin-500.ttf
new file mode 100644
index 00000000000000..55b559f6197a91
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-500.ttf differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-500.woff b/docs/doxygen/assets/fonts/roboto-v18-latin-500.woff
new file mode 100644
index 00000000000000..2633e1525dcee3
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-500.woff differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-500.woff2 b/docs/doxygen/assets/fonts/roboto-v18-latin-500.woff2
new file mode 100644
index 00000000000000..8dceabcf6bdfdb
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-500.woff2 differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.eot b/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.eot
new file mode 100644
index 00000000000000..148c4fa0858fd3
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.eot differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.svg b/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.svg
new file mode 100644
index 00000000000000..bed50dcf2e63f9
--- /dev/null
+++ b/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.svg
@@ -0,0 +1,326 @@
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.ttf b/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.ttf
new file mode 100644
index 00000000000000..dcf655fb2e014a
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.ttf differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.woff b/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.woff
new file mode 100644
index 00000000000000..d4d8b157f37b3d
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.woff differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.woff2 b/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.woff2
new file mode 100644
index 00000000000000..1b9589945e075f
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-500italic.woff2 differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-700.eot b/docs/doxygen/assets/fonts/roboto-v18-latin-700.eot
new file mode 100644
index 00000000000000..0df88af13d6563
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-700.eot differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-700.svg b/docs/doxygen/assets/fonts/roboto-v18-latin-700.svg
new file mode 100644
index 00000000000000..11db87dd0eb530
--- /dev/null
+++ b/docs/doxygen/assets/fonts/roboto-v18-latin-700.svg
@@ -0,0 +1,309 @@
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-700.ttf b/docs/doxygen/assets/fonts/roboto-v18-latin-700.ttf
new file mode 100644
index 00000000000000..031bf06cb27a5f
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-700.ttf differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-700.woff b/docs/doxygen/assets/fonts/roboto-v18-latin-700.woff
new file mode 100644
index 00000000000000..a0d26516a8c8d9
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-700.woff differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-700.woff2 b/docs/doxygen/assets/fonts/roboto-v18-latin-700.woff2
new file mode 100644
index 00000000000000..e327dc95b6a3b7
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-700.woff2 differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.eot b/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.eot
new file mode 100644
index 00000000000000..7dc0ff449f7838
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.eot differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.svg b/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.svg
new file mode 100644
index 00000000000000..050bee0e4add2f
--- /dev/null
+++ b/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.svg
@@ -0,0 +1,325 @@
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.ttf b/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.ttf
new file mode 100644
index 00000000000000..c74a6781842903
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.ttf differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.woff b/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.woff
new file mode 100644
index 00000000000000..a3ca246269e4b3
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.woff differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.woff2 b/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.woff2
new file mode 100644
index 00000000000000..d3239a3b13ce54
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-700italic.woff2 differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-italic.eot b/docs/doxygen/assets/fonts/roboto-v18-latin-italic.eot
new file mode 100644
index 00000000000000..0280829fa202fd
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-italic.eot differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-italic.svg b/docs/doxygen/assets/fonts/roboto-v18-latin-italic.svg
new file mode 100644
index 00000000000000..4d59797103447a
--- /dev/null
+++ b/docs/doxygen/assets/fonts/roboto-v18-latin-italic.svg
@@ -0,0 +1,323 @@
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-italic.ttf b/docs/doxygen/assets/fonts/roboto-v18-latin-italic.ttf
new file mode 100644
index 00000000000000..91cd95e2866c8f
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-italic.ttf differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-italic.woff b/docs/doxygen/assets/fonts/roboto-v18-latin-italic.woff
new file mode 100644
index 00000000000000..27c34da2a63a44
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-italic.woff differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-italic.woff2 b/docs/doxygen/assets/fonts/roboto-v18-latin-italic.woff2
new file mode 100644
index 00000000000000..3791c883e8d0e8
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-italic.woff2 differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-regular.eot b/docs/doxygen/assets/fonts/roboto-v18-latin-regular.eot
new file mode 100644
index 00000000000000..a0780d6e3ffaa2
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-regular.eot differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-regular.svg b/docs/doxygen/assets/fonts/roboto-v18-latin-regular.svg
new file mode 100644
index 00000000000000..627f5a368ca791
--- /dev/null
+++ b/docs/doxygen/assets/fonts/roboto-v18-latin-regular.svg
@@ -0,0 +1,308 @@
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-regular.ttf b/docs/doxygen/assets/fonts/roboto-v18-latin-regular.ttf
new file mode 100644
index 00000000000000..b91bf3f7e31942
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-regular.ttf differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-regular.woff b/docs/doxygen/assets/fonts/roboto-v18-latin-regular.woff
new file mode 100644
index 00000000000000..92dfacc618f920
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-regular.woff differ
diff --git a/docs/doxygen/assets/fonts/roboto-v18-latin-regular.woff2 b/docs/doxygen/assets/fonts/roboto-v18-latin-regular.woff2
new file mode 100644
index 00000000000000..7e854e669b7517
Binary files /dev/null and b/docs/doxygen/assets/fonts/roboto-v18-latin-regular.woff2 differ
diff --git a/docs/doxygen/assets/images/404-error.svg b/docs/doxygen/assets/images/404-error.svg
new file mode 100644
index 00000000000000..96feb223411034
--- /dev/null
+++ b/docs/doxygen/assets/images/404-error.svg
@@ -0,0 +1,123 @@
diff --git a/docs/doxygen/assets/images/favicon.ico b/docs/doxygen/assets/images/favicon.ico
new file mode 100644
index 00000000000000..d8d6993caa8245
Binary files /dev/null and b/docs/doxygen/assets/images/favicon.ico differ
diff --git a/docs/doxygen/assets/images/icon-accordion-arrow-dn-black.svg b/docs/doxygen/assets/images/icon-accordion-arrow-dn-black.svg
new file mode 100644
index 00000000000000..bccf2c2718a1fd
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-accordion-arrow-dn-black.svg
@@ -0,0 +1,84 @@
diff --git a/docs/doxygen/assets/images/icon-accordion-arrow-dn-white.svg b/docs/doxygen/assets/images/icon-accordion-arrow-dn-white.svg
new file mode 100644
index 00000000000000..f881d968739228
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-accordion-arrow-dn-white.svg
@@ -0,0 +1,84 @@
diff --git a/docs/doxygen/assets/images/icon-accordion-arrow-right-black.svg b/docs/doxygen/assets/images/icon-accordion-arrow-right-black.svg
new file mode 100644
index 00000000000000..a7744ad033fdc5
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-accordion-arrow-right-black.svg
@@ -0,0 +1,84 @@
diff --git a/docs/doxygen/assets/images/icon-accordion-arrow-right-white.svg b/docs/doxygen/assets/images/icon-accordion-arrow-right-white.svg
new file mode 100644
index 00000000000000..f22a04d749babd
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-accordion-arrow-right-white.svg
@@ -0,0 +1,84 @@
diff --git a/docs/doxygen/assets/images/icon-accordion_arrow_dn--hover.svg b/docs/doxygen/assets/images/icon-accordion_arrow_dn--hover.svg
new file mode 100644
index 00000000000000..b9188f654cc15a
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-accordion_arrow_dn--hover.svg
@@ -0,0 +1,17 @@
diff --git a/docs/doxygen/assets/images/icon-accordion_arrow_dn--white.svg b/docs/doxygen/assets/images/icon-accordion_arrow_dn--white.svg
new file mode 100644
index 00000000000000..0aa5a9cb067180
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-accordion_arrow_dn--white.svg
@@ -0,0 +1,17 @@
diff --git a/docs/doxygen/assets/images/icon-accordion_arrow_dn.svg b/docs/doxygen/assets/images/icon-accordion_arrow_dn.svg
new file mode 100644
index 00000000000000..fcd70fe9f63aa6
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-accordion_arrow_dn.svg
@@ -0,0 +1,17 @@
diff --git a/docs/doxygen/assets/images/icon-accordion_arrow_right--hover.svg b/docs/doxygen/assets/images/icon-accordion_arrow_right--hover.svg
new file mode 100644
index 00000000000000..51e203821515e0
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-accordion_arrow_right--hover.svg
@@ -0,0 +1,17 @@
diff --git a/docs/doxygen/assets/images/icon-accordion_arrow_right--white.svg b/docs/doxygen/assets/images/icon-accordion_arrow_right--white.svg
new file mode 100644
index 00000000000000..8db35966088624
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-accordion_arrow_right--white.svg
@@ -0,0 +1,17 @@
diff --git a/docs/doxygen/assets/images/icon-accordion_arrow_right.svg b/docs/doxygen/assets/images/icon-accordion_arrow_right.svg
new file mode 100644
index 00000000000000..d584ff26465b1e
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-accordion_arrow_right.svg
@@ -0,0 +1,17 @@
diff --git a/docs/doxygen/assets/images/icon-arrow-down.svg b/docs/doxygen/assets/images/icon-arrow-down.svg
new file mode 100644
index 00000000000000..3a27257202354d
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-arrow-down.svg
@@ -0,0 +1,11 @@
diff --git a/docs/doxygen/assets/images/icon-checkmark.svg b/docs/doxygen/assets/images/icon-checkmark.svg
new file mode 100644
index 00000000000000..1d8d8d293c5412
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-checkmark.svg
@@ -0,0 +1,10 @@
diff --git a/docs/doxygen/assets/images/icon-close_btn.svg b/docs/doxygen/assets/images/icon-close_btn.svg
new file mode 100644
index 00000000000000..9d46da5f00050b
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-close_btn.svg
@@ -0,0 +1,3 @@
diff --git a/docs/doxygen/assets/images/icon-search-white.svg b/docs/doxygen/assets/images/icon-search-white.svg
new file mode 100644
index 00000000000000..efcd933e1e0d02
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-search-white.svg
@@ -0,0 +1,66 @@
diff --git a/docs/doxygen/assets/images/icon-search.svg b/docs/doxygen/assets/images/icon-search.svg
new file mode 100644
index 00000000000000..5042e336b60702
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-search.svg
@@ -0,0 +1,8 @@
diff --git a/docs/doxygen/assets/images/icon-section-api-references.svg b/docs/doxygen/assets/images/icon-section-api-references.svg
new file mode 100644
index 00000000000000..16df2b971d66f6
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-section-api-references.svg
@@ -0,0 +1,29 @@
diff --git a/docs/doxygen/assets/images/icon-section-getting-started.svg b/docs/doxygen/assets/images/icon-section-getting-started.svg
new file mode 100644
index 00000000000000..a4738d1048f71c
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-section-getting-started.svg
@@ -0,0 +1,24 @@
diff --git a/docs/doxygen/assets/images/icon-section-guides.svg b/docs/doxygen/assets/images/icon-section-guides.svg
new file mode 100644
index 00000000000000..f40a3051126b9b
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-section-guides.svg
@@ -0,0 +1,19 @@
diff --git a/docs/doxygen/assets/images/icon-section-how-tos.svg b/docs/doxygen/assets/images/icon-section-how-tos.svg
new file mode 100644
index 00000000000000..368fc0209b359e
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-section-how-tos.svg
@@ -0,0 +1,66 @@
diff --git a/docs/doxygen/assets/images/icon-section-resources.svg b/docs/doxygen/assets/images/icon-section-resources.svg
new file mode 100644
index 00000000000000..df49c596d6da71
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-section-resources.svg
@@ -0,0 +1,23 @@
diff --git a/docs/doxygen/assets/images/icon-teaser-eye.svg b/docs/doxygen/assets/images/icon-teaser-eye.svg
new file mode 100644
index 00000000000000..63b729969e8b68
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-teaser-eye.svg
@@ -0,0 +1,20 @@
diff --git a/docs/doxygen/assets/images/icon-teaser-screen.svg b/docs/doxygen/assets/images/icon-teaser-screen.svg
new file mode 100644
index 00000000000000..5562ef574669f4
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-teaser-screen.svg
@@ -0,0 +1,19 @@
diff --git a/docs/doxygen/assets/images/icon-teaser-spoke-diagram.svg b/docs/doxygen/assets/images/icon-teaser-spoke-diagram.svg
new file mode 100644
index 00000000000000..1a7cc71453d03e
--- /dev/null
+++ b/docs/doxygen/assets/images/icon-teaser-spoke-diagram.svg
@@ -0,0 +1,7 @@
diff --git a/docs/doxygen/assets/images/int-openvino-wht.svg b/docs/doxygen/assets/images/int-openvino-wht.svg
new file mode 100644
index 00000000000000..300620df3b2bf3
--- /dev/null
+++ b/docs/doxygen/assets/images/int-openvino-wht.svg
@@ -0,0 +1,44 @@
diff --git a/docs/doxygen/assets/images/logo-openvino.svg b/docs/doxygen/assets/images/logo-openvino.svg
new file mode 100644
index 00000000000000..213ef7835adc59
--- /dev/null
+++ b/docs/doxygen/assets/images/logo-openvino.svg
@@ -0,0 +1,36 @@
diff --git a/docs/doxygen/assets/jquery-2.2.4.min.js b/docs/doxygen/assets/jquery-2.2.4.min.js
new file mode 100644
index 00000000000000..5c82cc00e72caf
--- /dev/null
+++ b/docs/doxygen/assets/jquery-2.2.4.min.js
@@ -0,0 +1,4 @@
+/*!
jQuery v2.2.4 | (c) jQuery Foundation | jquery.org/license */ +!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=a.document,e=c.slice,f=c.concat,g=c.push,h=c.indexOf,i={},j=i.toString,k=i.hasOwnProperty,l={},m="2.2.4",n=function(a,b){return new n.fn.init(a,b)},o=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return b.toUpperCase()};n.fn=n.prototype={jquery:m,constructor:n,selector:"",length:0,toArray:function(){return e.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:e.call(this)},pushStack:function(a){var b=n.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a){return n.each(this,a)},map:function(a){return this.pushStack(n.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(e.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor()},push:g,sort:c.sort,splice:c.splice},n.extend=n.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||n.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(a=arguments[h]))for(b in a)c=g[b],d=a[b],g!==d&&(j&&d&&(n.isPlainObject(d)||(e=n.isArray(d)))?(e?(e=!1,f=c&&n.isArray(c)?c:[]):f=c&&n.isPlainObject(c)?c:{},g[b]=n.extend(j,f,d)):void 0!==d&&(g[b]=d));return g},n.extend({expando:"jQuery"+(m+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===n.type(a)},isArray:Array.isArray,isWindow:function(a){return null!=a&&a===a.window},isNumeric:function(a){var b=a&&a.toString();return!n.isArray(a)&&b-parseFloat(b)+1>=0},isPlainObject:function(a){var b;if("object"!==n.type(a)||a.nodeType||n.isWindow(a))return!1;if(a.constructor&&!k.call(a,"constructor")&&!k.call(a.constructor.prototype||{},"isPrototypeOf"))return!1;for(b in a);return void 0===b||k.call(a,b)},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?i[j.call(a)]||"object":typeof a},globalEval:function(a){var b,c=eval;a=n.trim(a),a&&(1===a.indexOf("use strict")?(b=d.createElement("script"),b.text=a,d.head.appendChild(b).parentNode.removeChild(b)):c(a))},camelCase:function(a){return a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b){var c,d=0;if(s(a)){for(c=a.length;c>d;d++)if(b.call(a[d],d,a[d])===!1)break}else for(d in a)if(b.call(a[d],d,a[d])===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(o,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?n.merge(c,"string"==typeof a?[a]:a):g.call(c,a)),c},inArray:function(a,b,c){return null==b?-1:h.call(b,a,c)},merge:function(a,b){for(var c=+b.length,d=0,e=a.length;c>d;d++)a[e++]=b[d];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,e,g=0,h=[];if(s(a))for(d=a.length;d>g;g++)e=b(a[g],g,c),null!=e&&h.push(e);else 
for(g in a)e=b(a[g],g,c),null!=e&&h.push(e);return f.apply([],h)},guid:1,proxy:function(a,b){var c,d,f;return"string"==typeof b&&(c=a[b],b=a,a=c),n.isFunction(a)?(d=e.call(arguments,2),f=function(){return a.apply(b||this,d.concat(e.call(arguments)))},f.guid=a.guid=a.guid||n.guid++,f):void 0},now:Date.now,support:l}),"function"==typeof Symbol&&(n.fn[Symbol.iterator]=c[Symbol.iterator]),n.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(a,b){i["[object "+b+"]"]=b.toLowerCase()});function s(a){var b=!!a&&"length"in a&&a.length,c=n.type(a);return"function"===c||n.isWindow(a)?!1:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ga(),z=ga(),A=ga(),B=function(a,b){return a===b&&(l=!0),0},C=1<<31,D={}.hasOwnProperty,E=[],F=E.pop,G=E.push,H=E.push,I=E.slice,J=function(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1},K="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",L="[\\x20\\t\\r\\n\\f]",M="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",N="\\["+L+"*("+M+")(?:"+L+"*([*^$|!~]?=)"+L+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+M+"))|)"+L+"*\\]",O=":("+M+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+N+")*)|.*)\\)|)",P=new RegExp(L+"+","g"),Q=new RegExp("^"+L+"+|((?:^|[^\\\\])(?:\\\\.)*)"+L+"+$","g"),R=new RegExp("^"+L+"*,"+L+"*"),S=new RegExp("^"+L+"*([>+~]|"+L+")"+L+"*"),T=new RegExp("="+L+"*([^\\]'\"]*?)"+L+"*\\]","g"),U=new RegExp(O),V=new RegExp("^"+M+"$"),W={ID:new RegExp("^#("+M+")"),CLASS:new RegExp("^\\.("+M+")"),TAG:new RegExp("^("+M+"|[*])"),ATTR:new RegExp("^"+N),PSEUDO:new RegExp("^"+O),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+L+"*(even|odd|(([+-]|)(\\d*)n|)"+L+"*(?:([+-]|)"+L+"*(\\d+)|))"+L+"*\\)|)","i"),bool:new RegExp("^(?:"+K+")$","i"),needsContext:new RegExp("^"+L+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+L+"*((?:-\\d)?\\d*)"+L+"*\\)|)(?=[^-]|$)","i")},X=/^(?:input|select|textarea|button)$/i,Y=/^h\d$/i,Z=/^[^{]+\{\s*\[native \w/,$=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,_=/[+~]/,aa=/'|\\/g,ba=new RegExp("\\\\([\\da-f]{1,6}"+L+"?|("+L+")|.)","ig"),ca=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},da=function(){m()};try{H.apply(E=I.call(v.childNodes),v.childNodes),E[v.childNodes.length].nodeType}catch(ea){H={apply:E.length?function(a,b){G.apply(a,I.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fa(a,b,d,e){var f,h,j,k,l,o,r,s,w=b&&b.ownerDocument,x=b?b.nodeType:9;if(d=d||[],"string"!=typeof a||!a||1!==x&&9!==x&&11!==x)return d;if(!e&&((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,p)){if(11!==x&&(o=$.exec(a)))if(f=o[1]){if(9===x){if(!(j=b.getElementById(f)))return d;if(j.id===f)return d.push(j),d}else if(w&&(j=w.getElementById(f))&&t(b,j)&&j.id===f)return d.push(j),d}else{if(o[2])return H.apply(d,b.getElementsByTagName(a)),d;if((f=o[3])&&c.getElementsByClassName&&b.getElementsByClassName)return H.apply(d,b.getElementsByClassName(f)),d}if(c.qsa&&!A[a+" "]&&(!q||!q.test(a))){if(1!==x)w=b,s=a;else if("object"!==b.nodeName.toLowerCase()){(k=b.getAttribute("id"))?k=k.replace(aa,"\\$&"):b.setAttribute("id",k=u),r=g(a),h=r.length,l=V.test(k)?"#"+k:"[id='"+k+"']";while(h--)r[h]=l+" 
"+qa(r[h]);s=r.join(","),w=_.test(a)&&oa(b.parentNode)||b}if(s)try{return H.apply(d,w.querySelectorAll(s)),d}catch(y){}finally{k===u&&b.removeAttribute("id")}}}return i(a.replace(Q,"$1"),b,d,e)}function ga(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ha(a){return a[u]=!0,a}function ia(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ja(a,b){var c=a.split("|"),e=c.length;while(e--)d.attrHandle[c[e]]=b}function ka(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||C)-(~a.sourceIndex||C);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function la(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function na(a){return ha(function(b){return b=+b,ha(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function oa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=fa.support={},f=fa.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fa.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=n.documentElement,p=!f(n),(e=n.defaultView)&&e.top!==e&&(e.addEventListener?e.addEventListener("unload",da,!1):e.attachEvent&&e.attachEvent("onunload",da)),c.attributes=ia(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ia(function(a){return a.appendChild(n.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Z.test(n.getElementsByClassName),c.getById=ia(function(a){return o.appendChild(a).id=u,!n.getElementsByName||!n.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c?[c]:[]}},d.filter.ID=function(a){var b=a.replace(ba,ca);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(ba,ca);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return"undefined"!=typeof b.getElementsByClassName&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=Z.test(n.querySelectorAll))&&(ia(function(a){o.appendChild(a).innerHTML="",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+L+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+L+"*(?:value|"+K+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),ia(function(a){var 
b=n.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+L+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=Z.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ia(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",O)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=Z.test(o.compareDocumentPosition),t=b||Z.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===n||a.ownerDocument===v&&t(v,a)?-1:b===n||b.ownerDocument===v&&t(v,b)?1:k?J(k,a)-J(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,g=[a],h=[b];if(!e||!f)return a===n?-1:b===n?1:e?-1:f?1:k?J(k,a)-J(k,b):0;if(e===f)return ka(a,b);c=a;while(c=c.parentNode)g.unshift(c);c=b;while(c=c.parentNode)h.unshift(c);while(g[d]===h[d])d++;return d?ka(g[d],h[d]):g[d]===v?-1:h[d]===v?1:0},n):n},fa.matches=function(a,b){return fa(a,null,null,b)},fa.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(T,"='$1']"),c.matchesSelector&&p&&!A[b+" "]&&(!r||!r.test(b))&&(!q||!q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fa(b,n,null,[a]).length>0},fa.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fa.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&D.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fa.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},fa.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fa.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fa.selectors={cacheLength:50,createPseudo:ha,match:W,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(ba,ca),a[3]=(a[3]||a[4]||a[5]||"").replace(ba,ca),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||fa.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&fa.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return 
W.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&U.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(ba,ca).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+L+")"+a+"("+L+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=fa.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(P," ")+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h,t=!1;if(q){if(f){while(p){m=b;while(m=m[p])if(h?m.nodeName.toLowerCase()===r:1===m.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){m=q,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n&&j[2],m=n&&q.childNodes[n];while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if(1===m.nodeType&&++t&&m===b){k[a]=[w,n,t];break}}else if(s&&(m=b,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n),t===!1)while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if((h?m.nodeName.toLowerCase()===r:1===m.nodeType)&&++t&&(s&&(l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),k[a]=[w,t]),m===b))break;return t-=e,t===d||t%d===0&&t/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fa.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ha(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=J(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ha(function(a){var b=[],c=[],d=h(a.replace(Q,"$1"));return d[u]?ha(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ha(function(a){return function(b){return fa(a,b).length>0}}),contains:ha(function(a){return a=a.replace(ba,ca),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ha(function(a){return V.test(a||"")||fa.error("unsupported lang: "+a),a=a.replace(ba,ca).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return 
a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Y.test(a.nodeName)},input:function(a){return X.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:na(function(){return[0]}),last:na(function(a,b){return[b-1]}),eq:na(function(a,b,c){return[0>c?c+b:c]}),even:na(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:na(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:na(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:na(function(a,b,c){for(var d=0>c?c+b:c;++db;b++)d+=a[b].value;return d}function ra(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j,k=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(j=b[u]||(b[u]={}),i=j[b.uniqueID]||(j[b.uniqueID]={}),(h=i[d])&&h[0]===w&&h[1]===f)return k[2]=h[2];if(i[d]=k,k[2]=a(b,c,g))return!0}}}function sa(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function ta(a,b,c){for(var d=0,e=b.length;e>d;d++)fa(a,b[d],c);return c}function ua(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(c&&!c(f,d,e)||(g.push(f),j&&b.push(h)));return g}function va(a,b,c,d,e,f){return d&&!d[u]&&(d=va(d)),e&&!e[u]&&(e=va(e,f)),ha(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||ta(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ua(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ua(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?J(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ua(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):H.apply(g,r)})}function wa(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=ra(function(a){return a===b},h,!0),l=ra(function(a){return J(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];f>i;i++)if(c=d.relative[a[i].type])m=[ra(sa(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return va(i>1&&sa(m),i>1&&qa(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(Q,"$1"),c,e>i&&wa(a.slice(i,e)),f>e&&wa(a=a.slice(e)),f>e&&qa(a))}m.push(c)}return sa(m)}function xa(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var l,o,q,r=0,s="0",t=f&&[],u=[],v=j,x=f||e&&d.find.TAG("*",k),y=w+=null==v?1:Math.random()||.1,z=x.length;for(k&&(j=g===n||g||k);s!==z&&null!=(l=x[s]);s++){if(e&&l){o=0,g||l.ownerDocument===n||(m(l),h=!p);while(q=a[o++])if(q(l,g||n,h)){i.push(l);break}k&&(w=y)}c&&((l=!q&&l)&&r--,f&&t.push(l))}if(r+=s,c&&s!==r){o=0;while(q=b[o++])q(t,u,g,h);if(f){if(r>0)while(s--)t[s]||u[s]||(u[s]=F.call(i));u=ua(u)}H.apply(i,u),k&&!f&&u.length>0&&r+b.length>1&&fa.uniqueSort(i)}return k&&(w=y,j=v),t};return c?ha(f):f}return h=fa.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wa(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xa(e,d)),f.selector=a}return 
f},i=fa.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(ba,ca),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=W.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(ba,ca),_.test(j[0].type)&&oa(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qa(j),!a)return H.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,!b||_.test(a)&&oa(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ia(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ia(function(a){return a.innerHTML="","#"===a.firstChild.getAttribute("href")})||ja("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ia(function(a){return a.innerHTML="",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||ja("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ia(function(a){return null==a.getAttribute("disabled")})||ja(K,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),fa}(a);n.find=t,n.expr=t.selectors,n.expr[":"]=n.expr.pseudos,n.uniqueSort=n.unique=t.uniqueSort,n.text=t.getText,n.isXMLDoc=t.isXML,n.contains=t.contains;var u=function(a,b,c){var d=[],e=void 0!==c;while((a=a[b])&&9!==a.nodeType)if(1===a.nodeType){if(e&&n(a).is(c))break;d.push(a)}return d},v=function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c},w=n.expr.match.needsContext,x=/^<([\w-]+)\s*\/?>(?:<\/\1>|)$/,y=/^.[^:#\[\.,]*$/;function z(a,b,c){if(n.isFunction(b))return n.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return n.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(y.test(b))return n.filter(b,a,c);b=n.filter(b,a)}return n.grep(a,function(a){return h.call(b,a)>-1!==c})}n.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?n.find.matchesSelector(d,a)?[d]:[]:n.find.matches(a,n.grep(b,function(a){return 1===a.nodeType}))},n.fn.extend({find:function(a){var b,c=this.length,d=[],e=this;if("string"!=typeof a)return this.pushStack(n(a).filter(function(){for(b=0;c>b;b++)if(n.contains(e[b],this))return!0}));for(b=0;c>b;b++)n.find(a,e[b],d);return d=this.pushStack(c>1?n.unique(d):d),d.selector=this.selector?this.selector+" "+a:a,d},filter:function(a){return this.pushStack(z(this,a||[],!1))},not:function(a){return this.pushStack(z(this,a||[],!0))},is:function(a){return!!z(this,"string"==typeof a&&w.test(a)?n(a):a||[],!1).length}});var A,B=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,C=n.fn.init=function(a,b,c){var e,f;if(!a)return this;if(c=c||A,"string"==typeof a){if(e="<"===a[0]&&">"===a[a.length-1]&&a.length>=3?[null,a,null]:B.exec(a),!e||!e[1]&&b)return!b||b.jquery?(b||c).find(a):this.constructor(b).find(a);if(e[1]){if(b=b instanceof n?b[0]:b,n.merge(this,n.parseHTML(e[1],b&&b.nodeType?b.ownerDocument||b:d,!0)),x.test(e[1])&&n.isPlainObject(b))for(e in b)n.isFunction(this[e])?this[e](b[e]):this.attr(e,b[e]);return this}return f=d.getElementById(e[2]),f&&f.parentNode&&(this.length=1,this[0]=f),this.context=d,this.selector=a,this}return 
a.nodeType?(this.context=this[0]=a,this.length=1,this):n.isFunction(a)?void 0!==c.ready?c.ready(a):a(n):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),n.makeArray(a,this))};C.prototype=n.fn,A=n(d);var D=/^(?:parents|prev(?:Until|All))/,E={children:!0,contents:!0,next:!0,prev:!0};n.fn.extend({has:function(a){var b=n(a,this),c=b.length;return this.filter(function(){for(var a=0;c>a;a++)if(n.contains(this,b[a]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=w.test(a)||"string"!=typeof a?n(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&n.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?n.uniqueSort(f):f)},index:function(a){return a?"string"==typeof a?h.call(n(a),this[0]):h.call(this,a.jquery?a[0]:a):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(n.uniqueSort(n.merge(this.get(),n(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function F(a,b){while((a=a[b])&&1!==a.nodeType);return a}n.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return u(a,"parentNode")},parentsUntil:function(a,b,c){return u(a,"parentNode",c)},next:function(a){return F(a,"nextSibling")},prev:function(a){return F(a,"previousSibling")},nextAll:function(a){return u(a,"nextSibling")},prevAll:function(a){return u(a,"previousSibling")},nextUntil:function(a,b,c){return u(a,"nextSibling",c)},prevUntil:function(a,b,c){return u(a,"previousSibling",c)},siblings:function(a){return v((a.parentNode||{}).firstChild,a)},children:function(a){return v(a.firstChild)},contents:function(a){return a.contentDocument||n.merge([],a.childNodes)}},function(a,b){n.fn[a]=function(c,d){var e=n.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=n.filter(d,e)),this.length>1&&(E[a]||n.uniqueSort(e),D.test(a)&&e.reverse()),this.pushStack(e)}});var G=/\S+/g;function H(a){var b={};return n.each(a.match(G)||[],function(a,c){b[c]=!0}),b}n.Callbacks=function(a){a="string"==typeof a?H(a):n.extend({},a);var b,c,d,e,f=[],g=[],h=-1,i=function(){for(e=a.once,d=b=!0;g.length;h=-1){c=g.shift();while(++h-1)f.splice(c,1),h>=c&&h--}),this},has:function(a){return a?n.inArray(a,f)>-1:f.length>0},empty:function(){return f&&(f=[]),this},disable:function(){return e=g=[],f=c="",this},disabled:function(){return!f},lock:function(){return e=g=[],c||(f=c=""),this},locked:function(){return!!e},fireWith:function(a,c){return e||(c=c||[],c=[a,c.slice?c.slice():c],g.push(c),b||i()),this},fire:function(){return j.fireWith(this,arguments),this},fired:function(){return!!d}};return j},n.extend({Deferred:function(a){var b=[["resolve","done",n.Callbacks("once memory"),"resolved"],["reject","fail",n.Callbacks("once memory"),"rejected"],["notify","progress",n.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return n.Deferred(function(c){n.each(b,function(b,f){var g=n.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&n.isFunction(a.promise)?a.promise().progress(c.notify).done(c.resolve).fail(c.reject):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?n.extend(a,d):d}},e={};return d.pipe=d.then,n.each(b,function(a,f){var 
g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=e.call(arguments),d=c.length,f=1!==d||a&&n.isFunction(a.promise)?d:0,g=1===f?a:n.Deferred(),h=function(a,b,c){return function(d){b[a]=this,c[a]=arguments.length>1?e.call(arguments):d,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(d>1)for(i=new Array(d),j=new Array(d),k=new Array(d);d>b;b++)c[b]&&n.isFunction(c[b].promise)?c[b].promise().progress(h(b,j,i)).done(h(b,k,c)).fail(g.reject):--f;return f||g.resolveWith(k,c),g.promise()}});var I;n.fn.ready=function(a){return n.ready.promise().done(a),this},n.extend({isReady:!1,readyWait:1,holdReady:function(a){a?n.readyWait++:n.ready(!0)},ready:function(a){(a===!0?--n.readyWait:n.isReady)||(n.isReady=!0,a!==!0&&--n.readyWait>0||(I.resolveWith(d,[n]),n.fn.triggerHandler&&(n(d).triggerHandler("ready"),n(d).off("ready"))))}});function J(){d.removeEventListener("DOMContentLoaded",J),a.removeEventListener("load",J),n.ready()}n.ready.promise=function(b){return I||(I=n.Deferred(),"complete"===d.readyState||"loading"!==d.readyState&&!d.documentElement.doScroll?a.setTimeout(n.ready):(d.addEventListener("DOMContentLoaded",J),a.addEventListener("load",J))),I.promise(b)},n.ready.promise();var K=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===n.type(c)){e=!0;for(h in c)K(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,n.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(n(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},L=function(a){return 1===a.nodeType||9===a.nodeType||!+a.nodeType};function M(){this.expando=n.expando+M.uid++}M.uid=1,M.prototype={register:function(a,b){var c=b||{};return a.nodeType?a[this.expando]=c:Object.defineProperty(a,this.expando,{value:c,writable:!0,configurable:!0}),a[this.expando]},cache:function(a){if(!L(a))return{};var b=a[this.expando];return b||(b={},L(a)&&(a.nodeType?a[this.expando]=b:Object.defineProperty(a,this.expando,{value:b,configurable:!0}))),b},set:function(a,b,c){var d,e=this.cache(a);if("string"==typeof b)e[b]=c;else for(d in b)e[d]=b[d];return e},get:function(a,b){return void 0===b?this.cache(a):a[this.expando]&&a[this.expando][b]},access:function(a,b,c){var d;return void 0===b||b&&"string"==typeof b&&void 0===c?(d=this.get(a,b),void 0!==d?d:this.get(a,n.camelCase(b))):(this.set(a,b,c),void 0!==c?c:b)},remove:function(a,b){var c,d,e,f=a[this.expando];if(void 0!==f){if(void 0===b)this.register(a);else{n.isArray(b)?d=b.concat(b.map(n.camelCase)):(e=n.camelCase(b),b in f?d=[b,e]:(d=e,d=d in f?[d]:d.match(G)||[])),c=d.length;while(c--)delete f[d[c]]}(void 0===b||n.isEmptyObject(f))&&(a.nodeType?a[this.expando]=void 0:delete a[this.expando])}},hasData:function(a){var b=a[this.expando];return void 0!==b&&!n.isEmptyObject(b)}};var N=new M,O=new M,P=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,Q=/[A-Z]/g;function R(a,b,c){var d;if(void 0===c&&1===a.nodeType)if(d="data-"+b.replace(Q,"-$&").toLowerCase(),c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:P.test(c)?n.parseJSON(c):c; +}catch(e){}O.set(a,b,c)}else c=void 0;return c}n.extend({hasData:function(a){return O.hasData(a)||N.hasData(a)},data:function(a,b,c){return O.access(a,b,c)},removeData:function(a,b){O.remove(a,b)},_data:function(a,b,c){return 
N.access(a,b,c)},_removeData:function(a,b){N.remove(a,b)}}),n.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=O.get(f),1===f.nodeType&&!N.get(f,"hasDataAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=n.camelCase(d.slice(5)),R(f,d,e[d])));N.set(f,"hasDataAttrs",!0)}return e}return"object"==typeof a?this.each(function(){O.set(this,a)}):K(this,function(b){var c,d;if(f&&void 0===b){if(c=O.get(f,a)||O.get(f,a.replace(Q,"-$&").toLowerCase()),void 0!==c)return c;if(d=n.camelCase(a),c=O.get(f,d),void 0!==c)return c;if(c=R(f,d,void 0),void 0!==c)return c}else d=n.camelCase(a),this.each(function(){var c=O.get(this,d);O.set(this,d,b),a.indexOf("-")>-1&&void 0!==c&&O.set(this,a,b)})},null,b,arguments.length>1,null,!0)},removeData:function(a){return this.each(function(){O.remove(this,a)})}}),n.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=N.get(a,b),c&&(!d||n.isArray(c)?d=N.access(a,b,n.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=n.queue(a,b),d=c.length,e=c.shift(),f=n._queueHooks(a,b),g=function(){n.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return N.get(a,c)||N.access(a,c,{empty:n.Callbacks("once memory").add(function(){N.remove(a,[b+"queue",c])})})}}),n.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length",""],thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};$.optgroup=$.option,$.tbody=$.tfoot=$.colgroup=$.caption=$.thead,$.th=$.td;function _(a,b){var c="undefined"!=typeof a.getElementsByTagName?a.getElementsByTagName(b||"*"):"undefined"!=typeof a.querySelectorAll?a.querySelectorAll(b||"*"):[];return void 0===b||b&&n.nodeName(a,b)?n.merge([a],c):c}function aa(a,b){for(var c=0,d=a.length;d>c;c++)N.set(a[c],"globalEval",!b||N.get(b[c],"globalEval"))}var ba=/<|&#?\w+;/;function ca(a,b,c,d,e){for(var f,g,h,i,j,k,l=b.createDocumentFragment(),m=[],o=0,p=a.length;p>o;o++)if(f=a[o],f||0===f)if("object"===n.type(f))n.merge(m,f.nodeType?[f]:f);else if(ba.test(f)){g=g||l.appendChild(b.createElement("div")),h=(Y.exec(f)||["",""])[1].toLowerCase(),i=$[h]||$._default,g.innerHTML=i[1]+n.htmlPrefilter(f)+i[2],k=i[0];while(k--)g=g.lastChild;n.merge(m,g.childNodes),g=l.firstChild,g.textContent=""}else m.push(b.createTextNode(f));l.textContent="",o=0;while(f=m[o++])if(d&&n.inArray(f,d)>-1)e&&e.push(f);else if(j=n.contains(f.ownerDocument,f),g=_(l.appendChild(f),"script"),j&&aa(g),c){k=0;while(f=g[k++])Z.test(f.type||"")&&c.push(f)}return l}!function(){var a=d.createDocumentFragment(),b=a.appendChild(d.createElement("div")),c=d.createElement("input");c.setAttribute("type","radio"),c.setAttribute("checked","checked"),c.setAttribute("name","t"),b.appendChild(c),l.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,b.innerHTML="",l.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue}();var da=/^key/,ea=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,fa=/^([^.]*)(?:\.(.+)|)/;function ga(){return!0}function ha(){return!1}function ia(){try{return d.activeElement}catch(a){}}function ja(a,b,c,d,e,f){var g,h;if("object"==typeof b){"string"!=typeof c&&(d=d||c,c=void 0);for(h in b)ja(a,h,c,d,b[h],f);return a}if(null==d&&null==e?(e=c,d=c=void 0):null==e&&("string"==typeof c?(e=d,d=void 0):(e=d,d=c,c=void 0)),e===!1)e=ha;else if(!e)return a;return 1===f&&(g=e,e=function(a){return n().off(a),g.apply(this,arguments)},e.guid=g.guid||(g.guid=n.guid++)),a.each(function(){n.event.add(this,b,e,d,c)})}n.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=N.get(a);if(r){c.handler&&(f=c,c=f.handler,e=f.selector),c.guid||(c.guid=n.guid++),(i=r.events)||(i=r.events={}),(g=r.handle)||(g=r.handle=function(b){return"undefined"!=typeof n&&n.event.triggered!==b.type?n.event.dispatch.apply(a,arguments):void 0}),b=(b||"").match(G)||[""],j=b.length;while(j--)h=fa.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o&&(l=n.event.special[o]||{},o=(e?l.delegateType:l.bindType)||o,l=n.event.special[o]||{},k=n.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&n.expr.match.needsContext.test(e),namespace:p.join(".")},f),(m=i[o])||(m=i[o]=[],m.delegateCount=0,l.setup&&l.setup.call(a,d,p,g)!==!1||a.addEventListener&&a.addEventListener(o,g)),l.add&&(l.add.call(a,k),k.handler.guid||(k.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,k):m.push(k),n.event.global[o]=!0)}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,o,p,q,r=N.hasData(a)&&N.get(a);if(r&&(i=r.events)){b=(b||"").match(G)||[""],j=b.length;while(j--)if(h=fa.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=n.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,m=i[o]||[],h=h[2]&&new 
RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),g=f=m.length;while(f--)k=m[f],!e&&q!==k.origType||c&&c.guid!==k.guid||h&&!h.test(k.namespace)||d&&d!==k.selector&&("**"!==d||!k.selector)||(m.splice(f,1),k.selector&&m.delegateCount--,l.remove&&l.remove.call(a,k));g&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||n.removeEvent(a,o,r.handle),delete i[o])}else for(o in i)n.event.remove(a,o+b[j],c,d,!0);n.isEmptyObject(i)&&N.remove(a,"handle events")}},dispatch:function(a){a=n.event.fix(a);var b,c,d,f,g,h=[],i=e.call(arguments),j=(N.get(this,"events")||{})[a.type]||[],k=n.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=n.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,c=0;while((g=f.handlers[c++])&&!a.isImmediatePropagationStopped())a.rnamespace&&!a.rnamespace.test(g.namespace)||(a.handleObj=g,a.data=g.data,d=((n.event.special[g.origType]||{}).handle||g.handler).apply(f.elem,i),void 0!==d&&(a.result=d)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&("click"!==a.type||isNaN(a.button)||a.button<1))for(;i!==this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(d=[],c=0;h>c;c++)f=b[c],e=f.selector+" ",void 0===d[e]&&(d[e]=f.needsContext?n(e,this).index(i)>-1:n.find(e,this,null,[i]).length),d[e]&&d.push(f);d.length&&g.push({elem:i,handlers:d})}return h]*)\/>/gi,la=/\s*$/g;function pa(a,b){return n.nodeName(a,"table")&&n.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function qa(a){return a.type=(null!==a.getAttribute("type"))+"/"+a.type,a}function ra(a){var b=na.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function sa(a,b){var c,d,e,f,g,h,i,j;if(1===b.nodeType){if(N.hasData(a)&&(f=N.access(a),g=N.set(b,f),j=f.events)){delete g.handle,g.events={};for(e in j)for(c=0,d=j[e].length;d>c;c++)n.event.add(b,e,j[e][c])}O.hasData(a)&&(h=O.access(a),i=n.extend({},h),O.set(b,i))}}function ta(a,b){var c=b.nodeName.toLowerCase();"input"===c&&X.test(a.type)?b.checked=a.checked:"input"!==c&&"textarea"!==c||(b.defaultValue=a.defaultValue)}function ua(a,b,c,d){b=f.apply([],b);var e,g,h,i,j,k,m=0,o=a.length,p=o-1,q=b[0],r=n.isFunction(q);if(r||o>1&&"string"==typeof q&&!l.checkClone&&ma.test(q))return a.each(function(e){var f=a.eq(e);r&&(b[0]=q.call(this,e,f.html())),ua(f,b,c,d)});if(o&&(e=ca(b,a[0].ownerDocument,!1,a,d),g=e.firstChild,1===e.childNodes.length&&(e=g),g||d)){for(h=n.map(_(e,"script"),qa),i=h.length;o>m;m++)j=e,m!==p&&(j=n.clone(j,!0,!0),i&&n.merge(h,_(j,"script"))),c.call(a[m],j,m);if(i)for(k=h[h.length-1].ownerDocument,n.map(h,ra),m=0;i>m;m++)j=h[m],Z.test(j.type||"")&&!N.access(j,"globalEval")&&n.contains(k,j)&&(j.src?n._evalUrl&&n._evalUrl(j.src):n.globalEval(j.textContent.replace(oa,"")))}return a}function va(a,b,c){for(var d,e=b?n.filter(b,a):a,f=0;null!=(d=e[f]);f++)c||1!==d.nodeType||n.cleanData(_(d)),d.parentNode&&(c&&n.contains(d.ownerDocument,d)&&aa(_(d,"script")),d.parentNode.removeChild(d));return a}n.extend({htmlPrefilter:function(a){return a.replace(ka,"<$1>")},clone:function(a,b,c){var 
d,e,f,g,h=a.cloneNode(!0),i=n.contains(a.ownerDocument,a);if(!(l.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||n.isXMLDoc(a)))for(g=_(h),f=_(a),d=0,e=f.length;e>d;d++)ta(f[d],g[d]);if(b)if(c)for(f=f||_(a),g=g||_(h),d=0,e=f.length;e>d;d++)sa(f[d],g[d]);else sa(a,h);return g=_(h,"script"),g.length>0&&aa(g,!i&&_(a,"script")),h},cleanData:function(a){for(var b,c,d,e=n.event.special,f=0;void 0!==(c=a[f]);f++)if(L(c)){if(b=c[N.expando]){if(b.events)for(d in b.events)e[d]?n.event.remove(c,d):n.removeEvent(c,d,b.handle);c[N.expando]=void 0}c[O.expando]&&(c[O.expando]=void 0)}}}),n.fn.extend({domManip:ua,detach:function(a){return va(this,a,!0)},remove:function(a){return va(this,a)},text:function(a){return K(this,function(a){return void 0===a?n.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=a)})},null,a,arguments.length)},append:function(){return ua(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=pa(this,a);b.appendChild(a)}})},prepend:function(){return ua(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=pa(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return ua(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return ua(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},empty:function(){for(var a,b=0;null!=(a=this[b]);b++)1===a.nodeType&&(n.cleanData(_(a,!1)),a.textContent="");return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return n.clone(this,a,b)})},html:function(a){return K(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a&&1===b.nodeType)return b.innerHTML;if("string"==typeof a&&!la.test(a)&&!$[(Y.exec(a)||["",""])[1].toLowerCase()]){a=n.htmlPrefilter(a);try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(n.cleanData(_(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=[];return ua(this,arguments,function(b){var c=this.parentNode;n.inArray(this,a)<0&&(n.cleanData(_(this)),c&&c.replaceChild(b,this))},a)}}),n.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){n.fn[a]=function(a){for(var c,d=[],e=n(a),f=e.length-1,h=0;f>=h;h++)c=h===f?this:this.clone(!0),n(e[h])[b](c),g.apply(d,c.get());return this.pushStack(d)}});var wa,xa={HTML:"block",BODY:"block"};function ya(a,b){var c=n(b.createElement(a)).appendTo(b.body),d=n.css(c[0],"display");return c.detach(),d}function za(a){var b=d,c=xa[a];return c||(c=ya(a,b),"none"!==c&&c||(wa=(wa||n("